diff --git a/.github/ISSUE_TEMPLATE/postgres-operator-issue-template.md b/.github/ISSUE_TEMPLATE/postgres-operator-issue-template.md index 2cbe4310a..ee3a704ea 100644 --- a/.github/ISSUE_TEMPLATE/postgres-operator-issue-template.md +++ b/.github/ISSUE_TEMPLATE/postgres-operator-issue-template.md @@ -9,7 +9,7 @@ assignees: '' Please, answer some short questions which should help us to understand your problem / question better? -- **Which image of the operator are you using?** e.g. registry.opensource.zalan.do/acid/postgres-operator:v1.10.1 +- **Which image of the operator are you using?** e.g. ghcr.io/zalando/postgres-operator:v1.13.0 - **Where do you run it - cloud or metal? Kubernetes or OpenShift?** [AWS K8s | GCP ... | Bare Metal K8s] - **Are you running Postgres Operator in production?** [yes | no] - **Type of issue?** [Bug report, question, feature request, etc.] diff --git a/.github/workflows/publish_ghcr_image.yaml b/.github/workflows/publish_ghcr_image.yaml index b81298e6a..d56ff2f17 100644 --- a/.github/workflows/publish_ghcr_image.yaml +++ b/.github/workflows/publish_ghcr_image.yaml @@ -1,13 +1,15 @@ -name: Publish multiarch postgres-operator image on ghcr.io +name: Publish multiarch postgres-operator images on ghcr.io env: REGISTRY: ghcr.io IMAGE_NAME: ${{ github.repository }} + IMAGE_NAME_UI: ${{ github.repository }}-ui on: push: tags: - '*' + jobs: publish: name: Build, test and push image @@ -21,7 +23,7 @@ jobs: - uses: actions/setup-go@v2 with: - go-version: "^1.19.8" + go-version: "^1.23.4" - name: Run unit tests run: make deps mocks test @@ -29,8 +31,20 @@ jobs: - name: Define image name id: image run: | - IMAGE="${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${GITHUB_REF/refs\/tags\//}" - echo "NAME=$IMAGE" >> $GITHUB_OUTPUT + OPERATOR_IMAGE="${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${GITHUB_REF/refs\/tags\//}" + echo "OPERATOR_IMAGE=$OPERATOR_IMAGE" >> $GITHUB_OUTPUT + + - name: Define UI image name + id: image_ui + run: | + UI_IMAGE="${{ env.REGISTRY }}/${{ env.IMAGE_NAME_UI }}:${GITHUB_REF/refs\/tags\//}" + echo "UI_IMAGE=$UI_IMAGE" >> $GITHUB_OUTPUT + + - name: Define logical backup image name + id: image_lb + run: | + BACKUP_IMAGE="${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}/logical-backup:${GITHUB_REF_NAME}" + echo "BACKUP_IMAGE=$BACKUP_IMAGE" >> $GITHUB_OUTPUT - name: Set up QEMU uses: docker/setup-qemu-action@v2 @@ -45,12 +59,30 @@ jobs: username: ${{ github.actor }} password: ${{ secrets.GITHUB_TOKEN }} - - name: Build and push multiarch image to ghcr + - name: Build and push multiarch operator image to ghcr uses: docker/build-push-action@v3 with: context: . 
file: docker/Dockerfile push: true - build-args: BASE_IMAGE=alpine:3.15 - tags: "${{ steps.image.outputs.NAME }}" + build-args: BASE_IMAGE=alpine:3 + tags: "${{ steps.image.outputs.OPERATOR_IMAGE }}" + platforms: linux/amd64,linux/arm64 + + - name: Build and push multiarch ui image to ghcr + uses: docker/build-push-action@v3 + with: + context: ui + push: true + build-args: BASE_IMAGE=python:3.11-slim + tags: "${{ steps.image_ui.outputs.UI_IMAGE }}" + platforms: linux/amd64,linux/arm64 + + - name: Build and push multiarch logical-backup image to ghcr + uses: docker/build-push-action@v3 + with: + context: logical-backup + push: true + build-args: BASE_IMAGE=ubuntu:22.04 + tags: "${{ steps.image_lb.outputs.BACKUP_IMAGE }}" platforms: linux/amd64,linux/arm64 diff --git a/.github/workflows/run_e2e.yaml b/.github/workflows/run_e2e.yaml index e1cfc6d57..16573046e 100644 --- a/.github/workflows/run_e2e.yaml +++ b/.github/workflows/run_e2e.yaml @@ -14,7 +14,7 @@ jobs: - uses: actions/checkout@v1 - uses: actions/setup-go@v2 with: - go-version: "^1.19.8" + go-version: "^1.23.4" - name: Make dependencies run: make deps mocks - name: Code generation diff --git a/.github/workflows/run_tests.yaml b/.github/workflows/run_tests.yaml index ec08dfee3..db47f6e40 100644 --- a/.github/workflows/run_tests.yaml +++ b/.github/workflows/run_tests.yaml @@ -14,7 +14,7 @@ jobs: - uses: actions/checkout@v2 - uses: actions/setup-go@v2 with: - go-version: "^1.19.8" + go-version: "^1.23.4" - name: Make dependencies run: make deps mocks - name: Compile @@ -22,7 +22,7 @@ jobs: - name: Run unit tests run: go test -race -covermode atomic -coverprofile=coverage.out ./... - name: Convert coverage to lcov - uses: jandelgado/gcov2lcov-action@v1.0.8 + uses: jandelgado/gcov2lcov-action@v1.1.1 - name: Coveralls uses: coverallsapp/github-action@master with: diff --git a/.gitignore b/.gitignore index 081eb5fba..5938db216 100644 --- a/.gitignore +++ b/.gitignore @@ -102,3 +102,7 @@ e2e/tls *.pot mocks + +ui/.npm/ + +.DS_Store diff --git a/CODEOWNERS b/CODEOWNERS index daca96b42..ca6f43a72 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -1,2 +1,2 @@ # global owners -* @sdudoladov @Jan-M @FxKu @jopadi @idanovinda @hughcapet +* @sdudoladov @Jan-M @FxKu @jopadi @idanovinda @hughcapet @macedigital diff --git a/LICENSE b/LICENSE index 5d22aa80a..b21099078 100644 --- a/LICENSE +++ b/LICENSE @@ -1,6 +1,6 @@ The MIT License (MIT) -Copyright (c) 2023 Zalando SE +Copyright (c) 2024 Zalando SE Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/MAINTAINERS b/MAINTAINERS index ea2a29ca0..cc07af957 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -3,4 +3,5 @@ Felix Kunde Jan Mussler Jociele Padilha Ida Novindasari -Polina Bungina \ No newline at end of file +Polina Bungina +Matthias Adler diff --git a/Makefile b/Makefile index 7121fd322..8fc4b36f6 100644 --- a/Makefile +++ b/Makefile @@ -69,7 +69,7 @@ docker: ${DOCKERDIR}/${DOCKERFILE} docker build --rm -t "$(IMAGE):$(TAG)$(CDP_TAG)$(DEBUG_FRESH)$(DEBUG_POSTFIX)" -f "${DOCKERDIR}/${DOCKERFILE}" --build-arg VERSION="${VERSION}" . indocker-race: - docker run --rm -v "${GOPATH}":"${GOPATH}" -e GOPATH="${GOPATH}" -e RACE=1 -w ${PWD} golang:1.19.8 bash -c "make linux" + docker run --rm -v "${GOPATH}":"${GOPATH}" -e GOPATH="${GOPATH}" -e RACE=1 -w ${PWD} golang:1.23.4 bash -c "make linux" push: docker push "$(IMAGE):$(TAG)$(CDP_TAG)" @@ -78,7 +78,7 @@ mocks: GO111MODULE=on go generate ./... 
tools: - GO111MODULE=on go get -d k8s.io/client-go@kubernetes-1.25.9 + GO111MODULE=on go get k8s.io/client-go@kubernetes-1.30.4 GO111MODULE=on go install github.com/golang/mock/mockgen@v1.6.0 GO111MODULE=on go mod tidy diff --git a/README.md b/README.md index fbcd325ff..9493115de 100644 --- a/README.md +++ b/README.md @@ -24,18 +24,17 @@ pipelines with no access to Kubernetes API directly, promoting infrastructure as * Basic credential and user management on K8s, eases application deployments * Support for custom TLS certificates * UI to create and edit Postgres cluster manifests -* Support for AWS EBS gp2 to gp3 migration, supporting iops and throughput configuration * Compatible with OpenShift ### PostgreSQL features -* Supports PostgreSQL 15, starting from 10+ +* Supports PostgreSQL 17, starting from 13+ * Streaming replication cluster via Patroni * Point-In-Time-Recovery with -[pg_basebackup](https://www.postgresql.org/docs/11/app-pgbasebackup.html) / +[pg_basebackup](https://www.postgresql.org/docs/17/app-pgbasebackup.html) / [WAL-E](https://github.com/wal-e/wal-e) via [Spilo](https://github.com/zalando/spilo) * Preload libraries: [bg_mon](https://github.com/CyberDem0n/bg_mon), -[pg_stat_statements](https://www.postgresql.org/docs/15/pgstatstatements.html), +[pg_stat_statements](https://www.postgresql.org/docs/17/pgstatstatements.html), [pgextwlist](https://github.com/dimitri/pgextwlist), [pg_auth_mon](https://github.com/RafiaSabih/pg_auth_mon) * Incl. popular Postgres extensions such as @@ -45,6 +44,7 @@ pipelines with no access to Kubernetes API directly, promoting infrastructure as [pg_partman](https://github.com/pgpartman/pg_partman), [pg_stat_kcache](https://github.com/powa-team/pg_stat_kcache), [pgq](https://github.com/pgq/pgq), +[pgvector](https://github.com/pgvector/pgvector), [plpgsql_check](https://github.com/okbob/plpgsql_check), [postgis](https://postgis.net/), [set_user](https://github.com/pgaudit/set_user) and @@ -57,11 +57,12 @@ production for over five years. | Release | Postgres versions | K8s versions | Golang | | :-------- | :---------------: | :---------------: | :-----: | -| v1.10.* | 10 → 15 | 1.21+ | 1.19.8 | +| v1.14.0 | 13 → 17 | 1.27+ | 1.23.4 | +| v1.13.0 | 12 → 16 | 1.27+ | 1.22.5 | +| v1.12.0 | 11 → 16 | 1.27+ | 1.22.3 | +| v1.11.0 | 11 → 16 | 1.27+ | 1.21.7 | +| v1.10.1 | 10 → 15 | 1.21+ | 1.19.8 | | v1.9.0 | 10 → 15 | 1.21+ | 1.18.9 | -| v1.8.* | 9.5 → 14 | 1.20 → 1.24 | 1.17.4 | -| v1.7.1 | 9.5 → 14 | 1.20 → 1.24 | 1.16.9 | - ## Getting started @@ -87,9 +88,3 @@ There is a browser-friendly version of this documentation at * [Configuration options](docs/reference/operator_parameters.md) * [Postgres manifest reference](docs/reference/cluster_manifest.md) * [Command-line options and environment variables](docs/reference/command_line_and_environment.md) - -## Community - -There are two places to get in touch with the community: -1. The [GitHub issue tracker](https://github.com/zalando/postgres-operator/issues) -2. 
The **#postgres-operator** [slack channel](https://postgres-slack.herokuapp.com) diff --git a/charts/postgres-operator-ui/Chart.yaml b/charts/postgres-operator-ui/Chart.yaml index c8b5144dc..f4e2adf95 100644 --- a/charts/postgres-operator-ui/Chart.yaml +++ b/charts/postgres-operator-ui/Chart.yaml @@ -1,7 +1,7 @@ apiVersion: v2 name: postgres-operator-ui -version: 1.10.1 -appVersion: 1.10.1 +version: 1.14.0 +appVersion: 1.14.0 home: https://github.com/zalando/postgres-operator description: Postgres Operator UI provides a graphical interface for a convenient database-as-a-service user experience keywords: diff --git a/charts/postgres-operator-ui/index.yaml b/charts/postgres-operator-ui/index.yaml index b0a4f29ed..dab9594e9 100644 --- a/charts/postgres-operator-ui/index.yaml +++ b/charts/postgres-operator-ui/index.yaml @@ -2,11 +2,11 @@ apiVersion: v1 entries: postgres-operator-ui: - apiVersion: v2 - appVersion: 1.10.1 - created: "2023-09-07T16:27:29.490678409+02:00" + appVersion: 1.14.0 + created: "2024-12-23T11:26:07.721761867+01:00" description: Postgres Operator UI provides a graphical interface for a convenient database-as-a-service user experience - digest: 2e5e7a82aebee519ec57c6243eb8735124aa4585a3a19c66ffd69638fbeb11ce + digest: e87ed898079a852957a67a4caf3fbd27b9098e413f5d961b7a771a6ae8b3e17c home: https://github.com/zalando/postgres-operator keywords: - postgres @@ -22,14 +22,14 @@ entries: sources: - https://github.com/zalando/postgres-operator urls: - - postgres-operator-ui-1.10.1.tgz - version: 1.10.1 + - postgres-operator-ui-1.14.0.tgz + version: 1.14.0 - apiVersion: v2 - appVersion: 1.10.0 - created: "2023-09-07T16:27:29.489712628+02:00" + appVersion: 1.13.0 + created: "2024-12-23T11:26:07.719409282+01:00" description: Postgres Operator UI provides a graphical interface for a convenient database-as-a-service user experience - digest: 47413650e3188539ae778a601998efa2c4f80b8aa16e3668a2fc7b72e014b605 + digest: e0444e516b50f82002d1a733527813c51759a627cefdd1005cea73659f824ea8 home: https://github.com/zalando/postgres-operator keywords: - postgres @@ -45,14 +45,14 @@ entries: sources: - https://github.com/zalando/postgres-operator urls: - - postgres-operator-ui-1.10.0.tgz - version: 1.10.0 + - postgres-operator-ui-1.13.0.tgz + version: 1.13.0 - apiVersion: v2 - appVersion: 1.9.0 - created: "2023-09-07T16:27:29.506671133+02:00" + appVersion: 1.12.2 + created: "2024-12-23T11:26:07.717202918+01:00" description: Postgres Operator UI provides a graphical interface for a convenient database-as-a-service user experience - digest: df434af6c8b697fe0631017ecc25e3c79e125361ae6622347cea41a545153bdc + digest: cbcef400c23ccece27d97369ad629278265c013e0a45c0b7f33e7568a082fedd home: https://github.com/zalando/postgres-operator keywords: - postgres @@ -68,14 +68,14 @@ entries: sources: - https://github.com/zalando/postgres-operator urls: - - postgres-operator-ui-1.9.0.tgz - version: 1.9.0 + - postgres-operator-ui-1.12.2.tgz + version: 1.12.2 - apiVersion: v2 - appVersion: 1.8.2 - created: "2023-09-07T16:27:29.505718885+02:00" + appVersion: 1.11.0 + created: "2024-12-23T11:26:07.714792146+01:00" description: Postgres Operator UI provides a graphical interface for a convenient database-as-a-service user experience - digest: fbfc90fa8fd007a08a7c02e0ec9108bb8282cbb42b8c976d88f2193d6edff30c + digest: a45f2284045c2a9a79750a36997386444f39b01ac722b17c84b431457577a3a2 home: https://github.com/zalando/postgres-operator keywords: - postgres @@ -91,60 +91,14 @@ entries: sources: - 
https://github.com/zalando/postgres-operator urls: - - postgres-operator-ui-1.8.2.tgz - version: 1.8.2 + - postgres-operator-ui-1.11.0.tgz + version: 1.11.0 - apiVersion: v2 - appVersion: 1.8.1 - created: "2023-09-07T16:27:29.504804961+02:00" - description: Postgres Operator UI provides a graphical interface for a convenient - database-as-a-service user experience - digest: d26342e385ea51a0fbfbe23477999863e9489664ae803ea5c56da8897db84d24 - home: https://github.com/zalando/postgres-operator - keywords: - - postgres - - operator - - ui - - cloud-native - - patroni - - spilo - maintainers: - - email: opensource@zalando.de - name: Zalando - name: postgres-operator-ui - sources: - - https://github.com/zalando/postgres-operator - urls: - - postgres-operator-ui-1.8.1.tgz - version: 1.8.1 - - apiVersion: v1 - appVersion: 1.8.0 - created: "2023-09-07T16:27:29.503862231+02:00" - description: Postgres Operator UI provides a graphical interface for a convenient - database-as-a-service user experience - digest: d4a7b40c23fd167841cc28342afdbd5ecc809181913a5c31061c83139187f148 - home: https://github.com/zalando/postgres-operator - keywords: - - postgres - - operator - - ui - - cloud-native - - patroni - - spilo - maintainers: - - email: opensource@zalando.de - name: Zalando - name: postgres-operator-ui - sources: - - https://github.com/zalando/postgres-operator - urls: - - postgres-operator-ui-1.8.0.tgz - version: 1.8.0 - - apiVersion: v1 - appVersion: 1.7.1 - created: "2023-09-07T16:27:29.502938358+02:00" + appVersion: 1.10.1 + created: "2024-12-23T11:26:07.712194397+01:00" description: Postgres Operator UI provides a graphical interface for a convenient database-as-a-service user experience - digest: 97aed1a1d37cd5f8441eea9522f38e56cc829786ad2134c437a5e6a15c995869 + digest: 2e5e7a82aebee519ec57c6243eb8735124aa4585a3a19c66ffd69638fbeb11ce home: https://github.com/zalando/postgres-operator keywords: - postgres @@ -160,14 +114,14 @@ entries: sources: - https://github.com/zalando/postgres-operator urls: - - postgres-operator-ui-1.7.1.tgz - version: 1.7.1 - - apiVersion: v1 - appVersion: 1.7.0 - created: "2023-09-07T16:27:29.494088829+02:00" + - postgres-operator-ui-1.10.1.tgz + version: 1.10.1 + - apiVersion: v2 + appVersion: 1.9.0 + created: "2024-12-23T11:26:07.723891496+01:00" description: Postgres Operator UI provides a graphical interface for a convenient database-as-a-service user experience - digest: 37fba1968347daad393dbd1c6ee6e5b6a24d1095f972c0102197531c62dcada8 + digest: df434af6c8b697fe0631017ecc25e3c79e125361ae6622347cea41a545153bdc home: https://github.com/zalando/postgres-operator keywords: - postgres @@ -183,6 +137,6 @@ entries: sources: - https://github.com/zalando/postgres-operator urls: - - postgres-operator-ui-1.7.0.tgz - version: 1.7.0 -generated: "2023-09-07T16:27:29.488457568+02:00" + - postgres-operator-ui-1.9.0.tgz + version: 1.9.0 +generated: "2024-12-23T11:26:07.709192608+01:00" diff --git a/charts/postgres-operator-ui/postgres-operator-ui-1.10.0.tgz b/charts/postgres-operator-ui/postgres-operator-ui-1.10.0.tgz deleted file mode 100644 index 7bc308f0f..000000000 Binary files a/charts/postgres-operator-ui/postgres-operator-ui-1.10.0.tgz and /dev/null differ diff --git a/charts/postgres-operator-ui/postgres-operator-ui-1.11.0.tgz b/charts/postgres-operator-ui/postgres-operator-ui-1.11.0.tgz new file mode 100644 index 000000000..7612a159b Binary files /dev/null and b/charts/postgres-operator-ui/postgres-operator-ui-1.11.0.tgz differ diff --git 
a/charts/postgres-operator-ui/postgres-operator-ui-1.12.2.tgz b/charts/postgres-operator-ui/postgres-operator-ui-1.12.2.tgz new file mode 100644 index 000000000..f34fd8f11 Binary files /dev/null and b/charts/postgres-operator-ui/postgres-operator-ui-1.12.2.tgz differ diff --git a/charts/postgres-operator-ui/postgres-operator-ui-1.13.0.tgz b/charts/postgres-operator-ui/postgres-operator-ui-1.13.0.tgz new file mode 100644 index 000000000..21aadc076 Binary files /dev/null and b/charts/postgres-operator-ui/postgres-operator-ui-1.13.0.tgz differ diff --git a/charts/postgres-operator-ui/postgres-operator-ui-1.14.0.tgz b/charts/postgres-operator-ui/postgres-operator-ui-1.14.0.tgz new file mode 100644 index 000000000..8e229d0f5 Binary files /dev/null and b/charts/postgres-operator-ui/postgres-operator-ui-1.14.0.tgz differ diff --git a/charts/postgres-operator-ui/postgres-operator-ui-1.7.0.tgz b/charts/postgres-operator-ui/postgres-operator-ui-1.7.0.tgz deleted file mode 100644 index 1c5cae51b..000000000 Binary files a/charts/postgres-operator-ui/postgres-operator-ui-1.7.0.tgz and /dev/null differ diff --git a/charts/postgres-operator-ui/postgres-operator-ui-1.7.1.tgz b/charts/postgres-operator-ui/postgres-operator-ui-1.7.1.tgz deleted file mode 100644 index 45c01d715..000000000 Binary files a/charts/postgres-operator-ui/postgres-operator-ui-1.7.1.tgz and /dev/null differ diff --git a/charts/postgres-operator-ui/postgres-operator-ui-1.8.0.tgz b/charts/postgres-operator-ui/postgres-operator-ui-1.8.0.tgz deleted file mode 100644 index e3563911c..000000000 Binary files a/charts/postgres-operator-ui/postgres-operator-ui-1.8.0.tgz and /dev/null differ diff --git a/charts/postgres-operator-ui/postgres-operator-ui-1.8.1.tgz b/charts/postgres-operator-ui/postgres-operator-ui-1.8.1.tgz deleted file mode 100644 index 98fa4e7d3..000000000 Binary files a/charts/postgres-operator-ui/postgres-operator-ui-1.8.1.tgz and /dev/null differ diff --git a/charts/postgres-operator-ui/postgres-operator-ui-1.8.2.tgz b/charts/postgres-operator-ui/postgres-operator-ui-1.8.2.tgz deleted file mode 100644 index 806ee9b23..000000000 Binary files a/charts/postgres-operator-ui/postgres-operator-ui-1.8.2.tgz and /dev/null differ diff --git a/charts/postgres-operator-ui/templates/deployment.yaml b/charts/postgres-operator-ui/templates/deployment.yaml index 5ae9e003c..fbb9ee086 100644 --- a/charts/postgres-operator-ui/templates/deployment.yaml +++ b/charts/postgres-operator-ui/templates/deployment.yaml @@ -9,7 +9,7 @@ metadata: name: {{ template "postgres-operator-ui.fullname" . }} namespace: {{ .Release.Namespace }} spec: - replicas: 1 + replicas: {{ .Values.replicaCount }} selector: matchLabels: app.kubernetes.io/name: {{ template "postgres-operator-ui.name" . 
}} @@ -84,13 +84,22 @@ spec: "limit_iops": 16000, "limit_throughput": 1000, "postgresql_versions": [ + "17", + "16", "15", "14", - "13", - "12", - "11" + "13" ] } {{- if .Values.extraEnvs }} {{- .Values.extraEnvs | toYaml | nindent 12 }} {{- end }} + affinity: +{{ toYaml .Values.affinity | indent 8 }} + nodeSelector: +{{ toYaml .Values.nodeSelector | indent 8 }} + tolerations: +{{ toYaml .Values.tolerations | indent 8 }} + {{- if .Values.priorityClassName }} + priorityClassName: {{ .Values.priorityClassName }} + {{- end }} diff --git a/charts/postgres-operator-ui/values.yaml b/charts/postgres-operator-ui/values.yaml index 23eae0c45..9923ff023 100644 --- a/charts/postgres-operator-ui/values.yaml +++ b/charts/postgres-operator-ui/values.yaml @@ -6,9 +6,9 @@ replicaCount: 1 # configure ui image image: - registry: registry.opensource.zalan.do - repository: acid/postgres-operator-ui - tag: v1.10.1 + registry: ghcr.io + repository: zalando/postgres-operator-ui + tag: v1.14.0 pullPolicy: "IfNotPresent" # Optionally specify an array of imagePullSecrets. @@ -45,6 +45,7 @@ envs: operatorApiUrl: "http://postgres-operator:8080" operatorClusterNameLabel: "cluster-name" resourcesVisible: "False" + # Set to "*" to allow viewing/creation of clusters in all namespaces targetNamespace: "default" teams: - "acid" @@ -61,8 +62,6 @@ podAnnotations: extraEnvs: [] # Exemple of settings to make snapshot view working in the ui when using AWS - # - name: WALE_S3_ENDPOINT - # value: https+path://s3.us-east-1.amazonaws.com:443 # - name: SPILO_S3_BACKUP_PREFIX # value: spilo/ # - name: AWS_ACCESS_KEY_ID @@ -82,8 +81,6 @@ extraEnvs: # key: AWS_DEFAULT_REGION # - name: SPILO_S3_BACKUP_BUCKET # value: - # - name: "USE_AWS_INSTANCE_PROFILE" - # value: "true" # configure UI service service: @@ -110,3 +107,18 @@ ingress: # - secretName: ui-tls # hosts: # - ui.exmaple.org + +# priority class for operator-ui pod +priorityClassName: "" + +# Affinity for pod assignment +# Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity +affinity: {} + +# Node labels for pod assignment +# Ref: https://kubernetes.io/docs/user-guide/node-selection/ +nodeSelector: {} + +# Tolerations for pod assignment +# Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ +tolerations: [] diff --git a/charts/postgres-operator/Chart.yaml b/charts/postgres-operator/Chart.yaml index 7a7f70567..35852c488 100644 --- a/charts/postgres-operator/Chart.yaml +++ b/charts/postgres-operator/Chart.yaml @@ -1,7 +1,7 @@ apiVersion: v2 name: postgres-operator -version: 1.10.1 -appVersion: 1.10.1 +version: 1.14.0 +appVersion: 1.14.0 home: https://github.com/zalando/postgres-operator description: Postgres Operator creates and manages PostgreSQL clusters running in Kubernetes keywords: diff --git a/charts/postgres-operator/crds/operatorconfigurations.yaml b/charts/postgres-operator/crds/operatorconfigurations.yaml index 7e1ecbde1..058769acf 100644 --- a/charts/postgres-operator/crds/operatorconfigurations.yaml +++ b/charts/postgres-operator/crds/operatorconfigurations.yaml @@ -68,7 +68,7 @@ spec: type: string docker_image: type: string - default: "ghcr.io/zalando/spilo-15:3.0-p1" + default: "ghcr.io/zalando/spilo-17:4.0-p2" enable_crd_registration: type: boolean default: true @@ -160,17 +160,17 @@ spec: properties: major_version_upgrade_mode: type: string - default: "off" + default: "manual" major_version_upgrade_team_allow_list: type: array items: type: string minimal_major_version: type: string - default: 
"11" + default: "13" target_major_version: type: string - default: "15" + default: "17" kubernetes: type: object properties: @@ -205,9 +205,18 @@ spec: enable_cross_namespace_secret: type: boolean default: false + enable_finalizers: + type: boolean + default: false enable_init_containers: type: boolean default: true + enable_owner_references: + type: boolean + default: false + enable_persistent_volume_claim_deletion: + type: boolean + default: true enable_pod_antiaffinity: type: boolean default: false @@ -217,6 +226,9 @@ spec: enable_readiness_probe: type: boolean default: false + enable_secrets_deletion: + type: boolean + default: true enable_sidecars: type: boolean default: true @@ -275,6 +287,9 @@ spec: oauth_token_secret_name: type: string default: "postgresql-operator" + pdb_master_label_selector: + type: boolean + default: true pdb_name_format: type: string default: "postgres-{cluster}-pdb" @@ -361,34 +376,28 @@ spec: properties: default_cpu_limit: type: string - pattern: '^(\d+m|\d+(\.\d{1,3})?)$' - default: "1" + pattern: '^(\d+m|\d+(\.\d{1,3})?)$|^$' default_cpu_request: type: string - pattern: '^(\d+m|\d+(\.\d{1,3})?)$' - default: "100m" + pattern: '^(\d+m|\d+(\.\d{1,3})?)$|^$' default_memory_limit: type: string - pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$' - default: "500Mi" + pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$|^$' default_memory_request: type: string - pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$' - default: "100Mi" + pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$|^$' max_cpu_request: type: string - pattern: '^(\d+m|\d+(\.\d{1,3})?)$' + pattern: '^(\d+m|\d+(\.\d{1,3})?)$|^$' max_memory_request: type: string - pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$' + pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$|^$' min_cpu_limit: type: string - pattern: '^(\d+m|\d+(\.\d{1,3})?)$' - default: "250m" + pattern: '^(\d+m|\d+(\.\d{1,3})?)$|^$' min_memory_limit: type: string - pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$' - default: "250Mi" + pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$|^$' timeouts: type: object properties: @@ -463,7 +472,6 @@ spec: type: string additional_secret_mount_path: type: string - default: "/meta/credentials" aws_region: type: string default: "eu-central-1" @@ -502,7 +510,7 @@ spec: pattern: '^(\d+m|\d+(\.\d{1,3})?)$' logical_backup_docker_image: type: string - default: "registry.opensource.zalan.do/acid/logical-backup:v1.10.1" + default: "ghcr.io/zalando/postgres-operator/logical-backup:v1.13.0" logical_backup_google_application_credentials: type: string logical_backup_job_prefix: @@ -525,6 +533,8 @@ spec: type: string logical_backup_s3_bucket: type: string + logical_backup_s3_bucket_prefix: + type: string logical_backup_s3_endpoint: type: string logical_backup_s3_region: @@ -539,6 +549,8 @@ spec: type: string pattern: '^(\d+|\*)(/\d+)?(\s+(\d+|\*)(/\d+)?){4}$' default: "30 00 * * *" + logical_backup_cronjob_environment_secret: + type: string debug: type: object properties: @@ -650,7 +662,7 @@ spec: default: "pooler" connection_pooler_image: type: string - default: "registry.opensource.zalan.do/acid/pgbouncer:master-27" + default: "registry.opensource.zalan.do/acid/pgbouncer:master-32" connection_pooler_max_db_connections: type: integer default: 60 @@ -667,19 +679,15 @@ spec: connection_pooler_default_cpu_limit: type: string pattern: '^(\d+m|\d+(\.\d{1,3})?)$' - default: "1" connection_pooler_default_cpu_request: type: string pattern: '^(\d+m|\d+(\.\d{1,3})?)$' - default: "500m" 
connection_pooler_default_memory_limit: type: string pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$' - default: "100Mi" connection_pooler_default_memory_request: type: string pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$' - default: "100Mi" patroni: type: object properties: diff --git a/charts/postgres-operator/crds/postgresqls.yaml b/charts/postgres-operator/crds/postgresqls.yaml index c9fd30f87..8083e5e1d 100644 --- a/charts/postgres-operator/crds/postgresqls.yaml +++ b/charts/postgres-operator/crds/postgresqls.yaml @@ -87,10 +87,14 @@ spec: - mountPath - volumeSource properties: + isSubPathExpr: + type: boolean name: type: string mountPath: type: string + subPath: + type: string targetContainers: type: array nullable: true @@ -99,8 +103,6 @@ spec: volumeSource: type: object x-kubernetes-preserve-unknown-fields: true - subPath: - type: string allowedSourceRanges: type: array nullable: true @@ -215,6 +217,8 @@ spec: items: type: object x-kubernetes-preserve-unknown-fields: true + logicalBackupRetention: + type: string logicalBackupSchedule: type: string pattern: '^(\d+|\*)(/\d+)?(\s+(\d+|\*)(/\d+)?){4}$' @@ -222,7 +226,7 @@ spec: type: array items: type: string - pattern: '^\ *((Mon|Tue|Wed|Thu|Fri|Sat|Sun):(2[0-3]|[01]?\d):([0-5]?\d)|(2[0-3]|[01]?\d):([0-5]?\d))-((Mon|Tue|Wed|Thu|Fri|Sat|Sun):(2[0-3]|[01]?\d):([0-5]?\d)|(2[0-3]|[01]?\d):([0-5]?\d))\ *$' + pattern: '^\ *((Mon|Tue|Wed|Thu|Fri|Sat|Sun):(2[0-3]|[01]?\d):([0-5]?\d)|(2[0-3]|[01]?\d):([0-5]?\d))-((2[0-3]|[01]?\d):([0-5]?\d)|(2[0-3]|[01]?\d):([0-5]?\d))\ *$' masterServiceAnnotations: type: object additionalProperties: @@ -371,12 +375,11 @@ spec: version: type: string enum: - - "10" - - "11" - - "12" - "13" - "14" - "15" + - "16" + - "17" parameters: type: object additionalProperties: @@ -441,6 +444,12 @@ spec: pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$' # Note: the value specified here must not be zero or be higher # than the corresponding limit. 
+ hugepages-2Mi: + type: string + pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$' + hugepages-1Gi: + type: string + pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$' requests: type: object properties: @@ -450,6 +459,12 @@ spec: memory: type: string pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$' + hugepages-2Mi: + type: string + pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$' + hugepages-1Gi: + type: string + pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$' schedulerName: type: string serviceAnnotations: @@ -499,6 +514,9 @@ spec: type: string batchSize: type: integer + cpu: + type: string + pattern: '^(\d+m|\d+(\.\d{1,3})?)$' database: type: string enableRecovery: @@ -507,6 +525,9 @@ spec: type: object additionalProperties: type: string + memory: + type: string + pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$' tables: type: object additionalProperties: @@ -518,6 +539,8 @@ spec: type: string idColumn: type: string + ignoreRecovery: + type: boolean payloadColumn: type: string recoveryEventType: @@ -600,6 +623,11 @@ spec: - SUPERUSER - nosuperuser - NOSUPERUSER + usersIgnoringSecretRotation: + type: array + nullable: true + items: + type: string usersWithInPlaceSecretRotation: type: array nullable: true @@ -615,6 +643,8 @@ spec: required: - size properties: + isSubPathExpr: + type: boolean iops: type: integer selector: diff --git a/charts/postgres-operator/index.yaml b/charts/postgres-operator/index.yaml index 0a489c57e..4da98d70a 100644 --- a/charts/postgres-operator/index.yaml +++ b/charts/postgres-operator/index.yaml @@ -2,11 +2,11 @@ apiVersion: v1 entries: postgres-operator: - apiVersion: v2 - appVersion: 1.10.1 - created: "2023-09-07T16:26:25.96185313+02:00" + appVersion: 1.14.0 + created: "2024-12-23T11:25:32.596716566+01:00" description: Postgres Operator creates and manages PostgreSQL clusters running in Kubernetes - digest: cc3baa41753da92466223d0b334df27e79c882296577b404a8e9071411fcf19c + digest: 36e1571f3f455b213f16cdda7b1158648e8e84deb804ba47ed6b9b6d19263ba8 home: https://github.com/zalando/postgres-operator keywords: - postgres @@ -21,14 +21,14 @@ entries: sources: - https://github.com/zalando/postgres-operator urls: - - postgres-operator-1.10.1.tgz - version: 1.10.1 + - postgres-operator-1.14.0.tgz + version: 1.14.0 - apiVersion: v2 - appVersion: 1.10.0 - created: "2023-09-07T16:26:25.960303202+02:00" + appVersion: 1.13.0 + created: "2024-12-23T11:25:32.591136261+01:00" description: Postgres Operator creates and manages PostgreSQL clusters running in Kubernetes - digest: 60fc5c8059dfed175d14e1034b40997d9c59d33ec8ea158c0597f7228ab04b51 + digest: a839601689aea0a7e6bc0712a5244d435683cf3314c95794097ff08540e1dfef home: https://github.com/zalando/postgres-operator keywords: - postgres @@ -43,14 +43,14 @@ entries: sources: - https://github.com/zalando/postgres-operator urls: - - postgres-operator-1.10.0.tgz - version: 1.10.0 + - postgres-operator-1.13.0.tgz + version: 1.13.0 - apiVersion: v2 - appVersion: 1.9.0 - created: "2023-09-07T16:26:25.971662154+02:00" + appVersion: 1.12.2 + created: "2024-12-23T11:25:32.585419709+01:00" description: Postgres Operator creates and manages PostgreSQL clusters running in Kubernetes - digest: 64df90c898ca591eb3a330328173ffaadfbf9ddd474d8c42ed143edc9e3f4276 + digest: 65858d14a40d7fd90c32bd9fc60021acc9555c161079f43a365c70171eaf21d8 home: https://github.com/zalando/postgres-operator keywords: - postgres @@ -65,14 +65,14 @@ entries: sources: - https://github.com/zalando/postgres-operator urls: - - 
postgres-operator-1.9.0.tgz - version: 1.9.0 + - postgres-operator-1.12.2.tgz + version: 1.12.2 - apiVersion: v2 - appVersion: 1.8.2 - created: "2023-09-07T16:26:25.97011158+02:00" + appVersion: 1.11.0 + created: "2024-12-23T11:25:32.580077286+01:00" description: Postgres Operator creates and manages PostgreSQL clusters running in Kubernetes - digest: f77ffad2e98b72a621e5527015cf607935d3ed688f10ba4b626435acb9631b5b + digest: 3914b5e117bda0834f05c9207f007e2ac372864cf6e86dcc2e1362bbe46c14d9 home: https://github.com/zalando/postgres-operator keywords: - postgres @@ -87,58 +87,14 @@ entries: sources: - https://github.com/zalando/postgres-operator urls: - - postgres-operator-1.8.2.tgz - version: 1.8.2 + - postgres-operator-1.11.0.tgz + version: 1.11.0 - apiVersion: v2 - appVersion: 1.8.1 - created: "2023-09-07T16:26:25.968682347+02:00" - description: Postgres Operator creates and manages PostgreSQL clusters running - in Kubernetes - digest: ee0c3bb6ba72fa4289ba3b1c6060e5b312dd023faba2a61b4cb7d9e5e2cc57a5 - home: https://github.com/zalando/postgres-operator - keywords: - - postgres - - operator - - cloud-native - - patroni - - spilo - maintainers: - - email: opensource@zalando.de - name: Zalando - name: postgres-operator - sources: - - https://github.com/zalando/postgres-operator - urls: - - postgres-operator-1.8.1.tgz - version: 1.8.1 - - apiVersion: v1 - appVersion: 1.8.0 - created: "2023-09-07T16:26:25.967242444+02:00" - description: Postgres Operator creates and manages PostgreSQL clusters running - in Kubernetes - digest: 3ae232cf009e09aa2ad11c171484cd2f1b72e63c59735e58fbe2b6eb842f4c86 - home: https://github.com/zalando/postgres-operator - keywords: - - postgres - - operator - - cloud-native - - patroni - - spilo - maintainers: - - email: opensource@zalando.de - name: Zalando - name: postgres-operator - sources: - - https://github.com/zalando/postgres-operator - urls: - - postgres-operator-1.8.0.tgz - version: 1.8.0 - - apiVersion: v1 - appVersion: 1.7.1 - created: "2023-09-07T16:26:25.965786379+02:00" + appVersion: 1.10.1 + created: "2024-12-23T11:25:32.574641578+01:00" description: Postgres Operator creates and manages PostgreSQL clusters running in Kubernetes - digest: 7262563bec0b058e669ae6bcff0226e33fa9ece9c41ac46a53274046afe7700c + digest: cc3baa41753da92466223d0b334df27e79c882296577b404a8e9071411fcf19c home: https://github.com/zalando/postgres-operator keywords: - postgres @@ -153,14 +109,14 @@ entries: sources: - https://github.com/zalando/postgres-operator urls: - - postgres-operator-1.7.1.tgz - version: 1.7.1 - - apiVersion: v1 - appVersion: 1.7.0 - created: "2023-09-07T16:26:25.963469658+02:00" + - postgres-operator-1.10.1.tgz + version: 1.10.1 + - apiVersion: v2 + appVersion: 1.9.0 + created: "2024-12-23T11:25:32.604748814+01:00" description: Postgres Operator creates and manages PostgreSQL clusters running in Kubernetes - digest: c3e99fb94305f81484b8b1af18eefb78681f3b5d057d5ad10565e4afb7c65ffe + digest: 64df90c898ca591eb3a330328173ffaadfbf9ddd474d8c42ed143edc9e3f4276 home: https://github.com/zalando/postgres-operator keywords: - postgres @@ -175,6 +131,6 @@ entries: sources: - https://github.com/zalando/postgres-operator urls: - - postgres-operator-1.7.0.tgz - version: 1.7.0 -generated: "2023-09-07T16:26:25.958442963+02:00" + - postgres-operator-1.9.0.tgz + version: 1.9.0 +generated: "2024-12-23T11:25:32.568598763+01:00" diff --git a/charts/postgres-operator/postgres-operator-1.10.0.tgz b/charts/postgres-operator/postgres-operator-1.10.0.tgz deleted file mode 100644 index 
71f3483f5..000000000 Binary files a/charts/postgres-operator/postgres-operator-1.10.0.tgz and /dev/null differ diff --git a/charts/postgres-operator/postgres-operator-1.11.0.tgz b/charts/postgres-operator/postgres-operator-1.11.0.tgz new file mode 100644 index 000000000..61c2eadb0 Binary files /dev/null and b/charts/postgres-operator/postgres-operator-1.11.0.tgz differ diff --git a/charts/postgres-operator/postgres-operator-1.12.2.tgz b/charts/postgres-operator/postgres-operator-1.12.2.tgz new file mode 100644 index 000000000..a74c25c47 Binary files /dev/null and b/charts/postgres-operator/postgres-operator-1.12.2.tgz differ diff --git a/charts/postgres-operator/postgres-operator-1.13.0.tgz b/charts/postgres-operator/postgres-operator-1.13.0.tgz new file mode 100644 index 000000000..3d7ca4ce6 Binary files /dev/null and b/charts/postgres-operator/postgres-operator-1.13.0.tgz differ diff --git a/charts/postgres-operator/postgres-operator-1.14.0.tgz b/charts/postgres-operator/postgres-operator-1.14.0.tgz new file mode 100644 index 000000000..df95fd01d Binary files /dev/null and b/charts/postgres-operator/postgres-operator-1.14.0.tgz differ diff --git a/charts/postgres-operator/postgres-operator-1.7.0.tgz b/charts/postgres-operator/postgres-operator-1.7.0.tgz deleted file mode 100644 index 2a8bc745e..000000000 Binary files a/charts/postgres-operator/postgres-operator-1.7.0.tgz and /dev/null differ diff --git a/charts/postgres-operator/postgres-operator-1.7.1.tgz b/charts/postgres-operator/postgres-operator-1.7.1.tgz deleted file mode 100644 index 2d20fe5b0..000000000 Binary files a/charts/postgres-operator/postgres-operator-1.7.1.tgz and /dev/null differ diff --git a/charts/postgres-operator/postgres-operator-1.8.0.tgz b/charts/postgres-operator/postgres-operator-1.8.0.tgz deleted file mode 100644 index caecd2929..000000000 Binary files a/charts/postgres-operator/postgres-operator-1.8.0.tgz and /dev/null differ diff --git a/charts/postgres-operator/postgres-operator-1.8.1.tgz b/charts/postgres-operator/postgres-operator-1.8.1.tgz deleted file mode 100644 index 2235cbcd6..000000000 Binary files a/charts/postgres-operator/postgres-operator-1.8.1.tgz and /dev/null differ diff --git a/charts/postgres-operator/postgres-operator-1.8.2.tgz b/charts/postgres-operator/postgres-operator-1.8.2.tgz deleted file mode 100644 index 61f0f8aee..000000000 Binary files a/charts/postgres-operator/postgres-operator-1.8.2.tgz and /dev/null differ diff --git a/charts/postgres-operator/templates/_helpers.tpl b/charts/postgres-operator/templates/_helpers.tpl index ee3a8dd22..cb8c69c2b 100644 --- a/charts/postgres-operator/templates/_helpers.tpl +++ b/charts/postgres-operator/templates/_helpers.tpl @@ -38,6 +38,13 @@ Create a pod service account name. {{ default (printf "%s-%v" (include "postgres-operator.fullname" .) "pod") .Values.podServiceAccount.name }} {{- end -}} +{{/* +Create a pod priority class name. +*/}} +{{- define "postgres-pod.priorityClassName" -}} +{{ default (printf "%s-%v" (include "postgres-operator.fullname" .) "pod") .Values.podPriorityClassName.name }} +{{- end -}} + {{/* Create a controller ID. 
*/}} @@ -63,8 +70,8 @@ Flatten nested config options when ConfigMap is used as ConfigTarget {{- $list := list }} {{- range $subKey, $subValue := $value }} {{- $list = append $list (printf "%s:%s" $subKey $subValue) }} -{{ $key }}: {{ join "," $list | quote }} {{- end }} +{{ $key }}: {{ join "," $list | quote }} {{- else }} {{ $key }}: {{ $value | quote }} {{- end }} diff --git a/charts/postgres-operator/templates/clusterrole.yaml b/charts/postgres-operator/templates/clusterrole.yaml index 199086acc..ad3b46064 100644 --- a/charts/postgres-operator/templates/clusterrole.yaml +++ b/charts/postgres-operator/templates/clusterrole.yaml @@ -120,6 +120,7 @@ rules: - create - delete - get + - patch - update # to check nodes for node readiness label - apiGroups: @@ -139,8 +140,8 @@ rules: - delete - get - list -{{- if toString .Values.configKubernetes.storage_resize_mode | eq "pvc" }} - patch +{{- if or (toString .Values.configKubernetes.storage_resize_mode | eq "pvc") (toString .Values.configKubernetes.storage_resize_mode | eq "mixed") }} - update {{- end }} # to read existing PVs. Creation should be done via dynamic provisioning @@ -196,6 +197,7 @@ rules: - get - list - patch + - update # to CRUD cron jobs for logical backups - apiGroups: - batch diff --git a/charts/postgres-operator/templates/configmap.yaml b/charts/postgres-operator/templates/configmap.yaml index 471f1aee4..9ea574172 100644 --- a/charts/postgres-operator/templates/configmap.yaml +++ b/charts/postgres-operator/templates/configmap.yaml @@ -10,9 +10,9 @@ metadata: app.kubernetes.io/managed-by: {{ .Release.Service }} app.kubernetes.io/instance: {{ .Release.Name }} data: - {{- if .Values.podPriorityClassName }} - pod_priority_class_name: {{ .Values.podPriorityClassName }} - {{- end }} +{{- if or .Values.podPriorityClassName.create .Values.podPriorityClassName.name }} + pod_priority_class_name: {{ include "postgres-pod.priorityClassName" . }} +{{- end }} pod_service_account_name: {{ include "postgres-pod.serviceAccountName" . }} {{- include "flattenValuesForConfigMap" .Values.configGeneral | indent 2 }} {{- include "flattenValuesForConfigMap" .Values.configUsers | indent 2 }} diff --git a/charts/postgres-operator/templates/deployment.yaml b/charts/postgres-operator/templates/deployment.yaml index 1752cb397..395843942 100644 --- a/charts/postgres-operator/templates/deployment.yaml +++ b/charts/postgres-operator/templates/deployment.yaml @@ -52,6 +52,9 @@ spec: {{- if .Values.controllerID.create }} - name: CONTROLLER_ID value: {{ template "postgres-operator.controllerID" . }} + {{- end }} + {{- if .Values.extraEnvs }} +{{ toYaml .Values.extraEnvs | indent 8 }} {{- end }} resources: {{ toYaml .Values.resources | indent 10 }} diff --git a/charts/postgres-operator/templates/operatorconfiguration.yaml b/charts/postgres-operator/templates/operatorconfiguration.yaml index 6d3b0eb83..b72bfb899 100644 --- a/charts/postgres-operator/templates/operatorconfiguration.yaml +++ b/charts/postgres-operator/templates/operatorconfiguration.yaml @@ -14,32 +14,32 @@ configuration: users: {{ tpl (toYaml .Values.configUsers) . | indent 4 }} major_version_upgrade: -{{ toYaml .Values.configMajorVersionUpgrade | indent 4 }} +{{ tpl (toYaml .Values.configMajorVersionUpgrade) . 
| indent 4 }} kubernetes: - {{- if .Values.podPriorityClassName }} - pod_priority_class_name: {{ .Values.podPriorityClassName }} + {{- if .Values.podPriorityClassName.name }} + pod_priority_class_name: {{ .Values.podPriorityClassName.name }} {{- end }} pod_service_account_name: {{ include "postgres-pod.serviceAccountName" . }} oauth_token_secret_name: {{ template "postgres-operator.fullname" . }} {{ tpl (toYaml .Values.configKubernetes) . | indent 4 }} postgres_pod_resources: -{{ toYaml .Values.configPostgresPodResources | indent 4 }} +{{ tpl (toYaml .Values.configPostgresPodResources) . | indent 4 }} timeouts: -{{ toYaml .Values.configTimeouts | indent 4 }} +{{ tpl (toYaml .Values.configTimeouts) . | indent 4 }} load_balancer: -{{ toYaml .Values.configLoadBalancer | indent 4 }} +{{ tpl (toYaml .Values.configLoadBalancer) . | indent 4 }} aws_or_gcp: -{{ toYaml .Values.configAwsOrGcp | indent 4 }} +{{ tpl (toYaml .Values.configAwsOrGcp) . | indent 4 }} logical_backup: -{{ toYaml .Values.configLogicalBackup | indent 4 }} +{{ tpl (toYaml .Values.configLogicalBackup) . | indent 4 }} debug: -{{ toYaml .Values.configDebug | indent 4 }} +{{ tpl (toYaml .Values.configDebug) . | indent 4 }} teams_api: {{ tpl (toYaml .Values.configTeamsApi) . | indent 4 }} logging_rest_api: -{{ toYaml .Values.configLoggingRestApi | indent 4 }} +{{ tpl (toYaml .Values.configLoggingRestApi) . | indent 4 }} connection_pooler: -{{ toYaml .Values.configConnectionPooler | indent 4 }} +{{ tpl (toYaml .Values.configConnectionPooler) . | indent 4 }} patroni: -{{ toYaml .Values.configPatroni | indent 4 }} +{{ tpl (toYaml .Values.configPatroni) . | indent 4 }} {{- end }} diff --git a/charts/postgres-operator/templates/postgres-pod-priority-class.yaml b/charts/postgres-operator/templates/postgres-pod-priority-class.yaml index 583639eca..de78b501c 100644 --- a/charts/postgres-operator/templates/postgres-pod-priority-class.yaml +++ b/charts/postgres-operator/templates/postgres-pod-priority-class.yaml @@ -1,4 +1,4 @@ -{{- if .Values.podPriorityClassName }} +{{- if .Values.podPriorityClassName.create }} apiVersion: scheduling.k8s.io/v1 description: 'Use only for databases controlled by Postgres operator' kind: PriorityClass @@ -8,9 +8,9 @@ metadata: helm.sh/chart: {{ template "postgres-operator.chart" . }} app.kubernetes.io/managed-by: {{ .Release.Service }} app.kubernetes.io/instance: {{ .Release.Name }} - name: {{ .Values.podPriorityClassName }} + name: {{ include "postgres-pod.priorityClassName" . }} namespace: {{ .Release.Namespace }} preemptionPolicy: PreemptLowerPriority globalDefault: false -value: 1000000 +value: {{ .Values.podPriorityClassName.priority }} {{- end }} diff --git a/charts/postgres-operator/values.yaml b/charts/postgres-operator/values.yaml index 854b29b10..bf94b63d0 100644 --- a/charts/postgres-operator/values.yaml +++ b/charts/postgres-operator/values.yaml @@ -1,7 +1,7 @@ image: - registry: registry.opensource.zalan.do - repository: acid/postgres-operator - tag: v1.10.1 + registry: ghcr.io + repository: zalando/postgres-operator + tag: v1.14.0 pullPolicy: "IfNotPresent" # Optionally specify an array of imagePullSecrets. @@ -38,7 +38,7 @@ configGeneral: # etcd connection string for Patroni. Empty uses K8s-native DCS. 
etcd_host: "" # Spilo docker image - docker_image: ghcr.io/zalando/spilo-15:3.0-p1 + docker_image: ghcr.io/zalando/spilo-17:4.0-p2 # key name for annotation to ignore globally configured instance limits # ignore_instance_limits_annotation_key: "" @@ -83,15 +83,15 @@ configUsers: configMajorVersionUpgrade: # "off": no upgrade, "manual": manifest triggers action, "full": minimal version violation triggers too - major_version_upgrade_mode: "off" + major_version_upgrade_mode: "manual" # upgrades will only be carried out for clusters of listed teams when mode is "off" # major_version_upgrade_team_allow_list: # - acid # minimal Postgres major version that will not automatically be upgraded - minimal_major_version: "11" + minimal_major_version: "13" # target Postgres major version when upgrading clusters automatically - target_major_version: "15" + target_major_version: "17" configKubernetes: # list of additional capabilities for postgres container @@ -123,14 +123,24 @@ configKubernetes: # allow user secrets in other namespaces than the Postgres cluster enable_cross_namespace_secret: false + # use finalizers to ensure all managed resources are deleted prior to the postgresql CR + # this avoids stale resources in case the operator misses a delete event or is not running + # during deletion + enable_finalizers: false # enables initContainers to run actions before Spilo is started enable_init_containers: true + # toggles if child resources should have an owner reference to the postgresql CR + enable_owner_references: false + # toggles if operator should delete PVCs on cluster deletion + enable_persistent_volume_claim_deletion: true # toggles pod anti affinity on the Postgres pods enable_pod_antiaffinity: false # toggles PDB to set to MinAvailabe 0 or 1 enable_pod_disruption_budget: true # toogles readiness probe for database pods enable_readiness_probe: false + # toggles if operator should delete secrets on cluster deletion + enable_secrets_deletion: true # enables sidecar containers to run alongside Spilo in the same pod enable_sidecars: true @@ -163,10 +173,12 @@ configKubernetes: # namespaced name of the secret containing the OAuth2 token to pass to the teams API # oauth_token_secret_name: postgresql-operator - # defines the template for PDB (Pod Disruption Budget) names + # toggle if `spilo-role=master` selector should be added to the PDB (Pod Disruption Budget) + pdb_master_label_selector: true + # defines the template for PDB names pdb_name_format: "postgres-{cluster}-pdb" # specify the PVC retention policy when scaling down and/or deleting - persistent_volume_claim_retention_policy: + persistent_volume_claim_retention_policy: when_deleted: "retain" when_scaled: "retain" # switches pod anti affinity type to `preferredDuringSchedulingIgnoredDuringExecution` @@ -352,7 +364,7 @@ configLogicalBackup: # logical_backup_memory_request: "" # image for pods of the logical backup job (example runs pg_dumpall) - logical_backup_docker_image: "registry.opensource.zalan.do/acid/logical-backup:v1.10.1" + logical_backup_docker_image: "ghcr.io/zalando/postgres-operator/logical-backup:v1.14.0" # path of google cloud service account json file # logical_backup_google_application_credentials: "" @@ -364,6 +376,8 @@ configLogicalBackup: logical_backup_s3_access_key_id: "" # S3 bucket to store backup results logical_backup_s3_bucket: "my-bucket-url" + # S3 bucket prefix to use + logical_backup_s3_bucket_prefix: "spilo" # S3 region of bucket logical_backup_s3_region: "" # S3 endpoint url when not using AWS @@ -376,6 
+390,8 @@ configLogicalBackup: logical_backup_s3_retention_time: "" # backup schedule in the cron format logical_backup_schedule: "30 00 * * *" + # secret to be used as reference for env variables in cronjob + logical_backup_cronjob_environment_secret: "" # automate creation of human users with teams API service configTeamsApi: @@ -420,7 +436,7 @@ configConnectionPooler: # db user for pooler to use connection_pooler_user: "pooler" # docker image - connection_pooler_image: "registry.opensource.zalan.do/acid/pgbouncer:master-27" + connection_pooler_image: "registry.opensource.zalan.do/acid/pgbouncer:master-32" # max db connections the pooler should hold connection_pooler_max_db_connections: 60 # default pooling mode @@ -462,7 +478,14 @@ podServiceAccount: priorityClassName: "" # priority class for database pods -podPriorityClassName: "" +podPriorityClassName: + # If create is false with no name set, no podPriorityClassName is specified. + # Hence, the pod priorityClass is the one with globalDefault set. + # If there is no PriorityClass with globalDefault set, the priority of Pods with no priorityClassName is zero. + create: true + # If not set a name is generated using the fullname template and "-pod" suffix + name: "" + priority: 1000000 resources: limits: @@ -483,6 +506,24 @@ readinessProbe: initialDelaySeconds: 5 periodSeconds: 10 +# configure extra environment variables +# Extra environment variables are writen in kubernetes format and added "as is" to the pod's env variables +# https://kubernetes.io/docs/tasks/inject-data-application/define-environment-variable-container/ +# https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#environment-variables +extraEnvs: + [] + # Exemple of settings maximum amount of memory / cpu that can be used by go process (to match resources.limits) + # - name: MY_VAR + # value: my-value + # - name: GOMAXPROCS + # valueFrom: + # resourceFieldRef: + # resource: limits.cpu + # - name: GOMEMLIMIT + # valueFrom: + # resourceFieldRef: + # resource: limits.memory + # Affinity for pod assignment # Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity affinity: {} diff --git a/cmd/main.go b/cmd/main.go index 0b48ac863..adbf0cce5 100644 --- a/cmd/main.go +++ b/cmd/main.go @@ -35,6 +35,8 @@ func init() { flag.BoolVar(&outOfCluster, "outofcluster", false, "Whether the operator runs in- our outside of the Kubernetes cluster.") flag.BoolVar(&config.NoDatabaseAccess, "nodatabaseaccess", false, "Disable all access to the database from the operator side.") flag.BoolVar(&config.NoTeamsAPI, "noteamsapi", false, "Disable all access to the teams API") + flag.IntVar(&config.KubeQPS, "kubeqps", 10, "Kubernetes api requests per second.") + flag.IntVar(&config.KubeBurst, "kubeburst", 20, "Kubernetes api requests burst limit.") flag.Parse() config.EnableJsonLogging = os.Getenv("ENABLE_JSON_LOGGING") == "true" @@ -83,6 +85,9 @@ func main() { log.Fatalf("couldn't get REST config: %v", err) } + config.RestConfig.QPS = float32(config.KubeQPS) + config.RestConfig.Burst = config.KubeBurst + c := controller.NewController(&config, "") c.Run(stop, wg) diff --git a/delivery.yaml b/delivery.yaml index e7cceae3e..7eacd769b 100644 --- a/delivery.yaml +++ b/delivery.yaml @@ -5,43 +5,18 @@ pipeline: vm_config: type: linux size: large + image: cdp-runtime/go cache: paths: - - /go/pkg/mod + - /go/pkg/mod # pkg cache for Go modules + - ~/.cache/go-build # Go build cache commands: - - desc: 'Update' + - desc: Run unit tests cmd: | - 
apt-get update - - desc: 'Install required build software' - cmd: | - apt-get install -y make git apt-transport-https ca-certificates curl build-essential python3 python3-pip - - desc: 'Install go' - cmd: | - cd /tmp - wget -q https://storage.googleapis.com/golang/go1.19.8.linux-amd64.tar.gz -O go.tar.gz - tar -xf go.tar.gz - mv go /usr/local - ln -s /usr/local/go/bin/go /usr/bin/go - go version - - desc: 'Build docker image' - cmd: | - export PATH=$PATH:$HOME/go/bin - IS_PR_BUILD=${CDP_PULL_REQUEST_NUMBER+"true"} - if [[ ${CDP_TARGET_BRANCH} == "master" && ${IS_PR_BUILD} != "true" ]] - then - IMAGE=registry-write.opensource.zalan.do/acid/postgres-operator - else - IMAGE=registry-write.opensource.zalan.do/acid/postgres-operator-test - fi - export IMAGE - make deps mocks docker - - desc: 'Run unit tests' - cmd: | - export PATH=$PATH:$HOME/go/bin - go test ./... - - desc: 'Push docker image' + make deps mocks test + + - desc: Build Docker image cmd: | - export PATH=$PATH:$HOME/go/bin IS_PR_BUILD=${CDP_PULL_REQUEST_NUMBER+"true"} if [[ ${CDP_TARGET_BRANCH} == "master" && ${IS_PR_BUILD} != "true" ]] then @@ -50,7 +25,7 @@ pipeline: IMAGE=registry-write.opensource.zalan.do/acid/postgres-operator-test fi export IMAGE - make push + make docker push - id: build-operator-ui type: script @@ -90,7 +65,7 @@ pipeline: commands: - desc: Build image cmd: | - cd docker/logical-backup + cd logical-backup export TAG=$(git describe --tags --always --dirty) IMAGE="registry-write.opensource.zalan.do/acid/logical-backup" docker build --rm -t "$IMAGE:$TAG$CDP_TAG" . diff --git a/docker/DebugDockerfile b/docker/DebugDockerfile index 7c7ee8aee..18cb631fe 100644 --- a/docker/DebugDockerfile +++ b/docker/DebugDockerfile @@ -1,18 +1,14 @@ -FROM registry.opensource.zalan.do/library/alpine-3.15:latest +FROM golang:1.23-alpine LABEL maintainer="Team ACID @ Zalando " # We need root certificates to deal with teams api over https -RUN apk --no-cache add ca-certificates go git musl-dev +RUN apk -U add --no-cache ca-certificates delve COPY build/* / RUN addgroup -g 1000 pgo RUN adduser -D -u 1000 -G pgo -g 'Postgres Operator' pgo -RUN go get -d github.com/derekparker/delve/cmd/dlv -RUN cp /root/go/bin/dlv /dlv -RUN chown -R pgo:pgo /dlv - USER pgo:pgo RUN ls -l / diff --git a/docker/Dockerfile b/docker/Dockerfile index bad0dc71b..1fd2020d8 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -1,22 +1,20 @@ -ARG BASE_IMAGE=registry.opensource.zalan.do/library/alpine-3.15:latest +ARG BASE_IMAGE=registry.opensource.zalan.do/library/alpine-3:latest +FROM golang:1.23-alpine AS builder ARG VERSION=latest -FROM ubuntu:20.04 as builder - -ARG VERSION - COPY . 
/go/src/github.com/zalando/postgres-operator WORKDIR /go/src/github.com/zalando/postgres-operator -ENV OPERATOR_LDFLAGS="-X=main.version=${VERSION}" -RUN bash docker/build_operator.sh +RUN GO111MODULE=on go mod vendor \ + && CGO_ENABLED=0 go build -o build/postgres-operator -v -ldflags "-X=main.version=${VERSION}" cmd/main.go FROM ${BASE_IMAGE} LABEL maintainer="Team ACID @ Zalando " +LABEL org.opencontainers.image.source="https://github.com/zalando/postgres-operator" # We need root certificates to deal with teams api over https -RUN apk --no-cache add curl -RUN apk --no-cache add ca-certificates +RUN apk -U upgrade --no-cache \ + && apk add --no-cache curl ca-certificates COPY --from=builder /go/src/github.com/zalando/postgres-operator/build/* / diff --git a/docker/build_operator.sh b/docker/build_operator.sh index 337389e4d..6c1817b1b 100644 --- a/docker/build_operator.sh +++ b/docker/build_operator.sh @@ -13,7 +13,7 @@ apt-get install -y wget ( cd /tmp - wget -q "https://storage.googleapis.com/golang/go1.19.8.linux-${arch}.tar.gz" -O go.tar.gz + wget -q "https://storage.googleapis.com/golang/go1.23.4.linux-${arch}.tar.gz" -O go.tar.gz tar -xf go.tar.gz mv go /usr/local ln -s /usr/local/go/bin/go /usr/bin/go diff --git a/docs/administrator.md b/docs/administrator.md index c44d08f90..f394b70ab 100644 --- a/docs/administrator.md +++ b/docs/administrator.md @@ -63,14 +63,17 @@ the `PGVERSION` environment variable is set for the database pods. Since `v1.6.0` the related option `enable_pgversion_env_var` is enabled by default. In-place major version upgrades can be configured to be executed by the -operator with the `major_version_upgrade_mode` option. By default it is set -to `off` which means the cluster version will not change when increased in -the manifest. Still, a rolling update would be triggered updating the -`PGVERSION` variable. But Spilo's [`configure_spilo`](https://github.com/zalando/spilo/blob/master/postgres-appliance/scripts/configure_spilo.py) -script will notice the version mismatch and start the old version again. - -In this scenario the major version could then be run by a user from within the -master pod. Exec into the container and run: +operator with the `major_version_upgrade_mode` option. By default, it is +enabled (mode: `manual`). In any case, altering the version in the manifest +will trigger a rolling update of pods to update the `PGVERSION` env variable. +Spilo's [`configure_spilo`](https://github.com/zalando/spilo/blob/master/postgres-appliance/scripts/configure_spilo.py) +script will notice the version mismatch but start the current version again. + +Next, the operator would call an upgrade script inside Spilo. When automatic +upgrades are disabled (mode: `off`), the upgrade could still be run by a user +from within the primary pod. This gives you full control over the point in +time when the upgrade can be started (check also maintenance windows below). +Exec into the container and run: ```bash python3 /scripts/inplace_upgrade.py N ``` @@ -79,8 +82,32 @@ The upgrade is usually fast, well under one minute for most DBs. Note, that changes become irrevertible once `pg_upgrade` is called. To understand the upgrade procedure, refer to the [corresponding PR in Spilo](https://github.com/zalando/spilo/pull/488). -When `major_version_upgrade_mode` is set to `manual` the operator will run -the upgrade script for you after the manifest is updated and pods are rotated.
+When `major_version_upgrade_mode` is set to `full` the operator will compare +the version in the manifest with the configured `minimal_major_version`. If it +is lower the operator would start an automatic upgrade as described above. The +configured `major_target_version` will be used as the new version. This option +can be useful if you have to get rid of outdated major versions in your fleet. +Please note, that the operator does not patch the version in the manifest. +Thus, the `full` mode can create drift between desired and actual state. + +### Upgrade during maintenance windows + +When `maintenanceWindows` are defined in the Postgres manifest the operator +will trigger a major version upgrade only during these periods. Make sure they +are at least twice as long as your configured `resync_period` to guarantee +that operator actions can be triggered. + +### Upgrade annotations + +When an upgrade is executed, the operator sets an annotation in the PostgreSQL +resource, either `last-major-upgrade-success` if the upgrade succeeds, or +`last-major-upgrade-failure` if it fails. The value of the annotation is a +timestamp indicating when the upgrade occurred. + +If a PostgreSQL resource contains a failure annotation, the operator will not +attempt to retry the upgrade during a sync event. To remove the failure +annotation, you can revert the PostgreSQL version back to the current version. +This action will trigger the removal of the failure annotation. ## Non-default cluster domain @@ -223,9 +250,9 @@ configuration: Now, every cluster manifest must contain the configured annotation keys to trigger the delete process when running `kubectl delete pg`. Note, that the -`Postgresql` resource would still get deleted as K8s' API server does not -block it. Only the operator logs will tell, that the delete criteria wasn't -met. +`Postgresql` resource would still get deleted because the operator does not +instruct K8s' API server to block it. Only the operator logs will tell, that +the delete criteria was not met. **cluster manifest** @@ -243,11 +270,64 @@ spec: In case, the resource has been deleted accidentally or the annotations were simply forgotten, it's safe to recreate the cluster with `kubectl create`. -Existing Postgres cluster are not replaced by the operator. But, as the -original cluster still exists the status will show `CreateFailed` at first. -On the next sync event it should change to `Running`. However, as it is in -fact a new resource for K8s, the UID will differ which can trigger a rolling -update of the pods because the UID is used as part of backup path to S3. +Existing Postgres cluster are not replaced by the operator. But, when the +original cluster still exists the status will be `CreateFailed` at first. On +the next sync event it should change to `Running`. However, because it is in +fact a new resource for K8s, the UID and therefore, the backup path to S3, +will differ and trigger a rolling update of the pods. + +## Owner References and Finalizers + +The Postgres Operator can set [owner references](https://kubernetes.io/docs/concepts/overview/working-with-objects/owners-dependents/) to most of a cluster's child resources to improve +monitoring with GitOps tools and enable cascading deletes. 
There are two
+exceptions:
+
+* Persistent Volume Claims, because they are handled by the [PV Reclaim Policy](https://kubernetes.io/docs/tasks/administer-cluster/change-pv-reclaim-policy/) of the Stateful Set
+* Cross-namespace secrets, because owner references are not allowed across namespaces by design
+
+The operator would clean these resources up with its regular delete loop
+unless they got synced correctly. If for some reason the initial cluster sync
+fails, e.g. after a cluster creation or operator restart, a deletion of the
+cluster manifest might leave orphaned resources behind which the user has to
+clean up manually.
+
+Another option is to enable finalizers which first ensures the deletion of all
+child resources before the cluster manifest gets removed. There is a trade-off
+though: The deletion is only performed after the next two operator SYNC cycles
+with the first one setting a `deletionTimestamp` and the latter reacting to it.
+The final removal of the custom resource will add a DELETE event to the worker
+queue but the child resources are already gone at this point. If you do not
+desire this behavior consider enabling owner references instead.
+
+**postgres-operator ConfigMap**
+
+```yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: postgres-operator
+data:
+  enable_finalizers: "false"
+  enable_owner_references: "true"
+```
+
+**OperatorConfiguration**
+
+```yaml
+apiVersion: "acid.zalan.do/v1"
+kind: OperatorConfiguration
+metadata:
+  name: postgresql-operator-configuration
+configuration:
+  kubernetes:
+    enable_finalizers: false
+    enable_owner_references: true
+```
+
+:warning: Please note, both options are disabled by default. When enabling owner
+references the operator cannot block cascading deletes, even when the [delete protection annotations](administrator.md#delete-protection-via-annotations)
+are in place. You would need a K8s admission controller that blocks the actual
+`kubectl delete` API call e.g. based on existing annotations.
 
 ## Role-based access control for the operator
 
@@ -304,7 +384,7 @@ exceptions:
 The interval of days can be set with `password_rotation_interval` (default
 `90` = 90 days, minimum 1). On each rotation the user name and password values
 are replaced in the K8s secret. They belong to a newly created user named after
-the original role plus rotation date in YYMMDD format. All priviliges are
+the original role plus rotation date in YYMMDD format. All privileges are
 inherited meaning that migration scripts should still grant and revoke rights
 against the original role. The timestamp of the next rotation (in RFC 3339
 format, UTC timezone) is written to the secret as well. Note, if the rotation
@@ -355,6 +435,23 @@ This would be the recommended option to enable rotation in secrets of database
 owners, but only if they are not used as application users for regular read
 and write operations.
 
+### Ignore rotation for certain users
+
+If you wish to globally enable password rotation but need certain users to
+opt out from it there are two ways. First, you can remove the user from the
+manifest's `users` section. The corresponding secret to this user will no
+longer be synced by the operator then.
+
+Secondly, if you want the operator to continue syncing the secret (e.g.
to
+recreate if it got accidentally removed) but cannot allow it to be rotated,
+add the user to the following list in your manifest:
+
+```
+spec:
+  usersIgnoringSecretRotation:
+  - bar_user
+```
+
 ### Turning off password rotation
 
 When password rotation is turned off again the operator will check if the
@@ -467,7 +564,7 @@ manifest affinity.
 ```
 
 If `node_readiness_label_merge` is set to `"OR"` (default) the readiness label
-affinty will be appended with its own expressions block:
+affinity will be appended with its own expressions block:
 
 ```yaml
   affinity:
@@ -523,22 +620,34 @@ By default the topology key for the pod anti affinity is set to
 `kubernetes.io/hostname`, you can set another topology key e.g.
 `failure-domain.beta.kubernetes.io/zone`. See [built-in node labels](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#interlude-built-in-node-labels) for available topology keys.
 
-## Pod Disruption Budget
+## Pod Disruption Budgets
+
+By default the operator creates two PodDisruptionBudgets (PDB) to protect the cluster
+from voluntary disruptions and hence unwanted DB downtime: the so-called primary PDB
+and the PDB for critical operations.
 
-By default the operator uses a PodDisruptionBudget (PDB) to protect the cluster
-from voluntarily disruptions and hence unwanted DB downtime. The `MinAvailable`
-parameter of the PDB is set to `1` which prevents killing masters in single-node
-clusters and/or the last remaining running instance in a multi-node cluster.
+### Primary PDB
+The `MinAvailable` parameter of this PDB is set to `1` and, if `pdb_master_label_selector`
+is enabled, the label selector includes the `spilo-role=master` condition, which prevents killing
+masters in single-node clusters and/or the last remaining running instance in a multi-node
+cluster.
+
+### PDB for critical operations
+The `MinAvailable` parameter of this PDB is equal to the `numberOfInstances` set in the
+cluster manifest, while the label selector includes the `critical-operation=true` condition. This
+allows you to protect all pods of a cluster, given they are labeled accordingly.
+For example, the operator labels all Spilo pods with `critical-operation=true` during the major
+version upgrade run. You may want to protect cluster pods during other critical operations
+by assigning the label to pods yourself or using other means of automation.
 
 The PDB is only relaxed in two scenarios:
 
 * If a cluster is scaled down to `0` instances (e.g. for draining nodes)
 * If the PDB is disabled in the configuration (`enable_pod_disruption_budget`)
 
-The PDB is still in place having `MinAvailable` set to `0`. If enabled it will
-be automatically set to `1` on scale up. Disabling PDBs helps avoiding blocking
-Kubernetes upgrades in managed K8s environments at the cost of prolonged DB
-downtime. See PR [#384](https://github.com/zalando/postgres-operator/pull/384)
+The PDBs are still in place having `MinAvailable` set to `0`. Disabling PDBs
+helps avoid blocking Kubernetes upgrades in managed K8s environments at the
+cost of prolonged DB downtime. See PR [#384](https://github.com/zalando/postgres-operator/pull/384)
 for the use case.
 
 ## Add cluster-specific labels
 
@@ -1031,7 +1140,7 @@ metadata:
     iam.gke.io/gcp-service-account: @.iam.gserviceaccount.com
 ```
 
-2. Specify the new custom service account in your [operator paramaters](./reference/operator_parameters.md)
+2.
Specify the new custom service account in your [operator parameters](./reference/operator_parameters.md) If using manual deployment or kustomize, this is done by setting `pod_service_account_name` in your configuration file specified in the @@ -1200,7 +1309,7 @@ aws_or_gcp: If cluster members have to be (re)initialized restoring physical backups happens automatically either from the backup location or by running -[pg_basebackup](https://www.postgresql.org/docs/15/app-pgbasebackup.html) +[pg_basebackup](https://www.postgresql.org/docs/17/app-pgbasebackup.html) on one of the other running instances (preferably replicas if they do not lag behind). You can test restoring backups by [cloning](user.md#how-to-clone-an-existing-postgresql-cluster) clusters. @@ -1266,7 +1375,7 @@ but only snapshots of your data. In its current state, see logical backups as a way to quickly create SQL dumps that you can easily restore in an empty test cluster. -2. The [example image](https://github.com/zalando/postgres-operator/blob/master/docker/logical-backup/Dockerfile) implements the backup +2. The [example image](https://github.com/zalando/postgres-operator/blob/master/logical-backup/Dockerfile) implements the backup via `pg_dumpall` and upload of compressed and encrypted results to an S3 bucket. `pg_dumpall` requires a `superuser` access to a DB and runs on the replica when possible. @@ -1308,6 +1417,10 @@ configuration: volumeMounts: - mountPath: /custom-pgdata-mountpoint name: pgdata + env: + - name: "ENV_VAR_NAME" + value: "any-k8s-env-things" + command: ['sh', '-c', 'echo "logging" > /opt/logs.txt'] - ... ``` @@ -1348,6 +1461,8 @@ You can also expose the operator API through a [service](https://github.com/zala Some displayed options can be disabled from UI using simple flags under the `OPERATOR_UI_CONFIG` field in the deployment. +The viewing and creation of clusters within the UI is limited to the namespace specified by the `TARGET_NAMESPACE` option. To allow the creation and viewing of clusters in all namespaces, set `TARGET_NAMESPACE` to `*`. + ### Deploy the UI on K8s Now, apply all manifests from the `ui/manifests` folder to deploy the Postgres @@ -1380,7 +1495,7 @@ make docker # build in image in minikube docker env eval $(minikube docker-env) -docker build -t registry.opensource.zalan.do/acid/postgres-operator-ui:v1.8.1 . +docker build -t ghcr.io/zalando/postgres-operator-ui:v1.13.0 . # apply UI manifests next to a running Postgres Operator kubectl apply -f manifests/ diff --git a/docs/developer.md b/docs/developer.md index 31f48d92d..c006aded0 100644 --- a/docs/developer.md +++ b/docs/developer.md @@ -186,7 +186,7 @@ go get -u github.com/derekparker/delve/cmd/dlv ``` RUN apk --no-cache add go git musl-dev -RUN go get -d github.com/derekparker/delve/cmd/dlv +RUN go get github.com/derekparker/delve/cmd/dlv ``` * Update the `Makefile` to build the project with debugging symbols. For that diff --git a/docs/quickstart.md b/docs/quickstart.md index f080bd567..2d6742354 100644 --- a/docs/quickstart.md +++ b/docs/quickstart.md @@ -230,7 +230,7 @@ kubectl delete postgresql acid-minimal-cluster ``` This should remove the associated StatefulSet, database Pods, Services and -Endpoints. The PersistentVolumes are released and the PodDisruptionBudget is +Endpoints. The PersistentVolumes are released and the PodDisruptionBudgets are deleted. Secrets however are not deleted and backups will remain in place. 
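Since secrets survive the delete, it can be useful to check what is left behind; a small sketch, assuming the operator's default `cluster-name` label is also set on the secrets of the example cluster:

```bash
# list credentials that remain after "kubectl delete postgresql acid-minimal-cluster"
kubectl get secrets -l cluster-name=acid-minimal-cluster
```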
When deleting a cluster while it is still starting up or got stuck during that
diff --git a/docs/reference/cluster_manifest.md b/docs/reference/cluster_manifest.md
index f7d0eebc6..ab0353202 100644
--- a/docs/reference/cluster_manifest.md
+++ b/docs/reference/cluster_manifest.md
@@ -114,6 +114,12 @@ These parameters are grouped directly under the `spec` key in the manifest.
   this parameter. Optional, when empty the load balancer service becomes
   inaccessible from outside of the Kubernetes cluster.
 
+* **maintenanceWindows**
+  a list which defines specific time frames when certain maintenance operations,
+  such as automatic major upgrades or master pod migration, are allowed to run.
+  Accepted formats are "01:00-06:00" for daily maintenance windows or
+  "Sat:00:00-04:00" for specific days, with all times in UTC.
+
 * **users**
   a map of usernames to user flags for the users that should be created in the
   cluster by the operator. User flags are a list, allowed elements are
@@ -142,6 +148,14 @@ These parameters are grouped directly under the `spec` key in the manifest.
   database, like a flyway user running a migration on Pod start. See more
   details in the [administrator docs](https://github.com/zalando/postgres-operator/blob/master/docs/administrator.md#password-replacement-without-extra-users).
 
+* **usersIgnoringSecretRotation**
+  if you have secret rotation enabled globally you can define a list
+  of users that should opt out from it, for example if you store credentials
+  outside of K8s, too, and corresponding deployments cannot dynamically
+  reference secrets. Note, you can also opt out from the rotation by removing
+  users from the manifest's `users` section. The operator will not drop them
+  from the database. Optional.
+
 * **databases**
   a map of database names to database owners for the databases that should be
   created by the operator. The owner users should already exist on the cluster
@@ -215,10 +229,17 @@ These parameters are grouped directly under the `spec` key in the manifest.
   Determines if the logical backup of this cluster should be taken and uploaded
   to S3. Default: false. Optional.
 
+* **logicalBackupRetention**
+  You can set a retention time for the logical backup cron job to remove old backup
+  files after a new backup has been uploaded. Example values are "3 days", "2 weeks", or
+  "1 month". It takes precedence over the global `logical_backup_s3_retention_time`
+  configuration. Currently only supported for AWS. Optional.
+
 * **logicalBackupSchedule**
   Schedule for the logical backup K8s cron job. Please take
   [the reference schedule format](https://kubernetes.io/docs/tasks/job/automated-tasks-with-cron-jobs/#schedule)
-  into account. Optional. Default is: "30 00 \* \* \*"
+  into account. It takes precedence over the global `logical_backup_schedule`
+  configuration. Optional.
 
 * **additionalVolumes**
   List of additional volumes to mount in each container of the statefulset pod.
@@ -226,7 +247,8 @@ These parameters are grouped directly under the `spec` key in the manifest.
   [kubernetes volumeSource](https://godoc.org/k8s.io/api/core/v1#VolumeSource).
   It allows you to mount existing PersistentVolumeClaims, ConfigMaps and Secrets inside the StatefulSet.
   Also an `emptyDir` volume can be shared between initContainer and statefulSet.
-  Additionaly, you can provide a `SubPath` for volume mount (a file in a configMap source volume, for example).
+  Additionally, you can provide a `SubPath` for volume mount (a file in a configMap source volume, for example).
+ Set `isSubPathExpr` to true if you want to include [API environment variables](https://kubernetes.io/docs/concepts/storage/volumes/#using-subpath-expanded-environment). You can also specify in which container the additional Volumes will be mounted with the `targetContainers` array option. If `targetContainers` is empty, additional volumes will be mounted only in the `postgres` container. If you set the `all` special item, it will be mounted in all containers (postgres + sidecars). @@ -235,7 +257,7 @@ These parameters are grouped directly under the `spec` key in the manifest. ## Prepared Databases The operator can create databases with default owner, reader and writer roles -without the need to specifiy them under `users` or `databases` sections. Those +without the need to specify them under `users` or `databases` sections. Those parameters are grouped under the `preparedDatabases` top-level key. For more information, see [user docs](../user.md#prepared-databases-with-roles-and-default-privileges). @@ -359,6 +381,14 @@ CPU and memory requests for the Postgres container. memory requests for the Postgres container. Optional, overrides the `default_memory_request` operator configuration parameter. +* **hugepages-2Mi** + hugepages-2Mi requests for the sidecar container. + Optional, defaults to not set. + +* **hugepages-1Gi** + 1Gi hugepages requests for the sidecar container. + Optional, defaults to not set. + ### Limits CPU and memory limits for the Postgres container. @@ -371,6 +401,14 @@ CPU and memory limits for the Postgres container. memory limits for the Postgres container. Optional, overrides the `default_memory_limits` operator configuration parameter. +* **hugepages-2Mi** + hugepages-2Mi requests for the sidecar container. + Optional, defaults to not set. + +* **hugepages-1Gi** + 1Gi hugepages requests for the sidecar container. + Optional, defaults to not set. + ## Parameters defining how to clone the cluster from another one Those parameters are applied when the cluster should be a clone of another one @@ -453,6 +491,9 @@ properties of the persistent storage that stores Postgres data. * **subPath** Subpath to use when mounting volume into Spilo container. Optional. +* **isSubPathExpr** + Set it to true if the specified subPath is an expression. Optional. + * **iops** When running the operator on AWS the latest generation of EBS volumes (`gp3`) allows for configuring the number of IOPS. Maximum is 16000. Optional. @@ -500,6 +541,14 @@ CPU and memory requests for the sidecar container. memory requests for the sidecar container. Optional, overrides the `default_memory_request` operator configuration parameter. Optional. +* **hugepages-2Mi** + hugepages-2Mi requests for the sidecar container. + Optional, defaults to not set. + +* **hugepages-1Gi** + 1Gi hugepages requests for the sidecar container. + Optional, defaults to not set. + ### Limits CPU and memory limits for the sidecar container. @@ -512,6 +561,14 @@ CPU and memory limits for the sidecar container. memory limits for the sidecar container. Optional, overrides the `default_memory_limits` operator configuration parameter. Optional. +* **hugepages-2Mi** + hugepages-2Mi requests for the sidecar container. + Optional, defaults to not set. + +* **hugepages-1Gi** + 1Gi hugepages requests for the sidecar container. + Optional, defaults to not set. + ## Connection pooler Parameters are grouped under the `connectionPooler` top-level key and specify @@ -581,7 +638,7 @@ the global configuration before adding the `tls` section'. 
 ## Change data capture streams
 
 This sections enables change data capture (CDC) streams via Postgres'
-[logical decoding](https://www.postgresql.org/docs/15/logicaldecoding.html)
+[logical decoding](https://www.postgresql.org/docs/17/logicaldecoding.html)
 feature and `pgoutput` plugin. While the Postgres operator takes responsibility
 for providing the setup to publish change events, it relies on external tools
 to consume them. At Zalando, we are using a workflow based on
@@ -595,11 +652,11 @@ can have the following properties:
 
 * **applicationId**
   The application name to which the database and CDC belongs to. For each
-  set of streams with a distinct `applicationId` a separate stream CR as well
-  as a separate logical replication slot will be created. This means there can
-  be different streams in the same database and streams with the same
-  `applicationId` are bundled in one stream CR. The stream CR will be called
-  like the Postgres cluster plus "-" suffix. Required.
+  set of streams with a distinct `applicationId` a separate stream resource as
+  well as a separate logical replication slot will be created. This means there
+  can be different streams in the same database and streams with the same
+  `applicationId` are bundled in one stream resource. The stream resource will
+  be called like the Postgres cluster plus "-" suffix. Required.
 
 * **database**
   Name of the database from where events will be published via Postgres'
@@ -610,21 +667,37 @@ can have the following properties:
 
 * **tables**
   Defines a map of table names and their properties (`eventType`, `idColumn`
-  and `payloadColumn`). The CDC operator is following the [outbox pattern](https://debezium.io/blog/2019/02/19/reliable-microservices-data-exchange-with-the-outbox-pattern/).
+  and `payloadColumn`). Required.
+  The CDC operator is following the [outbox pattern](https://debezium.io/blog/2019/02/19/reliable-microservices-data-exchange-with-the-outbox-pattern/).
   The application is responsible for putting events into a (JSON/B or VARCHAR)
   payload column of the outbox table in the structure of the specified target
-  event type. The operator will create a [PUBLICATION](https://www.postgresql.org/docs/15/logical-replication-publication.html)
+  event type. The operator will create a [PUBLICATION](https://www.postgresql.org/docs/17/logical-replication-publication.html)
   in Postgres for all tables specified for one `database` and `applicationId`.
   The CDC operator will consume from it shortly after transactions are
   committed to the outbox table. The `idColumn` will be used in telemetry for
   the CDC operator. The names for `idColumn` and `payloadColumn` can be
   configured. Defaults are `id` and `payload`. The target `eventType` has to
-  be defined. Required.
+  be defined. One can also specify a `recoveryEventType` that will be used
+  for a dead letter queue. By enabling `ignoreRecovery`, you can choose to
+  ignore failing events.
 
 * **filter**
   Streamed events can be filtered by a jsonpath expression for each table.
   Optional.
 
+* **enableRecovery**
+  Flag to enable a dead letter queue recovery for all streams tables.
+  Alternatively, recovery can also be enabled for single outbox tables by only
+  specifying a `recoveryEventType` and no `enableRecovery` flag. When set to
+  false or missing, events will be retried until consumption succeeds. You can
+  use a `filter` expression to get rid of poison pills. Optional.
+
 * **batchSize**
   Defines the size of batches in which events are consumed. Optional.
   Defaults to 1.
+ +* **cpu** + CPU requests to be set as an annotation on the stream resource. Optional. + +* **memory** + memory requests to be set as an annotation on the stream resource. Optional. diff --git a/docs/reference/operator_parameters.md b/docs/reference/operator_parameters.md index b759d37b0..95bfb4cf3 100644 --- a/docs/reference/operator_parameters.md +++ b/docs/reference/operator_parameters.md @@ -3,33 +3,46 @@ There are two mutually-exclusive methods to set the Postgres Operator configuration. -* ConfigMaps-based, the legacy one. The configuration is supplied in a - key-value configmap, defined by the `CONFIG_MAP_NAME` environment variable. - Non-scalar values, i.e. lists or maps, are encoded in the value strings using - the comma-based syntax for lists and coma-separated `key:value` syntax for - maps. String values containing ':' should be enclosed in quotes. The - configuration is flat, parameter group names below are not reflected in the - configuration structure. There is an - [example](https://github.com/zalando/postgres-operator/blob/master/manifests/configmap.yaml) - -* CRD-based configuration. The configuration is stored in a custom YAML - manifest. The manifest is an instance of the custom resource definition (CRD) - called `OperatorConfiguration`. The operator registers this CRD during the - start and uses it for configuration if the [operator deployment manifest](https://github.com/zalando/postgres-operator/blob/master/manifests/postgres-operator.yaml#L36) - sets the `POSTGRES_OPERATOR_CONFIGURATION_OBJECT` env variable to a non-empty - value. The variable should point to the `postgresql-operator-configuration` - object in the operator's namespace. - - The CRD-based configuration is a regular YAML document; non-scalar keys are - simply represented in the usual YAML way. There are no default values built-in - in the operator, each parameter that is not supplied in the configuration - receives an empty value. In order to create your own configuration just copy - the [default one](https://github.com/zalando/postgres-operator/blob/master/manifests/postgresql-operator-default-configuration.yaml) - and change it. - - To test the CRD-based configuration locally, use the following - - ```bash +* ConfigMaps-based, the legacy one +* CRD-based configuration + +Variable names are underscore-separated words. + +### ConfigMaps-based +The configuration is supplied in a +key-value configmap, defined by the `CONFIG_MAP_NAME` environment variable. +Non-scalar values, i.e. lists or maps, are encoded in the value strings using +the comma-based syntax for lists and coma-separated `key:value` syntax for +maps. String values containing ':' should be enclosed in quotes. The +configuration is flat, parameter group names below are not reflected in the +configuration structure. There is an +[example](https://github.com/zalando/postgres-operator/blob/master/manifests/configmap.yaml) + +For the configmap configuration, the [default parameter values](https://github.com/zalando/postgres-operator/blob/master/pkg/util/config/config.go#L14) +mentioned here are likely to be overwritten in your local operator installation +via your local version of the operator configmap. In the case you use the +operator CRD, all the CRD defaults are provided in the +[operator's default configuration manifest](https://github.com/zalando/postgres-operator/blob/master/manifests/postgresql-operator-default-configuration.yaml) + +### CRD-based configuration +The configuration is stored in a custom YAML +manifest. 
The manifest is an instance of the custom resource definition (CRD) +called `OperatorConfiguration`. The operator registers this CRD during the +start and uses it for configuration if the [operator deployment manifest](https://github.com/zalando/postgres-operator/blob/master/manifests/postgres-operator.yaml#L36) +sets the `POSTGRES_OPERATOR_CONFIGURATION_OBJECT` env variable to a non-empty +value. The variable should point to the `postgresql-operator-configuration` +object in the operator's namespace. + +The CRD-based configuration is a regular YAML document; non-scalar keys are +simply represented in the usual YAML way. There are no default values built-in +in the operator, each parameter that is not supplied in the configuration +receives an empty value. In order to create your own configuration just copy +the [default one](https://github.com/zalando/postgres-operator/blob/master/manifests/postgresql-operator-default-configuration.yaml) +and change it. + +To test the CRD-based configuration locally, use the following + +```bash kubectl create -f manifests/operatorconfiguration.crd.yaml # registers the CRD kubectl create -f manifests/postgresql-operator-default-configuration.yaml @@ -37,7 +50,7 @@ configuration. kubectl create -f manifests/postgres-operator.yaml # set the env var as mentioned above kubectl get operatorconfigurations postgresql-operator-default-configuration -o yaml - ``` +``` The CRD-based configuration is more powerful than the one based on ConfigMaps and should be used unless there is a compatibility requirement to use an already @@ -58,15 +71,6 @@ parameters, those parameters have no effect and are replaced by the `CRD_READY_WAIT_INTERVAL` and `CRD_READY_WAIT_TIMEOUT` environment variables. They will be deprecated and removed in the future. -For the configmap configuration, the [default parameter values](https://github.com/zalando/postgres-operator/blob/master/pkg/util/config/config.go#L14) -mentioned here are likely to be overwritten in your local operator installation -via your local version of the operator configmap. In the case you use the -operator CRD, all the CRD defaults are provided in the -[operator's default configuration manifest](https://github.com/zalando/postgres-operator/blob/master/manifests/postgresql-operator-default-configuration.yaml) - -Variable names are underscore-separated words. - - ## General Those are top-level keys, containing both leaf keys and groups. @@ -90,9 +94,6 @@ Those are top-level keys, containing both leaf keys and groups. * **enable_pgversion_env_var** With newer versions of Spilo, it is preferable to use `PGVERSION` pod environment variable instead of the setting `postgresql.bin_dir` in the `SPILO_CONFIGURATION` env variable. When this option is true, the operator sets `PGVERSION` and omits `postgresql.bin_dir` from `SPILO_CONFIGURATION`. When false, the `postgresql.bin_dir` is set. This setting takes precedence over `PGVERSION`; see PR 222 in Spilo. The default is `true`. -* **enable_spilo_wal_path_compat** - enables backwards compatible path between Spilo 12 and Spilo 13+ images. The default is `false`. - * **enable_team_id_clustername_prefix** To lower the risk of name clashes between clusters of different teams you can turn on this flag and the operator will sync only clusters where the @@ -208,7 +209,7 @@ under the `users` key. For all `LOGIN` roles that are not database owners the operator can rotate credentials in the corresponding K8s secrets by replacing the username and password. 
This means, new users will be added on each rotation inheriting - all priviliges from the original roles. The rotation date (in YYMMDD format) + all privileges from the original roles. The rotation date (in YYMMDD format) is appended to the names of the new user. The timestamp of the next rotation is written to the secret. The default is `false`. @@ -238,7 +239,7 @@ CRD-configuration, they are grouped under the `major_version_upgrade` key. `"manual"` = manifest triggers action, `"full"` = manifest and minimal version violation trigger upgrade. Note, that with all three modes increasing the version in the manifest will - trigger a rolling update of the pods. The default is `"off"`. + trigger a rolling update of the pods. The default is `"manual"`. * **major_version_upgrade_team_allow_list** Upgrades will only be carried out for clusters of listed teams when mode is @@ -246,12 +247,12 @@ CRD-configuration, they are grouped under the `major_version_upgrade` key. * **minimal_major_version** The minimal Postgres major version that will not automatically be upgraded - when `major_version_upgrade_mode` is set to `"full"`. The default is `"11"`. + when `major_version_upgrade_mode` is set to `"full"`. The default is `"13"`. * **target_major_version** The target Postgres major version when upgrading clusters automatically which violate the configured allowed `minimal_major_version` when - `major_version_upgrade_mode` is set to `"full"`. The default is `"15"`. + `major_version_upgrade_mode` is set to `"full"`. The default is `"17"`. ## Kubernetes resources @@ -259,6 +260,31 @@ Parameters to configure cluster-related Kubernetes objects created by the operator, as well as some timeouts associated with them. In a CRD-based configuration they are grouped under the `kubernetes` key. +* **enable_finalizers** + By default, a deletion of the Postgresql resource will trigger an event + that leads to a cleanup of all child resources. However, if the database + cluster is in a broken state (e.g. failed initialization) and the operator + cannot fully sync it, there can be leftovers. By enabling finalizers the + operator will ensure all managed resources are deleted prior to the + Postgresql resource. See also [admin docs](../administrator.md#owner-references-and-finalizers) + for more information The default is `false`. + +* **enable_owner_references** + The operator can set owner references on its child resources (except PVCs, + Patroni config service/endpoint, cross-namespace secrets) to improve cluster + monitoring and enable cascading deletion. The default is `false`. Warning, + enabling this option disables configured delete protection checks (see below). + +* **delete_annotation_date_key** + key name for annotation that compares manifest value with current date in the + YYYY-MM-DD format. Allowed pattern: `'([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]'`. + The default is empty which also disables this delete protection check. + +* **delete_annotation_name_key** + key name for annotation that compares manifest value with Postgres cluster name. + Allowed pattern: `'([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]'`. The default is + empty which also disables this delete protection check. + * **pod_service_account_name** service account used by Patroni running on individual Pods to communicate with the operator. Required even if native Kubernetes support in Patroni is @@ -289,16 +315,6 @@ configuration they are grouped under the `kubernetes` key. of a database created by the operator. 
If the annotation key is also provided
   by the database definition, the database definition value is used.
 
-* **delete_annotation_date_key**
-  key name for annotation that compares manifest value with current date in the
-  YYYY-MM-DD format. Allowed pattern: `'([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]'`.
-  The default is empty which also disables this delete protection check.
-
-* **delete_annotation_name_key**
-  key name for annotation that compares manifest value with Postgres cluster name.
-  Allowed pattern: `'([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]'`. The default is
-  empty which also disables this delete protection check.
-
 * **downscaler_annotations**
   An array of annotations that should be passed from Postgres CRD on to the
   statefulset and, if exists, to the connection pooler deployment as well.
@@ -318,11 +334,40 @@ configuration they are grouped under the `kubernetes` key.
   pod namespace).
 
 * **pdb_name_format**
-  defines the template for PDB (Pod Disruption Budget) names created by the
+  defines the template for primary PDB (Pod Disruption Budget) name created by the
   operator. The default is `postgres-{cluster}-pdb`, where `{cluster}` is
   replaced by the cluster name. Only the `{cluster}` placeholders is allowed in
   the template.
 
+* **pdb_master_label_selector**
+  By default the primary PDB will match the master role, hence preventing nodes
+  from being drained if the node_readiness_label is not used. If this option is
+  set to `false` the `spilo-role=master` selector will not be added to the PDB.
+
+* **persistent_volume_claim_retention_policy**
+  The operator tries to protect volumes as much as possible. If somebody
+  accidentally deletes the statefulset or scales in the `numberOfInstances` the
+  Persistent Volume Claims and thus Persistent Volumes will be retained.
+  However, this can have some consequences when you scale out again at a much
+  later point, for example after the cluster's Postgres major version has been
+  upgraded, because the old volume runs the old Postgres version with stale data.
+  Even if the version has not changed the replication lag could be massive. In
+  this case a reinitialization of the re-added member would make sense. You can
+  also modify the [retention policy of PVCs](https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#persistentvolumeclaim-retention) in the operator configuration.
+  The behavior can be changed for two scenarios: `when_deleted` - default is
+  `"retain"` - or `when_scaled` - default is also `"retain"`. The other possible
+  option is `delete`.
+
+* **enable_secrets_deletion**
+  By default, the operator deletes secrets when removing the Postgres cluster
+  manifest. To keep secrets, set this option to `false`. The default is `true`.
+
+* **enable_persistent_volume_claim_deletion**
+  By default, the operator deletes persistent volume claims when removing the
+  Postgres cluster manifest, no matter if `persistent_volume_claim_retention_policy`
+  on the statefulset is set to `retain`. To keep PVCs set this option to `false`.
+  The default is `true`.
+
 * **enable_pod_disruption_budget**
   PDB is enabled by default to protect the cluster from voluntarily disruptions
   and hence unwanted DB downtime. However, on some cloud providers it could be
@@ -431,7 +476,7 @@ configuration they are grouped under the `kubernetes` key.
   environment if they not if conflict with the environment variables generated
   by the operator. The WAL location (bucket path) can be overridden, though.
   The default is empty.
- + * **pod_environment_secret** similar to pod_environment_configmap but referencing a secret with custom environment variables. Because the secret is not allowed to exist in a @@ -507,7 +552,7 @@ configuration they are grouped under the `kubernetes` key. pods with `InitialDelaySeconds: 6`, `PeriodSeconds: 10`, `TimeoutSeconds: 5`, `SuccessThreshold: 1` and `FailureThreshold: 3`. When enabling readiness probes it is recommended to switch the `pod_management_policy` to `parallel` - to avoid unneccesary waiting times in case of multiple instances failing. + to avoid unnecessary waiting times in case of multiple instances failing. The default is `false`. * **storage_resize_mode** @@ -527,19 +572,19 @@ CRD-based configuration. * **default_cpu_request** CPU request value for the Postgres containers, unless overridden by - cluster-specific settings. The default is `100m`. + cluster-specific settings. Empty string or `0` disables the default. * **default_memory_request** memory request value for the Postgres containers, unless overridden by - cluster-specific settings. The default is `100Mi`. + cluster-specific settings. Empty string or `0` disables the default. * **default_cpu_limit** CPU limits for the Postgres containers, unless overridden by cluster-specific - settings. The default is `1`. + settings. Empty string or `0` disables the default. * **default_memory_limit** memory limits for the Postgres containers, unless overridden by cluster-specific - settings. The default is `500Mi`. + settings. Empty string or `0` disables the default. * **max_cpu_request** optional upper boundary for CPU request @@ -549,11 +594,11 @@ CRD-based configuration. * **min_cpu_limit** hard CPU minimum what we consider to be required to properly run Postgres - clusters with Patroni on Kubernetes. The default is `250m`. + clusters with Patroni on Kubernetes. * **min_memory_limit** hard memory minimum what we consider to be required to properly run Postgres - clusters with Patroni on Kubernetes. The default is `250Mi`. + clusters with Patroni on Kubernetes. ## Patroni options @@ -577,7 +622,7 @@ effect, and the parameters are grouped under the `timeouts` key in the CRD-based configuration. * **PatroniAPICheckInterval** - the interval between consecutive attempts waiting for the return of + the interval between consecutive attempts waiting for the return of Patroni Api. The default is `1s`. * **PatroniAPICheckTimeout** @@ -651,12 +696,12 @@ In the CRD-based configuration they are grouped under the `load_balancer` key. balancers. Allowed values are `Cluster` (default) and `Local`. * **master_dns_name_format** - defines the DNS name string template for the master load balancer cluster. + defines the DNS name string template for the master load balancer cluster. The default is `{cluster}.{namespace}.{hostedzone}`, where `{cluster}` is replaced by the cluster name, `{namespace}` is replaced with the namespace and `{hostedzone}` is replaced with the hosted zone (the value of the `db_hosted_zone` parameter). The `{team}` placeholder can still be used, - although it is not recommened because the team of a cluster can change. + although it is not recommended because the team of a cluster can change. If the cluster name starts with the `teamId` it will also be part of the DNS, aynway. No other placeholders are allowed! @@ -675,7 +720,7 @@ In the CRD-based configuration they are grouped under the `load_balancer` key. 
is replaced by the cluster name, `{namespace}` is replaced with the namespace and `{hostedzone}` is replaced with the hosted zone (the value of the `db_hosted_zone` parameter). The `{team}` placeholder can still be used, - although it is not recommened because the team of a cluster can change. + although it is not recommended because the team of a cluster can change. If the cluster name starts with the `teamId` it will also be part of the DNS, aynway. No other placeholders are allowed! @@ -770,11 +815,11 @@ grouped under the `logical_backup` key. default values from `postgres_pod_resources` will be used. * **logical_backup_docker_image** - An image for pods of the logical backup job. The [example image](https://github.com/zalando/postgres-operator/blob/master/docker/logical-backup/Dockerfile) + An image for pods of the logical backup job. The [example image](https://github.com/zalando/postgres-operator/blob/master/logical-backup/Dockerfile) runs `pg_dumpall` on a replica if possible and uploads compressed results to - an S3 bucket under the key `/spilo/pg_cluster_name/cluster_k8s_uuid/logical_backups`. + an S3 bucket under the key `////logical_backups`. The default image is the same image built with the Zalando-internal CI - pipeline. Default: "registry.opensource.zalan.do/acid/logical-backup:v1.10.1" + pipeline. Default: "ghcr.io/zalando/postgres-operator/logical-backup:v1.13.0" * **logical_backup_google_application_credentials** Specifies the path of the google cloud service account json file. Default is empty. @@ -802,6 +847,9 @@ grouped under the `logical_backup` key. S3 bucket to store backup results. The bucket has to be present and accessible by Postgres pods. Default: empty. +* **logical_backup_s3_bucket_prefix** + S3 bucket prefix to use in configured bucket. Default: "spilo" + * **logical_backup_s3_endpoint** When using non-AWS S3 storage, endpoint can be set as a ENV variable. The default is empty. @@ -816,7 +864,7 @@ grouped under the `logical_backup` key. is specified, no argument will be passed to `aws s3` command. Default: "AES256". * **logical_backup_s3_retention_time** - Specify a retention time for logical backups stored in S3. Backups older than the specified retention + Specify a retention time for logical backups stored in S3. Backups older than the specified retention time will be deleted after a new backup was uploaded. If empty, all backups will be kept. Example values are "3 days", "2 weeks", or "1 month". The default is empty. @@ -825,6 +873,9 @@ grouped under the `logical_backup` key. [reference schedule format](https://kubernetes.io/docs/tasks/job/automated-tasks-with-cron-jobs/#schedule) into account. Default: "30 00 \* \* \*" +* **logical_backup_cronjob_environment_secret** + Reference to a Kubernetes secret, which keys will be added as environment variables to the cronjob. Default: "" + ## Debugging the operator Options to aid debugging of the operator itself. Grouped under the `debug` key. @@ -1001,5 +1052,4 @@ operator being able to provide some reasonable defaults. **connection_pooler_default_memory_reques** **connection_pooler_default_cpu_limit** **connection_pooler_default_memory_limit** - Default resource configuration for connection pooler deployment. The internal - default for memory request and limit is `100Mi`, for CPU it is `500m` and `1`. + Default resource configuration for connection pooler deployment. 
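Several of the logical backup parameters above can be combined in the ConfigMap-based configuration; a minimal sketch, with an illustrative bucket name:

```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: postgres-operator
data:
  logical_backup_schedule: "30 00 * * *"        # global default cron schedule
  logical_backup_s3_bucket: "my-backup-bucket"  # illustrative, must exist and be reachable
  logical_backup_s3_bucket_prefix: "spilo"      # default prefix inside the bucket
  logical_backup_s3_retention_time: "2 weeks"   # prune older dumps after each upload
```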
diff --git a/docs/user.md b/docs/user.md index 828badfe8..c1a7c7d45 100644 --- a/docs/user.md +++ b/docs/user.md @@ -30,7 +30,7 @@ spec: databases: foo: zalando postgresql: - version: "15" + version: "17" ``` Once you cloned the Postgres Operator [repository](https://github.com/zalando/postgres-operator) @@ -109,7 +109,7 @@ metadata: spec: [...] postgresql: - version: "15" + version: "17" parameters: password_encryption: scram-sha-256 ``` @@ -517,7 +517,7 @@ Postgres Operator will create the following NOLOGIN roles: The `_owner` role is the database owner and should be used when creating new database objects. All members of the `admin` role, e.g. teams API roles, can -become the owner with the `SET ROLE` command. [Default privileges](https://www.postgresql.org/docs/15/sql-alterdefaultprivileges.html) +become the owner with the `SET ROLE` command. [Default privileges](https://www.postgresql.org/docs/17/sql-alterdefaultprivileges.html) are configured for the owner role so that the `_reader` role automatically gets read-access (SELECT) to new tables and sequences and the `_writer` receives write-access (INSERT, UPDATE, DELETE on tables, @@ -594,7 +594,7 @@ spec: ### Schema `search_path` for default roles -The schema [`search_path`](https://www.postgresql.org/docs/15/ddl-schemas.html#DDL-SCHEMAS-PATH) +The schema [`search_path`](https://www.postgresql.org/docs/17/ddl-schemas.html#DDL-SCHEMAS-PATH) for each role will include the role name and the schemas, this role should have access to. So `foo_bar_writer` does not have to schema-qualify tables from schemas `foo_bar_writer, bar`, while `foo_writer` can look up `foo_writer` and @@ -689,6 +689,30 @@ The minimum limits to properly run the `postgresql` resource are configured to manifest the operator will raise the limits to the configured minimum values. If no resources are defined in the manifest they will be obtained from the configured [default requests](reference/operator_parameters.md#kubernetes-resource-requests). +If neither defaults nor minimum limits are configured the operator will not +specify any resources and it's up to K8s (or your own) admission hooks to +handle it. + +### HugePages support + +The operator supports [HugePages](https://www.postgresql.org/docs/17/kernel-resources.html#LINUX-HUGEPAGES). +To enable HugePages, set the matching resource requests and/or limits in the manifest: + +```yaml +spec: + resources: + requests: + hugepages-2Mi: 250Mi + hugepages-1Gi: 1Gi + limits: + hugepages-2Mi: 500Mi + hugepages-1Gi: 2Gi +``` + +There are no minimums or maximums and the default is 0 for both HugePage sizes, +but Kubernetes will not spin up the pod if the requested HugePages cannot be allocated. +For more information on HugePages in Kubernetes, see also +[https://kubernetes.io/docs/tasks/manage-hugepages/scheduling-hugepages/](https://kubernetes.io/docs/tasks/manage-hugepages/scheduling-hugepages/) ## Use taints, tolerations and node affinity for dedicated PostgreSQL nodes @@ -734,7 +758,7 @@ If you need to define a `nodeAffinity` for all your Postgres clusters use the ## In-place major version upgrade Starting with Spilo 13, operator supports in-place major version upgrade to a -higher major version (e.g. from PG 10 to PG 13). To trigger the upgrade, +higher major version (e.g. from PG 14 to PG 16). To trigger the upgrade, simply increase the version in the manifest. It is your responsibility to test your applications against the new version before the upgrade; downgrading is not supported. 
The easiest way to do so is to try the upgrade on the cloned @@ -814,7 +838,7 @@ spec: ### Clone directly Another way to get a fresh copy of your source DB cluster is via -[pg_basebackup](https://www.postgresql.org/docs/15/app-pgbasebackup.html). To +[pg_basebackup](https://www.postgresql.org/docs/17/app-pgbasebackup.html). To use this feature simply leave out the timestamp field from the clone section. The operator will connect to the service of the source cluster by name. If the cluster is called test, then the connection string will look like host=test @@ -876,7 +900,7 @@ the PostgreSQL version between source and target cluster has to be the same. To start a cluster as standby, add the following `standby` section in the YAML file. You can stream changes from archived WAL files (AWS S3 or Google Cloud -Storage) or from a remote primary. Only one option can be specfied in the +Storage) or from a remote primary. Only one option can be specified in the manifest: ```yaml @@ -887,7 +911,7 @@ spec: For GCS, you have to define STANDBY_GOOGLE_APPLICATION_CREDENTIALS as a [custom pod environment variable](administrator.md#custom-pod-environment-variables). -It is not set from the config to allow for overridding. +It is not set from the config to allow for overriding. ```yaml spec: @@ -940,33 +964,25 @@ established between standby replica(s). One big advantage of standby clusters is that they can be promoted to a proper database cluster. This means it will stop replicating changes from the source, and start accept writes itself. This mechanism makes it possible to move -databases from one place to another with minimal downtime. Currently, the -operator does not support promoting a standby cluster. It has to be done -manually using `patronictl edit-config` inside the postgres container of the -standby leader pod. Remove the following lines from the YAML structure and the -leader promotion happens immediately. Before doing so, make sure that the -standby is not behind the source database. +databases from one place to another with minimal downtime. -```yaml -standby_cluster: - create_replica_methods: - - bootstrap_standby_with_wale - - basebackup_fast_xlog - restore_command: envdir "/home/postgres/etc/wal-e.d/env-standby" /scripts/restore_command.sh - "%f" "%p" -``` +Before promoting a standby cluster, make sure that the standby is not behind +the source database. You should ideally stop writes to your source cluster and +then create a dummy database object that you check for being replicated in the +target to verify all data has been copied. -Finally, remove the `standby` section from the postgres cluster manifest. +To promote, remove the `standby` section from the postgres cluster manifest. +A rolling update will be triggered removing the `STANDBY_*` environment +variables from the pods, followed by a Patroni config update that promotes the +cluster. -### Turn a normal cluster into a standby +### Adding standby section after promotion -There is no way to transform a non-standby cluster to a standby cluster through -the operator. Adding the `standby` section to the manifest of a running -Postgres cluster will have no effect. But, as explained in the previous -paragraph it can be done manually through `patronictl edit-config`. This time, -by adding the `standby_cluster` section to the Patroni configuration. However, -the transformed standby cluster will not be doing any streaming. It will be in -standby mode and allow read-only transactions only. 
+Turning a running cluster into a standby is not easily possible and should be +avoided. The best way is to remove the cluster and resubmit the manifest +after a short wait of a few minutes. Adding the `standby` section would turn +the database cluster in read-only mode on next operator SYNC cycle but it +does not sync automatically with the source cluster again. ## Sidecar Support @@ -989,6 +1005,7 @@ spec: env: - name: "ENV_VAR_NAME" value: "any-k8s-env-things" + command: ['sh', '-c', 'echo "logging" > /opt/logs.txt'] ``` In addition to any environment variables you specify, the following environment @@ -1265,7 +1282,7 @@ minutes if the certificates have changed and reloads postgres accordingly. ### TLS certificates for connection pooler By default, the pgBouncer image generates its own TLS certificate like Spilo. -When the `tls` section is specfied in the manifest it will be used for the +When the `tls` section is specified in the manifest it will be used for the connection pooler pod(s) as well. The security context options are hard coded to `runAsUser: 100` and `runAsGroup: 101`. The `fsGroup` will be the same like for Spilo. diff --git a/e2e/Makefile b/e2e/Makefile index 25688b920..52d24e9e5 100644 --- a/e2e/Makefile +++ b/e2e/Makefile @@ -46,7 +46,7 @@ tools: # install pinned version of 'kind' # go install must run outside of a dir with a (module-based) Go project ! # otherwise go install updates project's dependencies and/or behaves differently - cd "/tmp" && GO111MODULE=on go install sigs.k8s.io/kind@v0.14.0 + cd "/tmp" && GO111MODULE=on go install sigs.k8s.io/kind@v0.24.0 e2etest: tools copy clean ./run.sh main diff --git a/e2e/requirements.txt b/e2e/requirements.txt index ea5405b56..d904585be 100644 --- a/e2e/requirements.txt +++ b/e2e/requirements.txt @@ -1,3 +1,3 @@ -kubernetes==24.2.0 +kubernetes==29.2.0 timeout_decorator==0.5.0 -pyyaml==6.0 +pyyaml==6.0.1 diff --git a/e2e/run.sh b/e2e/run.sh index ecef56d08..d289cb3f4 100755 --- a/e2e/run.sh +++ b/e2e/run.sh @@ -8,7 +8,7 @@ IFS=$'\n\t' readonly cluster_name="postgres-operator-e2e-tests" readonly kubeconfig_path="/tmp/kind-config-${cluster_name}" -readonly spilo_image="registry.opensource.zalan.do/acid/spilo-15-e2e:0.1" +readonly spilo_image="registry.opensource.zalan.do/acid/spilo-17-e2e:0.3" readonly e2e_test_runner_image="registry.opensource.zalan.do/acid/postgres-operator-e2e-tests-runner:0.4" export GOPATH=${GOPATH-~/go} diff --git a/e2e/tests/k8s_api.py b/e2e/tests/k8s_api.py index 34cf3659c..1f42ad4bc 100644 --- a/e2e/tests/k8s_api.py +++ b/e2e/tests/k8s_api.py @@ -20,6 +20,7 @@ def __init__(self): self.config = config.load_kube_config() self.k8s_client = client.ApiClient() + self.rbac_api = client.RbacAuthorizationV1Api() self.core_v1 = client.CoreV1Api() self.apps_v1 = client.AppsV1Api() @@ -217,7 +218,6 @@ def wait_for_pod_failover(self, failover_targets, labels, namespace='default'): pod_phase = 'Failing over' new_pod_node = '' pods_with_update_flag = self.count_pods_with_rolling_update_flag(labels, namespace) - while (pod_phase != 'Running') or (new_pod_node not in failover_targets): pods = self.api.core_v1.list_namespaced_pod(namespace, label_selector=labels).items if pods: @@ -314,7 +314,7 @@ def get_operator_state(self): def get_patroni_running_members(self, pod="acid-minimal-cluster-0"): result = self.get_patroni_state(pod) - return list(filter(lambda x: "State" in x and x["State"] == "running", result)) + return list(filter(lambda x: "State" in x and x["State"] in ["running", "streaming"], result)) def 
get_deployment_replica_count(self, name="acid-minimal-cluster-pooler", namespace="default"): try: @@ -524,7 +524,6 @@ def wait_for_pod_failover(self, failover_targets, labels, namespace='default'): pod_phase = 'Failing over' new_pod_node = '' pods_with_update_flag = self.count_pods_with_rolling_update_flag(labels, namespace) - while (pod_phase != 'Running') or (new_pod_node not in failover_targets): pods = self.api.core_v1.list_namespaced_pod(namespace, label_selector=labels).items if pods: @@ -583,7 +582,7 @@ def get_patroni_state(self, pod): def get_patroni_running_members(self, pod): result = self.get_patroni_state(pod) - return list(filter(lambda x: x["State"] == "running", result)) + return list(filter(lambda x: x["State"] in ["running", "streaming"], result)) def get_statefulset_image(self, label_selector="application=spilo,cluster-name=acid-minimal-cluster", namespace='default'): ssets = self.api.apps_v1.list_namespaced_stateful_set(namespace, label_selector=label_selector, limit=1) diff --git a/e2e/tests/test_e2e.py b/e2e/tests/test_e2e.py index ff5b6d4bf..b9a2a27d4 100644 --- a/e2e/tests/test_e2e.py +++ b/e2e/tests/test_e2e.py @@ -12,9 +12,9 @@ from tests.k8s_api import K8s from kubernetes.client.rest import ApiException -SPILO_CURRENT = "registry.opensource.zalan.do/acid/spilo-15-e2e:0.1" -SPILO_LAZY = "registry.opensource.zalan.do/acid/spilo-15-e2e:0.2" - +SPILO_CURRENT = "registry.opensource.zalan.do/acid/spilo-17-e2e:0.3" +SPILO_LAZY = "registry.opensource.zalan.do/acid/spilo-17-e2e:0.4" +SPILO_FULL_IMAGE = "ghcr.io/zalando/spilo-17:4.0-p2" def to_selector(labels): return ",".join(["=".join(lbl) for lbl in labels.items()]) @@ -95,7 +95,7 @@ def setUpClass(cls): print("Failed to delete the 'standard' storage class: {0}".format(e)) # operator deploys pod service account there on start up - # needed for test_multi_namespace_support() + # needed for test_multi_namespace_support and test_owner_references cls.test_namespace = "test" try: v1_namespace = client.V1Namespace(metadata=client.V1ObjectMeta(name=cls.test_namespace)) @@ -115,6 +115,7 @@ def setUpClass(cls): configmap = yaml.safe_load(f) configmap["data"]["workers"] = "1" configmap["data"]["docker_image"] = SPILO_CURRENT + configmap["data"]["major_version_upgrade_mode"] = "full" with open("manifests/configmap.yaml", 'w') as f: yaml.dump(configmap, f, Dumper=yaml.Dumper) @@ -129,7 +130,8 @@ def setUpClass(cls): "infrastructure-roles.yaml", "infrastructure-roles-new.yaml", "custom-team-membership.yaml", - "e2e-storage-class.yaml"]: + "e2e-storage-class.yaml", + "fes.crd.yaml"]: result = k8s.create_with_kubectl("manifests/" + filename) print("stdout: {}, stderr: {}".format(result.stdout, result.stderr)) @@ -199,6 +201,7 @@ def test_additional_owner_roles(self): self.eventuallyEqual(lambda: len(self.query_database(leader.metadata.name, "postgres", owner_query)), 3, "Not all additional users found in database", 10, 5) + @timeout_decorator.timeout(TEST_TIMEOUT_SEC) def test_additional_pod_capabilities(self): ''' @@ -398,8 +401,8 @@ def test_config_update(self): "max_connections": new_max_connections_value, "wal_level": "logical" } - }, - "patroni": { + }, + "patroni": { "slots": { "first_slot": { "type": "physical" @@ -410,7 +413,7 @@ def test_config_update(self): "retry_timeout": 9, "synchronous_mode": True, "failsafe_mode": True, - } + } } } @@ -513,7 +516,7 @@ def compare_config(): pg_add_new_slots_patch = { "spec": { "patroni": { - "slots": { + "slots": { "test_slot": { "type": "logical", "database": "foo", @@ -909,22 +912,8 @@ 
def test_ignored_annotations(self): ''' k8s = self.k8s - annotation_patch = { - "metadata": { - "annotations": { - "k8s-status": "healthy" - }, - } - } try: - sts = k8s.api.apps_v1.read_namespaced_stateful_set('acid-minimal-cluster', 'default') - old_sts_creation_timestamp = sts.metadata.creation_timestamp - k8s.api.apps_v1.patch_namespaced_stateful_set(sts.metadata.name, sts.metadata.namespace, annotation_patch) - svc = k8s.api.core_v1.read_namespaced_service('acid-minimal-cluster', 'default') - old_svc_creation_timestamp = svc.metadata.creation_timestamp - k8s.api.core_v1.patch_namespaced_service(svc.metadata.name, svc.metadata.namespace, annotation_patch) - patch_config_ignored_annotations = { "data": { "ignored_annotations": "k8s-status", @@ -933,6 +922,25 @@ def test_ignored_annotations(self): k8s.update_config(patch_config_ignored_annotations) self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync") + sts = k8s.api.apps_v1.read_namespaced_stateful_set('acid-minimal-cluster', 'default') + svc = k8s.api.core_v1.read_namespaced_service('acid-minimal-cluster', 'default') + + annotation_patch = { + "metadata": { + "annotations": { + "k8s-status": "healthy" + }, + } + } + + old_sts_creation_timestamp = sts.metadata.creation_timestamp + k8s.api.apps_v1.patch_namespaced_stateful_set(sts.metadata.name, sts.metadata.namespace, annotation_patch) + old_svc_creation_timestamp = svc.metadata.creation_timestamp + k8s.api.core_v1.patch_namespaced_service(svc.metadata.name, svc.metadata.namespace, annotation_patch) + + k8s.delete_operator_pod() + self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync") + sts = k8s.api.apps_v1.read_namespaced_stateful_set('acid-minimal-cluster', 'default') new_sts_creation_timestamp = sts.metadata.creation_timestamp svc = k8s.api.core_v1.read_namespaced_service('acid-minimal-cluster', 'default') @@ -1174,31 +1182,143 @@ def get_docker_image(): self.eventuallyEqual(lambda: len(k8s.get_patroni_running_members("acid-minimal-cluster-0")), 2, "Postgres status did not enter running") @timeout_decorator.timeout(TEST_TIMEOUT_SEC) - @unittest.skip("Skipping this test until fixed") def test_major_version_upgrade(self): + """ + Test major version upgrade: with full upgrade, maintenance window, and annotation + """ + def check_version(): + p = k8s.patroni_rest("acid-upgrade-test-0", "") or {} + version = p.get("server_version", 0) // 10000 + return version + + def get_annotations(): + pg_manifest = k8s.api.custom_objects_api.get_namespaced_custom_object( + "acid.zalan.do", "v1", "default", "postgresqls", "acid-upgrade-test") + annotations = pg_manifest["metadata"]["annotations"] + return annotations + k8s = self.k8s - result = k8s.create_with_kubectl("manifests/minimal-postgres-manifest-12.yaml") - self.eventuallyEqual(lambda: k8s.count_running_pods(labels="application=spilo,cluster-name=acid-upgrade-test"), 2, "No 2 pods running") + cluster_label = 'application=spilo,cluster-name=acid-upgrade-test' + + with open("manifests/minimal-postgres-lowest-version-manifest.yaml", 'r+') as f: + upgrade_manifest = yaml.safe_load(f) + upgrade_manifest["spec"]["dockerImage"] = SPILO_FULL_IMAGE + + with open("manifests/minimal-postgres-lowest-version-manifest.yaml", 'w') as f: + yaml.dump(upgrade_manifest, f, Dumper=yaml.Dumper) + + k8s.create_with_kubectl("manifests/minimal-postgres-lowest-version-manifest.yaml") + self.eventuallyEqual(lambda: k8s.count_running_pods(labels=cluster_label), 2, "No 2 pods 
running") self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync") + self.eventuallyEqual(check_version, 13, "Version is not correct") - pg_patch_version = { + master_nodes, _ = k8s.get_cluster_nodes(cluster_labels=cluster_label) + # should upgrade immediately + pg_patch_version_14 = { "spec": { - "postgres": { + "postgresql": { "version": "14" } } } k8s.api.custom_objects_api.patch_namespaced_custom_object( - "acid.zalan.do", "v1", "default", "postgresqls", "acid-upgrade-test", pg_patch_version) + "acid.zalan.do", "v1", "default", "postgresqls", "acid-upgrade-test", pg_patch_version_14) + self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync") + k8s.wait_for_pod_failover(master_nodes, 'spilo-role=replica,' + cluster_label) + k8s.wait_for_pod_start('spilo-role=master,' + cluster_label) + k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label) + self.eventuallyEqual(check_version, 14, "Version should be upgraded from 13 to 14") + + # check if annotation for last upgrade's success is set + annotations = get_annotations() + self.assertIsNotNone(annotations.get("last-major-upgrade-success"), "Annotation for last upgrade's success is not set") + + # should not upgrade because current time is not in maintenanceWindow + current_time = datetime.now() + maintenance_window_future = f"{(current_time+timedelta(minutes=60)).strftime('%H:%M')}-{(current_time+timedelta(minutes=120)).strftime('%H:%M')}" + pg_patch_version_15_outside_mw = { + "spec": { + "postgresql": { + "version": "15" + }, + "maintenanceWindows": [ + maintenance_window_future + ] + } + } + k8s.api.custom_objects_api.patch_namespaced_custom_object( + "acid.zalan.do", "v1", "default", "postgresqls", "acid-upgrade-test", pg_patch_version_15_outside_mw) self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync") - def check_version_14(): - p = k8s.get_patroni_state("acid-upgrade-test-0") - version = p["server_version"][0:2] - return version + # no pod replacement outside of the maintenance window + k8s.wait_for_pod_start('spilo-role=master,' + cluster_label) + k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label) + self.eventuallyEqual(check_version, 14, "Version should not be upgraded") + + second_annotations = get_annotations() + self.assertIsNone(second_annotations.get("last-major-upgrade-failure"), "Annotation for last upgrade's failure should not be set") + + # change maintenanceWindows to current + maintenance_window_current = f"{(current_time-timedelta(minutes=30)).strftime('%H:%M')}-{(current_time+timedelta(minutes=30)).strftime('%H:%M')}" + pg_patch_version_15_in_mw = { + "spec": { + "postgresql": { + "version": "15" + }, + "maintenanceWindows": [ + maintenance_window_current + ] + } + } - self.evantuallyEqual(check_version_14, "14", "Version was not upgrade to 14") + k8s.api.custom_objects_api.patch_namespaced_custom_object( + "acid.zalan.do", "v1", "default", "postgresqls", "acid-upgrade-test", pg_patch_version_15_in_mw) + self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync") + + k8s.wait_for_pod_failover(master_nodes, 'spilo-role=master,' + cluster_label) + k8s.wait_for_pod_start('spilo-role=master,' + cluster_label) + k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label) + self.eventuallyEqual(check_version, 15, "Version should be upgraded from 14 to 15") + + # check if annotation for last upgrade's success is updated after 
second upgrade + third_annotations = get_annotations() + self.assertIsNotNone(third_annotations.get("last-major-upgrade-success"), "Annotation for last upgrade's success is not set") + self.assertNotEqual(annotations.get("last-major-upgrade-success"), third_annotations.get("last-major-upgrade-success"), "Annotation for last upgrade's success is not updated") + + # test upgrade with failed upgrade annotation + pg_patch_version_17 = { + "metadata": { + "annotations": { + "last-major-upgrade-failure": "2024-01-02T15:04:05Z" + }, + }, + "spec": { + "postgresql": { + "version": "17" + }, + }, + } + k8s.api.custom_objects_api.patch_namespaced_custom_object( + "acid.zalan.do", "v1", "default", "postgresqls", "acid-upgrade-test", pg_patch_version_17) + self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync") + + k8s.wait_for_pod_failover(master_nodes, 'spilo-role=replica,' + cluster_label) + k8s.wait_for_pod_start('spilo-role=master,' + cluster_label) + k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label) + self.eventuallyEqual(check_version, 15, "Version should not be upgraded because annotation for last upgrade's failure is set") + + # change the version back to 15 and should remove failure annotation + k8s.api.custom_objects_api.patch_namespaced_custom_object( + "acid.zalan.do", "v1", "default", "postgresqls", "acid-upgrade-test", pg_patch_version_15_in_mw) + self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync") + + k8s.wait_for_pod_start('spilo-role=master,' + cluster_label) + k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label) + + self.eventuallyEqual(check_version, 15, "Version should not be upgraded from 15") + fourth_annotations = get_annotations() + self.assertIsNone(fourth_annotations.get("last-major-upgrade-failure"), "Annotation for last upgrade's failure is not removed") @timeout_decorator.timeout(TEST_TIMEOUT_SEC) def test_persistent_volume_claim_retention_policy(self): @@ -1347,17 +1467,11 @@ def test_multi_namespace_support(self): k8s.wait_for_pod_start("spilo-role=master", self.test_namespace) k8s.wait_for_pod_start("spilo-role=replica", self.test_namespace) self.assert_master_is_unique(self.test_namespace, "acid-test-cluster") + # acid-test-cluster will be deleted in test_owner_references test except timeout_decorator.TimeoutError: print('Operator log: {}'.format(k8s.get_operator_log())) raise - finally: - # delete the new cluster so that the k8s_api.get_operator_state works correctly in subsequent tests - # ideally we should delete the 'test' namespace here but - # the pods inside the namespace stuck in the Terminating state making the test time out - k8s.api.custom_objects_api.delete_namespaced_custom_object( - "acid.zalan.do", "v1", self.test_namespace, "postgresqls", "acid-test-cluster") - time.sleep(5) @timeout_decorator.timeout(TEST_TIMEOUT_SEC) @unittest.skip("Skipping this test until fixed") @@ -1568,25 +1682,96 @@ def test_overwrite_pooler_deployment(self): self.eventuallyEqual(lambda: k8s.count_running_pods("connection-pooler="+pooler_name), 0, "Pooler pods not scaled down") + @timeout_decorator.timeout(TEST_TIMEOUT_SEC) + def test_owner_references(self): + ''' + Enable owner references, test if resources get updated and test cascade deletion of test cluster. 
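# Illustrative sketch (not part of this patch): the upgrade test above builds
# maintenanceWindows entries as "HH:MM-HH:MM" strings via strftime and expects the
# operator to defer the major version upgrade until the current time falls inside
# the window. A minimal, hypothetical check of that semantics (assuming same-day
# windows only, which is all the test generates):
from datetime import datetime

def in_maintenance_window(window, now=None):
    now = now or datetime.now()
    start_s, end_s = window.split("-")
    start = datetime.strptime(start_s, "%H:%M").time()
    end = datetime.strptime(end_s, "%H:%M").time()
    return start <= now.time() <= end

# a window spanning 30 minutes before and after "now" always evaluates to True here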
+ ''' + k8s = self.k8s + cluster_name = 'acid-test-cluster' + cluster_label = 'application=spilo,cluster-name={}'.format(cluster_name) + default_test_cluster = 'acid-minimal-cluster' + + try: + # enable owner references in config + enable_owner_refs = { + "data": { + "enable_owner_references": "true" + } + } + k8s.update_config(enable_owner_refs) + self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync") + + time.sleep(5) # wait for the operator to sync the cluster and update resources + + # check if child resources were updated with owner references + self.assertTrue(self.check_cluster_child_resources_owner_references(cluster_name, self.test_namespace), "Owner references not set on all child resources of {}".format(cluster_name)) + self.assertTrue(self.check_cluster_child_resources_owner_references(default_test_cluster), "Owner references not set on all child resources of {}".format(default_test_cluster)) + + # delete the new cluster to test owner references + # and also to make k8s_api.get_operator_state work better in subsequent tests + # ideally we should delete the 'test' namespace here but the pods + # inside the namespace stuck in the Terminating state making the test time out + k8s.api.custom_objects_api.delete_namespaced_custom_object( + "acid.zalan.do", "v1", self.test_namespace, "postgresqls", cluster_name) + + # child resources with owner references should be deleted via owner references + self.eventuallyEqual(lambda: k8s.count_pods_with_label(cluster_label), 0, "Pods not deleted") + self.eventuallyEqual(lambda: k8s.count_statefulsets_with_label(cluster_label), 0, "Statefulset not deleted") + self.eventuallyEqual(lambda: k8s.count_services_with_label(cluster_label), 0, "Services not deleted") + self.eventuallyEqual(lambda: k8s.count_endpoints_with_label(cluster_label), 0, "Endpoints not deleted") + self.eventuallyEqual(lambda: k8s.count_pdbs_with_label(cluster_label), 0, "Pod disruption budget not deleted") + self.eventuallyEqual(lambda: k8s.count_secrets_with_label(cluster_label), 0, "Secrets were not deleted") + + time.sleep(5) # wait for the operator to also delete the PVCs + + # pvcs do not have an owner reference but will deleted by the operator almost immediately + self.eventuallyEqual(lambda: k8s.count_pvcs_with_label(cluster_label), 0, "PVCs not deleted") + + # disable owner references in config + disable_owner_refs = { + "data": { + "enable_owner_references": "false" + } + } + k8s.update_config(disable_owner_refs) + self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync") + + time.sleep(5) # wait for the operator to remove owner references + + # check if child resources were updated without Postgresql owner references + self.assertTrue(self.check_cluster_child_resources_owner_references(default_test_cluster, "default", True), "Owner references still present on some child resources of {}".format(default_test_cluster)) + + except timeout_decorator.TimeoutError: + print('Operator log: {}'.format(k8s.get_operator_log())) + raise + @timeout_decorator.timeout(TEST_TIMEOUT_SEC) def test_password_rotation(self): ''' Test password rotation and removal of users due to retention policy ''' k8s = self.k8s + cluster_label = 'application=spilo,cluster-name=acid-minimal-cluster' leader = k8s.get_cluster_leader_pod() today = date.today() + # remember number of secrets to make sure it stays the same + secret_count = k8s.count_secrets_with_label(cluster_label) + # enable password rotation for 
owner of foo database - pg_patch_inplace_rotation_for_owner = { + pg_patch_rotation_single_users = { "spec": { + "usersIgnoringSecretRotation": [ + "test.db_user" + ], "usersWithInPlaceSecretRotation": [ "zalando" ] } } k8s.api.custom_objects_api.patch_namespaced_custom_object( - "acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_patch_inplace_rotation_for_owner) + "acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_patch_rotation_single_users) self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync") # check if next rotation date was set in secret @@ -1629,6 +1814,7 @@ def test_password_rotation(self): enable_password_rotation = { "data": { "enable_password_rotation": "true", + "inherited_annotations": "environment", "password_rotation_interval": "30", "password_rotation_user_retention": "30", # should be set to 60 }, @@ -1675,6 +1861,29 @@ def test_password_rotation(self): self.eventuallyEqual(lambda: len(self.query_database_with_user(leader.metadata.name, "postgres", "SELECT 1", "foo_user")), 1, "Could not connect to the database with rotation user {}".format(rotation_user), 10, 5) + # add annotation which triggers syncSecrets call + pg_annotation_patch = { + "metadata": { + "annotations": { + "environment": "test", + } + } + } + k8s.api.custom_objects_api.patch_namespaced_custom_object( + "acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_annotation_patch) + self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync") + time.sleep(10) + self.eventuallyEqual(lambda: k8s.count_secrets_with_label(cluster_label), secret_count, "Unexpected number of secrets") + + # check if rotation has been ignored for user from test_cross_namespace_secrets test + db_user_secret = k8s.get_secret(username="test.db_user", namespace="test") + secret_username = str(base64.b64decode(db_user_secret.data["username"]), 'utf-8') + self.assertEqual("test.db_user", secret_username, + "Unexpected username in secret of test.db_user: expected {}, got {}".format("test.db_user", secret_username)) + + # check if annotation for secret has been updated + self.assertTrue("environment" in db_user_secret.metadata.annotations, "Added annotation was not propagated to secret") + # disable password rotation for all other users (foo_user) # and pick smaller intervals to see if the third fake rotation user is dropped enable_password_rotation = { @@ -1756,7 +1965,6 @@ def test_rolling_update_flag(self): replica = k8s.get_cluster_replica_pod() self.assertTrue(replica.metadata.creation_timestamp > old_creation_timestamp, "Old master pod was not recreated") - except timeout_decorator.TimeoutError: print('Operator log: {}'.format(k8s.get_operator_log())) raise @@ -1913,7 +2121,7 @@ def test_statefulset_annotation_propagation(self): patch_sset_propagate_annotations = { "data": { "downscaler_annotations": "deployment-time,downscaler/*", - "inherited_annotations": "owned-by", + "inherited_annotations": "environment,owned-by", } } k8s.update_config(patch_sset_propagate_annotations) @@ -1974,6 +2182,157 @@ def test_standby_cluster(self): "acid.zalan.do", "v1", "default", "postgresqls", "acid-standby-cluster") time.sleep(5) + @timeout_decorator.timeout(TEST_TIMEOUT_SEC) + def test_stream_resources(self): + ''' + Create and delete fabric event streaming resources. 
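# Illustrative sketch (not part of this patch): the rotation assertions above read
# credentials back from Kubernetes secrets, whose .data values are base64-encoded
# by the API server. A small hypothetical helper mirroring what the test does with
# k8s.get_secret(...).data["username"]:
import base64

def decode_secret_field(secret_data, key):
    # values in secret.data arrive base64-encoded; decode to a plain string
    return base64.b64decode(secret_data[key]).decode("utf-8")

assert decode_secret_field({"username": "dGVzdC5kYl91c2Vy"}, "username") == "test.db_user"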
+ ''' + k8s = self.k8s + self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, + "Operator does not get in sync") + leader = k8s.get_cluster_leader_pod() + + # patch ClusterRole with CRUD privileges on FES resources + cluster_role = k8s.api.rbac_api.read_cluster_role("postgres-operator") + fes_cluster_role_rule = client.V1PolicyRule( + api_groups=["zalando.org"], + resources=["fabriceventstreams"], + verbs=["create", "delete", "deletecollection", "get", "list", "patch", "update", "watch"] + ) + cluster_role.rules.append(fes_cluster_role_rule) + + try: + k8s.api.rbac_api.patch_cluster_role("postgres-operator", cluster_role) + + # create a table in one of the database of acid-minimal-cluster + create_stream_table = """ + CREATE TABLE test_table (id int, payload jsonb); + """ + self.query_database(leader.metadata.name, "foo", create_stream_table) + + # update the manifest with the streams section + patch_streaming_config = { + "spec": { + "patroni": { + "slots": { + "manual_slot": { + "type": "physical" + } + } + }, + "streams": [ + { + "applicationId": "test-app", + "batchSize": 100, + "cpu": "100m", + "memory": "200Mi", + "database": "foo", + "enableRecovery": True, + "tables": { + "test_table": { + "eventType": "test-event", + "idColumn": "id", + "payloadColumn": "payload", + "recoveryEventType": "test-event-dlq" + } + } + }, + { + "applicationId": "test-app2", + "batchSize": 100, + "database": "foo", + "enableRecovery": True, + "tables": { + "test_non_exist_table": { + "eventType": "test-event", + "idColumn": "id", + "payloadColumn": "payload", + "ignoreRecovery": True + } + } + } + ] + } + } + k8s.api.custom_objects_api.patch_namespaced_custom_object( + 'acid.zalan.do', 'v1', 'default', 'postgresqls', 'acid-minimal-cluster', patch_streaming_config) + self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync") + + # check if publication, slot, and fes resource are created + get_publication_query = """ + SELECT * FROM pg_publication WHERE pubname = 'fes_foo_test_app'; + """ + get_slot_query = """ + SELECT * FROM pg_replication_slots WHERE slot_name = 'fes_foo_test_app'; + """ + self.eventuallyEqual(lambda: len(self.query_database(leader.metadata.name, "foo", get_publication_query)), 1, + "Publication is not created", 10, 5) + self.eventuallyEqual(lambda: len(self.query_database(leader.metadata.name, "foo", get_slot_query)), 1, + "Replication slot is not created", 10, 5) + self.eventuallyEqual(lambda: len(k8s.api.custom_objects_api.list_namespaced_custom_object( + "zalando.org", "v1", "default", "fabriceventstreams", label_selector="cluster-name=acid-minimal-cluster")["items"]), 1, + "Could not find Fabric Event Stream resource", 10, 5) + + # check if the non-existing table in the stream section does not create a publication and slot + get_publication_query_not_exist_table = """ + SELECT * FROM pg_publication WHERE pubname = 'fes_foo_test_app2'; + """ + get_slot_query_not_exist_table = """ + SELECT * FROM pg_replication_slots WHERE slot_name = 'fes_foo_test_app2'; + """ + self.eventuallyEqual(lambda: len(self.query_database(leader.metadata.name, "foo", get_publication_query_not_exist_table)), 0, + "Publication is created for non-existing tables", 10, 5) + self.eventuallyEqual(lambda: len(self.query_database(leader.metadata.name, "foo", get_slot_query_not_exist_table)), 0, + "Replication slot is created for non-existing tables", 10, 5) + + # grant create and ownership of test_table to foo_user, reset search path to default + 
grant_permission_foo_user = """ + GRANT CREATE ON DATABASE foo TO foo_user; + ALTER TABLE test_table OWNER TO foo_user; + ALTER ROLE foo_user RESET search_path; + """ + self.query_database(leader.metadata.name, "foo", grant_permission_foo_user) + # non-postgres user creates a publication + create_nonstream_publication = """ + CREATE PUBLICATION mypublication FOR TABLE test_table; + """ + self.query_database_with_user(leader.metadata.name, "foo", create_nonstream_publication, "foo_user") + + # remove the streams section from the manifest + patch_streaming_config_removal = { + "spec": { + "streams": [] + } + } + k8s.api.custom_objects_api.patch_namespaced_custom_object( + 'acid.zalan.do', 'v1', 'default', 'postgresqls', 'acid-minimal-cluster', patch_streaming_config_removal) + self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync") + + # check if publication, slot, and fes resource are removed + self.eventuallyEqual(lambda: len(k8s.api.custom_objects_api.list_namespaced_custom_object( + "zalando.org", "v1", "default", "fabriceventstreams", label_selector="cluster-name=acid-minimal-cluster")["items"]), 0, + 'Could not delete Fabric Event Stream resource', 10, 5) + self.eventuallyEqual(lambda: len(self.query_database(leader.metadata.name, "foo", get_publication_query)), 0, + "Publication is not deleted", 10, 5) + self.eventuallyEqual(lambda: len(self.query_database(leader.metadata.name, "foo", get_slot_query)), 0, + "Replication slot is not deleted", 10, 5) + + # check the manual_slot and mypublication should not get deleted + get_manual_slot_query = """ + SELECT * FROM pg_replication_slots WHERE slot_name = 'manual_slot'; + """ + get_nonstream_publication_query = """ + SELECT * FROM pg_publication WHERE pubname = 'mypublication'; + """ + self.eventuallyEqual(lambda: len(self.query_database(leader.metadata.name, "postgres", get_manual_slot_query)), 1, + "Slot defined in patroni config is deleted", 10, 5) + self.eventuallyEqual(lambda: len(self.query_database(leader.metadata.name, "foo", get_nonstream_publication_query)), 1, + "Publication defined not in stream section is deleted", 10, 5) + + except timeout_decorator.TimeoutError: + print('Operator log: {}'.format(k8s.get_operator_log())) + raise + @timeout_decorator.timeout(TEST_TIMEOUT_SEC) def test_taint_based_eviction(self): ''' @@ -2038,7 +2397,9 @@ def test_zz_cluster_deletion(self): patch_delete_annotations = { "data": { "delete_annotation_date_key": "delete-date", - "delete_annotation_name_key": "delete-clustername" + "delete_annotation_name_key": "delete-clustername", + "enable_secrets_deletion": "false", + "enable_persistent_volume_claim_deletion": "false" } } k8s.update_config(patch_delete_annotations) @@ -2098,7 +2459,8 @@ def test_zz_cluster_deletion(self): self.eventuallyEqual(lambda: k8s.count_statefulsets_with_label(cluster_label), 0, "Statefulset not deleted") self.eventuallyEqual(lambda: k8s.count_deployments_with_label(cluster_label), 0, "Deployments not deleted") self.eventuallyEqual(lambda: k8s.count_pdbs_with_label(cluster_label), 0, "Pod disruption budget not deleted") - self.eventuallyEqual(lambda: k8s.count_secrets_with_label(cluster_label), 0, "Secrets not deleted") + self.eventuallyEqual(lambda: k8s.count_secrets_with_label(cluster_label), 8, "Secrets were deleted although disabled in config") + self.eventuallyEqual(lambda: k8s.count_pvcs_with_label(cluster_label), 3, "PVCs were deleted although disabled in config") except timeout_decorator.TimeoutError: print('Operator 
log: {}'.format(k8s.get_operator_log())) @@ -2184,6 +2546,46 @@ def assert_distributed_pods(self, target_nodes, cluster_labels='cluster-name=aci return True + def check_cluster_child_resources_owner_references(self, cluster_name, cluster_namespace='default', inverse=False): + k8s = self.k8s + + # check if child resources were updated with owner references + sset = k8s.api.apps_v1.read_namespaced_stateful_set(cluster_name, cluster_namespace) + self.assertTrue(self.has_postgresql_owner_reference(sset.metadata.owner_references, inverse), "statefulset owner reference check failed") + + svc = k8s.api.core_v1.read_namespaced_service(cluster_name, cluster_namespace) + self.assertTrue(self.has_postgresql_owner_reference(svc.metadata.owner_references, inverse), "primary service owner reference check failed") + replica_svc = k8s.api.core_v1.read_namespaced_service(cluster_name + "-repl", cluster_namespace) + self.assertTrue(self.has_postgresql_owner_reference(replica_svc.metadata.owner_references, inverse), "replica service owner reference check failed") + config_svc = k8s.api.core_v1.read_namespaced_service(cluster_name + "-config", cluster_namespace) + self.assertTrue(self.has_postgresql_owner_reference(config_svc.metadata.owner_references, inverse), "config service owner reference check failed") + + ep = k8s.api.core_v1.read_namespaced_endpoints(cluster_name, cluster_namespace) + self.assertTrue(self.has_postgresql_owner_reference(ep.metadata.owner_references, inverse), "primary endpoint owner reference check failed") + replica_ep = k8s.api.core_v1.read_namespaced_endpoints(cluster_name + "-repl", cluster_namespace) + self.assertTrue(self.has_postgresql_owner_reference(replica_ep.metadata.owner_references, inverse), "replica endpoint owner reference check failed") + config_ep = k8s.api.core_v1.read_namespaced_endpoints(cluster_name + "-config", cluster_namespace) + self.assertTrue(self.has_postgresql_owner_reference(config_ep.metadata.owner_references, inverse), "config endpoint owner reference check failed") + + pdb = k8s.api.policy_v1.read_namespaced_pod_disruption_budget("postgres-{}-pdb".format(cluster_name), cluster_namespace) + self.assertTrue(self.has_postgresql_owner_reference(pdb.metadata.owner_references, inverse), "primary pod disruption budget owner reference check failed") + + pdb = k8s.api.policy_v1.read_namespaced_pod_disruption_budget("postgres-{}-critical-op-pdb".format(cluster_name), cluster_namespace) + self.assertTrue(self.has_postgresql_owner_reference(pdb.metadata.owner_references, inverse), "pod disruption budget for critical operations owner reference check failed") + + pg_secret = k8s.api.core_v1.read_namespaced_secret("postgres.{}.credentials.postgresql.acid.zalan.do".format(cluster_name), cluster_namespace) + self.assertTrue(self.has_postgresql_owner_reference(pg_secret.metadata.owner_references, inverse), "postgres secret owner reference check failed") + standby_secret = k8s.api.core_v1.read_namespaced_secret("standby.{}.credentials.postgresql.acid.zalan.do".format(cluster_name), cluster_namespace) + self.assertTrue(self.has_postgresql_owner_reference(standby_secret.metadata.owner_references, inverse), "standby secret owner reference check failed") + + return True + + def has_postgresql_owner_reference(self, owner_references, inverse): + if inverse: + return owner_references is None or owner_references[0].kind != 'postgresql' + + return owner_references is not None and owner_references[0].kind == 'postgresql' and owner_references[0].controller + def 
list_databases(self, pod_name): ''' Get list of databases we might want to iterate over diff --git a/go.mod b/go.mod index 800c1cdde..9c0125229 100644 --- a/go.mod +++ b/go.mod @@ -1,71 +1,75 @@ module github.com/zalando/postgres-operator -go 1.19 +go 1.23.4 require ( - github.com/aws/aws-sdk-go v1.42.18 + github.com/aws/aws-sdk-go v1.53.8 github.com/golang/mock v1.6.0 - github.com/lib/pq v1.10.4 + github.com/lib/pq v1.10.9 github.com/motomux/pretty v0.0.0-20161209205251-b2aad2c9a95d github.com/pkg/errors v0.9.1 github.com/r3labs/diff v1.1.0 - github.com/sirupsen/logrus v1.9.0 - github.com/stretchr/testify v1.8.0 - golang.org/x/crypto v0.14.0 - golang.org/x/exp v0.0.0-20230108222341-4b8118a2686a + github.com/sirupsen/logrus v1.9.3 + github.com/stretchr/testify v1.9.0 + golang.org/x/crypto v0.31.0 + golang.org/x/exp v0.0.0-20240112132812-db7319d0e0e3 gopkg.in/yaml.v2 v2.4.0 - k8s.io/api v0.25.9 + k8s.io/api v0.30.4 k8s.io/apiextensions-apiserver v0.25.9 - k8s.io/apimachinery v0.25.9 - k8s.io/client-go v0.25.9 + k8s.io/apimachinery v0.30.4 + k8s.io/client-go v0.30.4 k8s.io/code-generator v0.25.9 ) require ( - github.com/PuerkitoBio/purell v1.1.1 // indirect - github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect + github.com/Masterminds/semver v1.5.0 github.com/davecgh/go-spew v1.1.1 // indirect - github.com/emicklei/go-restful/v3 v3.8.0 // indirect + github.com/emicklei/go-restful/v3 v3.11.0 // indirect github.com/evanphx/json-patch v4.12.0+incompatible // indirect - github.com/go-logr/logr v1.2.3 // indirect - github.com/go-openapi/jsonpointer v0.19.5 // indirect - github.com/go-openapi/jsonreference v0.19.5 // indirect - github.com/go-openapi/swag v0.19.14 // indirect + github.com/go-logr/logr v1.4.1 // indirect + github.com/go-openapi/jsonpointer v0.19.6 // indirect + github.com/go-openapi/jsonreference v0.20.2 // indirect + github.com/go-openapi/swag v0.22.3 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/golang/protobuf v1.5.2 // indirect - github.com/google/gnostic v0.5.7-v3refs // indirect - github.com/google/go-cmp v0.5.8 // indirect - github.com/google/gofuzz v1.1.0 // indirect + github.com/golang/protobuf v1.5.4 // indirect + github.com/google/gnostic-models v0.6.8 // indirect + github.com/google/go-cmp v0.6.0 // indirect + github.com/google/gofuzz v1.2.0 // indirect + github.com/google/uuid v1.3.0 // indirect + github.com/gorilla/websocket v1.5.0 // indirect github.com/imdario/mergo v0.3.6 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/kr/text v0.2.0 // indirect - github.com/mailru/easyjson v0.7.6 // indirect + github.com/mailru/easyjson v0.7.7 // indirect github.com/moby/spdystream v0.2.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/spf13/pflag v1.0.5 // indirect - golang.org/x/mod v0.8.0 // indirect - golang.org/x/net v0.17.0 // indirect - golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 // indirect - golang.org/x/sys v0.13.0 // indirect - golang.org/x/term v0.13.0 // indirect - golang.org/x/text v0.13.0 // indirect - 
golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 // indirect - golang.org/x/tools v0.6.0 // indirect + golang.org/x/mod v0.17.0 // indirect + golang.org/x/net v0.25.0 // indirect + golang.org/x/oauth2 v0.10.0 // indirect + golang.org/x/sync v0.10.0 // indirect + golang.org/x/sys v0.28.0 // indirect + golang.org/x/term v0.27.0 // indirect + golang.org/x/text v0.21.0 // indirect + golang.org/x/time v0.3.0 // indirect + golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d // indirect google.golang.org/appengine v1.6.7 // indirect - google.golang.org/protobuf v1.28.0 // indirect + google.golang.org/protobuf v1.33.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/gengo v0.0.0-20211129171323-c02415ce4185 // indirect - k8s.io/klog/v2 v2.70.1 // indirect - k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1 // indirect - k8s.io/utils v0.0.0-20220728103510-ee6ede2d64ed // indirect - sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 // indirect - sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect - sigs.k8s.io/yaml v1.2.0 // indirect + k8s.io/gengo v0.0.0-20220902162205-c0856e24416d // indirect + k8s.io/gengo/v2 v2.0.0-20240228010128-51d4e06bde70 // indirect + k8s.io/klog/v2 v2.120.1 // indirect + k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect + k8s.io/utils v0.0.0-20230726121419-3b25d923346b // indirect + sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect + sigs.k8s.io/yaml v1.3.0 // indirect ) diff --git a/go.sum b/go.sum index 6b0309daa..0e55f2dd7 100644 --- a/go.sum +++ b/go.sum @@ -1,148 +1,54 @@ -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= -cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= -cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= -cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= -cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= -cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= -cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= -cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= -cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= -cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= -cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= -cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= -cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= -cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= -cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= -cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= -cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= -cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/datastore v1.0.0/go.mod 
h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= -cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= -cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= -cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= -cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= -cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= -cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= -cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= -cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= -cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= -dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI= -github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= -github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww= +github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= -github.com/aws/aws-sdk-go v1.42.18 h1:2f/cDNwQ3e+yHxtPn1si0to3GalbNHwkRm461IjwRiM= -github.com/aws/aws-sdk-go v1.42.18/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q= -github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= -github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= -github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= +github.com/aws/aws-sdk-go v1.53.8 h1:eoqGb1WOHIrCFKo1d51cMcnt1ralfLFaEqRkC5Zzv8k= +github.com/aws/aws-sdk-go v1.53.8/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod 
h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= -github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153 h1:yUdfgN0XgIJw7foRItutHYUIhlcKzcSf5vDpdhQAKTc= -github.com/emicklei/go-restful/v3 v3.8.0 h1:eCZ8ulSerjdAiaNpF7GxXIE7ZCMo1moN1qX+S609eVw= -github.com/emicklei/go-restful/v3 v3.8.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= -github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= +github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= -github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= -github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY= -github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonreference v0.19.5 h1:1WJP/wi4OjB4iV8KVbH73rQaoialJrqv8gitZLxGLtM= -github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg= -github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/go-openapi/swag v0.19.14 h1:gm3vOOXfiuw5i9p5N9xJvfjvuofpyvLA9Wr6QfK5Fng= -github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= +github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= +github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE= +github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= +github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= +github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= +github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g= +github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= 
+github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= -github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= -github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= -github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= -github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= -github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= -github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/gnostic v0.5.7-v3refs 
h1:FhTMOKj2VhjpouxvWJAV1TL304uMlb9zcDqkl6cEI54= -github.com/google/gnostic v0.5.7-v3refs/go.mod h1:73MKFl6jIHelAJNaBGFzt3SPtZULs9dYrGFt8OiIsHQ= -github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= +github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg= -github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= -github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y= -github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= -github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec= +github.com/google/pprof 
v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= +github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= +github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/imdario/mergo v0.3.6 h1:xTNEAn+kxVO7dTZGu0CegyqKZmoWFI0rF8UxjlB2d28= github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= @@ -153,23 +59,20 @@ github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8Hm github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= -github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= -github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/lib/pq v1.10.4 h1:SO9z7FRPzA03QhHKJrH5BXA6HU1rS4V2nIVrrNC1iYk= -github.com/lib/pq v1.10.4/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= -github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.7.6 h1:8yTIVnZgCoiM1TgqoeTl+LfU5Jg6/xL3QhGQnimLYnA= -github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= +github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= +github.com/mailru/easyjson v0.7.7/go.mod 
h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8= github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -181,321 +84,105 @@ github.com/motomux/pretty v0.0.0-20161209205251-b2aad2c9a95d h1:LznySqW8MqVeFh+p github.com/motomux/pretty v0.0.0-20161209205251-b2aad2c9a95d/go.mod h1:u3hJ0kqCQu/cPpsu3RbCOPZ0d7V3IjPjv1adNRleM9I= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= -github.com/onsi/ginkgo/v2 v2.1.6 h1:Fx2POJZfKRQcM1pH49qSZiYeu319wji004qX+GDovrU= -github.com/onsi/gomega v1.20.1 h1:PA/3qinGoukvymdIDV8pii6tiZgC8kbmJO6Z5+b002Q= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= +github.com/onsi/ginkgo/v2 v2.15.0 h1:79HwNRBAZHOEwrczrgSOPy+eFTTlIGELKy5as+ClttY= +github.com/onsi/ginkgo/v2 v2.15.0/go.mod h1:HlxMHtYF57y6Dpf+mc5529KKmSq9h2FpCF+/ZkwUxKM= +github.com/onsi/gomega v1.31.0 h1:54UJxxj6cPInHS3a35wm6BK/F9nHYueZ1NVujHDrnXE= +github.com/onsi/gomega v1.31.0/go.mod h1:DW9aCi7U6Yi40wNVAvT6kzFnEVEI5n3DloYBiKiT6zk= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/r3labs/diff v1.1.0 h1:V53xhrbTHrWFWq3gI4b94AjgEJOerO1+1l0xyHOBi8M= github.com/r3labs/diff v1.1.0/go.mod h1:7WjXasNzi0vJetRcB/RqNl5dlIsmXcTTLmF5IoH6Xig= -github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0= -github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= -github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= +github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= +github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= +github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= +github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx 
v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= -go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= -go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc= -golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= -golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= -golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= -golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= -golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod 
h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= -golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20230108222341-4b8118a2686a h1:tlXy25amD5A7gOfbXdqCGN5k8ESEed/Ee1E5RcrYnqU= -golang.org/x/exp v0.0.0-20230108222341-4b8118a2686a/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= -golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= -golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= -golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= -golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= -golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= -golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U= +golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= +golang.org/x/exp v0.0.0-20240112132812-db7319d0e0e3 h1:hNQpMuAJe5CtcUqCXaWga3FHu+kQvCqcsoVaQgSV60o= +golang.org/x/exp v0.0.0-20240112132812-db7319d0e0e3/go.mod h1:idGWGoKP1toJGkd5/ig9ZLuPcZBC3ewk7SzmH0uou08= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.8.0 h1:LUYupSeNrTNCGzR/hVBk2NHZO4hXcVaW1k4Qx7rjPx8= -golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net 
v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= +golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= -golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= 
-golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 h1:RerP+noqYHUQ8CMRcPlC2nvTa4dcBIjegkuWdcUDuqg= -golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac= +golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= +golang.org/x/oauth2 v0.10.0 h1:zHCpF2Khkwy4mMB4bv0U37YtJdTGW8jI0glAApi0Kh8= +golang.org/x/oauth2 v0.10.0/go.mod h1:kTpgurOux7LqtuxjuyZa4Gj2gdezIt/jQtGnNFfypQI= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= +golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= -golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= +golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.13.0 h1:bb+I9cTfFazGW51MZqBVmZy7+JEJMouUHTUSKVQLBek= -golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= -golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q= +golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= -golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= -golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time 
v0.0.0-20220210224613-90d013bbcef8 h1:vVKdlvoWBphwdxWKrFZEuM0kGgGLxUOYcY4U/2Vjg44= -golang.org/x/time v0.0.0-20220210224613-90d013bbcef8/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= +golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= +golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= +golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod 
h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= -golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.6.0 h1:BOw41kyTf3PuCW1pVQf8+Cyg8pMlkYB1oo9iJ6D/lKM= -golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= -google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= -google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= 
-google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= -google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= -google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= -google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= -google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto 
v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= -google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= -google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= -google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= -google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= -google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= -google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= -google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw= -google.golang.org/protobuf 
v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= +google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= -gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= @@ -503,42 +190,33 @@ gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -k8s.io/api v0.25.9 h1:XuJ2bz2F52jZmp3YjUcp/pozH8kY1BlBHdXnoOXBP3U= -k8s.io/api v0.25.9/go.mod h1:9YRWzD0cRHzfsnf9e5OQsQ4Un6cbZ//Xv3jo44YKm2Y= +k8s.io/api v0.30.4 h1:XASIELmW8w8q0i1Y4124LqPoWMycLjyQti/fdYHYjCs= +k8s.io/api v0.30.4/go.mod h1:ZqniWRKu7WIeLijbbzetF4U9qZ03cg5IRwl8YVs8mX0= k8s.io/apiextensions-apiserver v0.25.9 h1:Pycd6lm2auABp9wKQHCFSEPG+NPdFSTJXPST6NJFzB8= k8s.io/apiextensions-apiserver v0.25.9/go.mod h1:ijGxmSG1GLOEaWhTuaEr0M7KUeia3mWCZa6FFQqpt1M= -k8s.io/apimachinery v0.25.9 h1:MPjgTz4dbAKJ/KiHIvDeYkFfIn7ueihqvT520HkV7v4= -k8s.io/apimachinery v0.25.9/go.mod h1:ZTl0drTQaFi5gMM3snYI5tWV1XJmRH1gfnDx2QCLsxk= -k8s.io/client-go v0.25.9 h1:U0S3nc71NRfHXiA0utyCkPt3Mv1SWpQw0g5VfBCv5xg= -k8s.io/client-go v0.25.9/go.mod h1:tmPyOtpbbkneXj65EYZ4sXun1BE/2F2XlRABVj9CBgc= +k8s.io/apimachinery v0.30.4 h1:5QHQI2tInzr8LsT4kU/2+fSeibH1eIHswNx480cqIoY= +k8s.io/apimachinery v0.30.4/go.mod 
h1:iexa2somDaxdnj7bha06bhb43Zpa6eWH8N8dbqVjTUc= +k8s.io/client-go v0.30.4 h1:eculUe+HPQoPbixfwmaSZGsKcOf7D288tH6hDAdd+wY= +k8s.io/client-go v0.30.4/go.mod h1:IBS0R/Mt0LHkNHF4E6n+SUDPG7+m2po6RZU7YHeOpzc= k8s.io/code-generator v0.25.9 h1:lgyAV9AIRYNxZxgLRXqsCAtqJLHvakot41CjEqD5W0w= k8s.io/code-generator v0.25.9/go.mod h1:DHfpdhSUrwqF0f4oLqCtF8gYbqlndNetjBEz45nWzJI= -k8s.io/gengo v0.0.0-20211129171323-c02415ce4185 h1:TT1WdmqqXareKxZ/oNXEUSwKlLiHzPMyB0t8BaFeBYI= -k8s.io/gengo v0.0.0-20211129171323-c02415ce4185/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= -k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= +k8s.io/gengo v0.0.0-20220902162205-c0856e24416d h1:U9tB195lKdzwqicbJvyJeOXV7Klv+wNAWENRnXEGi08= +k8s.io/gengo v0.0.0-20220902162205-c0856e24416d/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= +k8s.io/gengo/v2 v2.0.0-20240228010128-51d4e06bde70 h1:NGrVE502P0s0/1hudf8zjgwki1X/TByhmAoILTarmzo= +k8s.io/gengo/v2 v2.0.0-20240228010128-51d4e06bde70/go.mod h1:VH3AT8AaQOqiGjMF9p0/IM1Dj+82ZwjfxUP1IxaHE+8= k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= -k8s.io/klog/v2 v2.70.1 h1:7aaoSdahviPmR+XkS7FyxlkkXs6tHISSG03RxleQAVQ= -k8s.io/klog/v2 v2.70.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= -k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1 h1:MQ8BAZPZlWk3S9K4a9NCkIFQtZShWqoha7snGixVgEA= -k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1/go.mod h1:C/N6wCaBHeBHkHUesQOQy2/MZqGgMAFPqGsGQLdbZBU= -k8s.io/utils v0.0.0-20220728103510-ee6ede2d64ed h1:jAne/RjBTyawwAy0utX5eqigAwz/lQhTmy+Hr/Cpue4= -k8s.io/utils v0.0.0-20220728103510-ee6ede2d64ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= -rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= -rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 h1:iXTIw73aPyC+oRdyqqvVJuloN1p0AC/kzH07hu3NE+k= -sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= -sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE= -sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E= -sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q= +k8s.io/klog/v2 v2.120.1 h1:QXU6cPEOIslTGvZaXvFWiP9VKyeet3sawzTOvdXb4Vw= +k8s.io/klog/v2 v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= +k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag= +k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98= +k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI= +k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= +sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= +sigs.k8s.io/yaml 
v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= diff --git a/hack/update-codegen.sh b/hack/update-codegen.sh index ff78d68c3..e6fcae78c 100755 --- a/hack/update-codegen.sh +++ b/hack/update-codegen.sh @@ -15,7 +15,7 @@ cleanup() { } trap "cleanup" EXIT SIGINT -bash "${CODEGEN_PKG}/generate-groups.sh" all \ +bash "${CODEGEN_PKG}/generate-groups.sh" client,deepcopy,informer,lister \ "${OPERATOR_PACKAGE_ROOT}/pkg/generated" "${OPERATOR_PACKAGE_ROOT}/pkg/apis" \ "acid.zalan.do:v1 zalando.org:v1" \ --go-header-file "${SCRIPT_ROOT}"/hack/custom-boilerplate.go.txt \ diff --git a/kubectl-pg/cmd/create.go b/kubectl-pg/cmd/create.go index 00ee7ac24..3d34a7d25 100644 --- a/kubectl-pg/cmd/create.go +++ b/kubectl-pg/cmd/create.go @@ -25,8 +25,8 @@ package cmd import ( "context" "fmt" - "io/ioutil" "log" + "os" "github.com/spf13/cobra" v1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1" @@ -56,7 +56,7 @@ func create(fileName string) { if err != nil { log.Fatal(err) } - ymlFile, err := ioutil.ReadFile(fileName) + ymlFile, err := os.ReadFile(fileName) if err != nil { log.Fatal(err) } diff --git a/kubectl-pg/cmd/delete.go b/kubectl-pg/cmd/delete.go index 7737212b9..73a6e7b0b 100644 --- a/kubectl-pg/cmd/delete.go +++ b/kubectl-pg/cmd/delete.go @@ -25,8 +25,8 @@ package cmd import ( "context" "fmt" - "io/ioutil" "log" + "os" "github.com/spf13/cobra" v1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1" @@ -77,7 +77,7 @@ func deleteByFile(file string) { log.Fatal(err) } - ymlFile, err := ioutil.ReadFile(file) + ymlFile, err := os.ReadFile(file) if err != nil { log.Fatal(err) } diff --git a/kubectl-pg/cmd/update.go b/kubectl-pg/cmd/update.go index 6a5f4e36d..eb9259586 100644 --- a/kubectl-pg/cmd/update.go +++ b/kubectl-pg/cmd/update.go @@ -25,8 +25,8 @@ package cmd import ( "context" "fmt" - "io/ioutil" "log" + "os" "github.com/spf13/cobra" v1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1" @@ -60,7 +60,7 @@ func updatePgResources(fileName string) { if err != nil { log.Fatal(err) } - ymlFile, err := ioutil.ReadFile(fileName) + ymlFile, err := os.ReadFile(fileName) if err != nil { log.Fatal(err) } diff --git a/kubectl-pg/go.mod b/kubectl-pg/go.mod index e3b5b7eda..9b2e1bbc5 100644 --- a/kubectl-pg/go.mod +++ b/kubectl-pg/go.mod @@ -1,69 +1,74 @@ module github.com/zalando/postgres-operator/kubectl-pg -go 1.18 +go 1.23.4 require ( - github.com/spf13/cobra v1.4.0 - github.com/spf13/viper v1.9.0 - github.com/zalando/postgres-operator v1.10.0 - k8s.io/api v0.25.9 + github.com/spf13/cobra v1.8.1 + github.com/spf13/viper v1.19.0 + github.com/zalando/postgres-operator v1.13.0 + k8s.io/api v0.30.4 k8s.io/apiextensions-apiserver v0.25.9 - k8s.io/apimachinery v0.25.9 - k8s.io/client-go v0.25.9 + k8s.io/apimachinery v0.30.4 + k8s.io/client-go v0.30.4 ) require ( - github.com/PuerkitoBio/purell v1.1.1 // indirect - github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect - github.com/davecgh/go-spew v1.1.1 // indirect - github.com/emicklei/go-restful/v3 v3.8.0 // indirect - github.com/fsnotify/fsnotify v1.5.1 // indirect - github.com/go-logr/logr v1.2.3 // indirect - github.com/go-openapi/jsonpointer v0.19.5 // indirect - github.com/go-openapi/jsonreference v0.19.5 // indirect - github.com/go-openapi/swag v0.19.14 // indirect + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect + github.com/emicklei/go-restful/v3 v3.11.0 // indirect + github.com/fsnotify/fsnotify v1.7.0 // indirect + github.com/go-logr/logr v1.4.1 // 
indirect + github.com/go-openapi/jsonpointer v0.19.6 // indirect + github.com/go-openapi/jsonreference v0.20.2 // indirect + github.com/go-openapi/swag v0.22.3 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang/protobuf v1.5.2 // indirect - github.com/google/gnostic v0.5.7-v3refs // indirect - github.com/google/gofuzz v1.1.0 // indirect + github.com/golang/protobuf v1.5.4 // indirect + github.com/google/gnostic-models v0.6.8 // indirect + github.com/google/gofuzz v1.2.0 // indirect + github.com/google/uuid v1.4.0 // indirect + github.com/gorilla/websocket v1.5.0 // indirect github.com/hashicorp/hcl v1.0.0 // indirect github.com/imdario/mergo v0.3.6 // indirect - github.com/inconshreveable/mousetrap v1.0.0 // indirect + github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/kr/text v0.2.0 // indirect - github.com/magiconair/properties v1.8.5 // indirect - github.com/mailru/easyjson v0.7.6 // indirect - github.com/mitchellh/mapstructure v1.4.2 // indirect + github.com/magiconair/properties v1.8.7 // indirect + github.com/mailru/easyjson v0.7.7 // indirect + github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/moby/spdystream v0.2.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/motomux/pretty v0.0.0-20161209205251-b2aad2c9a95d // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect - github.com/pelletier/go-toml v1.9.4 // indirect - github.com/sirupsen/logrus v1.9.0 // indirect - github.com/spf13/afero v1.6.0 // indirect - github.com/spf13/cast v1.4.1 // indirect - github.com/spf13/jwalterweatherman v1.1.0 // indirect + github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect + github.com/pelletier/go-toml/v2 v2.2.2 // indirect + github.com/sagikazarmark/locafero v0.4.0 // indirect + github.com/sagikazarmark/slog-shim v0.1.0 // indirect + github.com/sirupsen/logrus v1.9.3 // indirect + github.com/sourcegraph/conc v0.3.0 // indirect + github.com/spf13/afero v1.11.0 // indirect + github.com/spf13/cast v1.6.0 // indirect github.com/spf13/pflag v1.0.5 // indirect - github.com/subosito/gotenv v1.2.0 // indirect - golang.org/x/crypto v0.8.0 // indirect - golang.org/x/net v0.9.0 // indirect - golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 // indirect - golang.org/x/sys v0.7.0 // indirect - golang.org/x/term v0.7.0 // indirect - golang.org/x/text v0.9.0 // indirect - golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 // indirect - google.golang.org/appengine v1.6.7 // indirect - google.golang.org/protobuf v1.28.0 // indirect + github.com/subosito/gotenv v1.6.0 // indirect + go.uber.org/multierr v1.11.0 // indirect + golang.org/x/crypto v0.31.0 // indirect + golang.org/x/exp v0.0.0-20240112132812-db7319d0e0e3 // indirect + golang.org/x/net v0.25.0 // indirect + golang.org/x/oauth2 v0.18.0 // indirect + golang.org/x/sys v0.28.0 // indirect + golang.org/x/term v0.27.0 // indirect + golang.org/x/text v0.21.0 // indirect + golang.org/x/time v0.5.0 // indirect + google.golang.org/appengine v1.6.8 // indirect + google.golang.org/protobuf v1.33.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect - gopkg.in/ini.v1 v1.63.2 // indirect + gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/klog/v2 v2.70.1 // indirect - k8s.io/kube-openapi 
v0.0.0-20220803162953-67bda5d908f1 // indirect - k8s.io/utils v0.0.0-20220728103510-ee6ede2d64ed // indirect - sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 // indirect - sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect - sigs.k8s.io/yaml v1.2.0 // indirect + k8s.io/klog/v2 v2.120.1 // indirect + k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect + k8s.io/utils v0.0.0-20230726121419-3b25d923346b // indirect + sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect + sigs.k8s.io/yaml v1.3.0 // indirect ) diff --git a/kubectl-pg/go.sum b/kubectl-pg/go.sum index 7069fe547..2237a9e03 100644 --- a/kubectl-pg/go.sum +++ b/kubectl-pg/go.sum @@ -1,764 +1,232 @@ -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= -cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= -cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= -cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= -cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= -cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= -cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= -cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= -cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= -cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= -cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= -cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= -cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= -cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= -cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= -cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= -cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= -cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY= -cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM= -cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY= -cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ= -cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= -cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= -cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= -cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= -cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= -cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= -cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= -cloud.google.com/go/datastore v1.1.0/go.mod 
h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/firestore v1.6.0/go.mod h1:afJwI0vaXwAG54kI7A//lP/lSPDkQORQuMkv56TxEPU= -cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= -cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= -cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= -cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= -cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= -cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= -cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= -cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= -cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= -dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI= -github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= -github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= -github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= -github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= -github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= -github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= -github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= -github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= -github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod 
h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= -github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= +github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= -github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153 h1:yUdfgN0XgIJw7foRItutHYUIhlcKzcSf5vDpdhQAKTc= -github.com/emicklei/go-restful/v3 v3.8.0 h1:eCZ8ulSerjdAiaNpF7GxXIE7ZCMo1moN1qX+S609eVw= -github.com/emicklei/go-restful/v3 v3.8.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= -github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= -github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= -github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= -github.com/fsnotify/fsnotify v1.5.1 h1:mZcQUHVQUQWoPXXtuf9yuEXKudkV2sx1E06UadKWpgI= -github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= -github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= -github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonpointer v0.19.5 
h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY= -github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonreference v0.19.5 h1:1WJP/wi4OjB4iV8KVbH73rQaoialJrqv8gitZLxGLtM= -github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg= -github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/go-openapi/swag v0.19.14 h1:gm3vOOXfiuw5i9p5N9xJvfjvuofpyvLA9Wr6QfK5Fng= -github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= -github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= +github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= +github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= +github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= +github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= +github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= +github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE= +github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= +github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= +github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= +github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g= +github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= -github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.3/go.mod 
h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= -github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= -github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= -github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= -github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= -github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= -github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= -github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/gnostic v0.5.7-v3refs h1:FhTMOKj2VhjpouxvWJAV1TL304uMlb9zcDqkl6cEI54= -github.com/google/gnostic v0.5.7-v3refs/go.mod h1:73MKFl6jIHelAJNaBGFzt3SPtZULs9dYrGFt8OiIsHQ= -github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= 
+github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= +github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= -github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= -github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= -github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec= github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= -github.com/googleapis/gax-go/v2 v2.0.5/go.mod 
h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= +github.com/google/uuid v1.4.0 h1:MtMxsa51/r9yyhkyLsVeVt0B+BGQZzpQiTQ4eHZ8bc4= +github.com/google/uuid v1.4.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= -github.com/hashicorp/consul/api v1.10.1/go.mod h1:XjsvQN+RJGWI2TWy1/kqaE16HrR2J/FWgkYjdZQsX9M= -github.com/hashicorp/consul/sdk v0.8.0/go.mod h1:GBvyrGALthsZObzUGsfgHZQDXjg4lOjagTIwIR1vPms= -github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= -github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= -github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= -github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= -github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= -github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= -github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= -github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= -github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= -github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= +github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= -github.com/hashicorp/mdns v1.0.1/go.mod h1:4gW7WsVCke5TE7EPeYliwHlRUyBtfCwuFwuMg2DmyNY= -github.com/hashicorp/memberlist v0.2.2/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= -github.com/hashicorp/serf v0.9.5/go.mod h1:UWDWwZeL5cuWDJdl0C6wrvrUwEqtQ4ZKBKKENpqIUyk= -github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/imdario/mergo v0.3.6 h1:xTNEAn+kxVO7dTZGu0CegyqKZmoWFI0rF8UxjlB2d28= github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= -github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= 
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= -github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= -github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= -github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs= -github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/magiconair/properties v1.8.5 h1:b6kJs+EmPFMYGkow9GiUyCyOvIwYetYJ3fSaWak/Gls= -github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= -github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.7.6 h1:8yTIVnZgCoiM1TgqoeTl+LfU5Jg6/xL3QhGQnimLYnA= -github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= -github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= -github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= -github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= -github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= -github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= -github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= -github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= -github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= -github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= -github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= -github.com/mitchellh/mapstructure 
v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.4.2 h1:6h7AQ0yhTcIsmFmnAwQls75jp2Gzs4iB8W7pjMO+rqo= -github.com/mitchellh/mapstructure v1.4.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= +github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= +github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= +github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= +github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8= github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/motomux/pretty v0.0.0-20161209205251-b2aad2c9a95d h1:LznySqW8MqVeFh+pW6rOkFdld9QQ7jRydBKKM6jyPVI= github.com/motomux/pretty v0.0.0-20161209205251-b2aad2c9a95d/go.mod h1:u3hJ0kqCQu/cPpsu3RbCOPZ0d7V3IjPjv1adNRleM9I= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= -github.com/onsi/ginkgo/v2 v2.1.6 h1:Fx2POJZfKRQcM1pH49qSZiYeu319wji004qX+GDovrU= -github.com/onsi/gomega v1.20.1 h1:PA/3qinGoukvymdIDV8pii6tiZgC8kbmJO6Z5+b002Q= -github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= -github.com/pelletier/go-toml v1.9.4 h1:tjENF6MfZAg8e4ZmZTeWaWiT2vXtsoO6+iuOjFhECwM= -github.com/pelletier/go-toml v1.9.4/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= -github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= +github.com/onsi/ginkgo/v2 v2.15.0 h1:79HwNRBAZHOEwrczrgSOPy+eFTTlIGELKy5as+ClttY= 
+github.com/onsi/ginkgo/v2 v2.15.0/go.mod h1:HlxMHtYF57y6Dpf+mc5529KKmSq9h2FpCF+/ZkwUxKM= +github.com/onsi/gomega v1.31.0 h1:54UJxxj6cPInHS3a35wm6BK/F9nHYueZ1NVujHDrnXE= +github.com/onsi/gomega v1.31.0/go.mod h1:DW9aCi7U6Yi40wNVAvT6kzFnEVEI5n3DloYBiKiT6zk= +github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM= +github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= -github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s= -github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= -github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= +github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= -github.com/sagikazarmark/crypt v0.1.0/go.mod h1:B/mN0msZuINBtQ1zZLEQcegFJJf9vnYIR88KRMEuODE= -github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= -github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0= -github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= -github.com/spf13/afero v1.6.0 h1:xoax2sJ2DT8S8xA2paPFjDCScCNeWsg75VG0DLRreiY= -github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= -github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= -github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cobra v1.4.0 h1:y+wJpx64xcgO1V+RcnwW0LEHxTKRi2ZDPSBjWnrg88Q= -github.com/spf13/cobra v1.4.0/go.mod h1:Wo4iy3BUC+X2Fybo0PDqwJIv3dNRiZLHQymsfxlB84g= -github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= -github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= +github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6keLGt6kNQ= +github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4= +github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE= +github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ= +github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= +github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/sourcegraph/conc v0.3.0 
h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo= +github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0= +github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= +github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= +github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0= +github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= +github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= +github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/viper v1.9.0 h1:yR6EXjTp0y0cLN8OZg1CRZmOBdI88UcGkhgyJhu6nZk= -github.com/spf13/viper v1.9.0/go.mod h1:+i6ajR7OX2XaiBkrcZJFK21htRk7eDeLg7+O6bhUPP4= -github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= +github.com/spf13/viper v1.19.0 h1:RWq5SEjt8o25SROyN3z2OrDB9l7RPd3lwTWU8EcEdcI= +github.com/spf13/viper v1.19.0/go.mod h1:GQUN9bilAbhU/jgc1bKs99f/suXKeUMct8Adx5+Ntkg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk= -github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s= -github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= -github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= +github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= 
-github.com/zalando/postgres-operator v1.10.0 h1:7/Xp9v6knCbZc4SXc3t6/5uyiHpqumc2SYPudXazJZw= -github.com/zalando/postgres-operator v1.10.0/go.mod h1:UYVdslgiYgsKSuU24Mne2qO67nuWTJwWiT1WQDurROs= -go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= -go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= -go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ= -go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= -go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= -go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= -go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= -go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= -go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= -golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +github.com/zalando/postgres-operator v1.13.0 h1:T9Mb+ZRQyTxXbagIK66GLVGCwM3661aX2lOkNpax4s8= +github.com/zalando/postgres-operator v1.13.0/go.mod h1:WiMEKzUny2lJHYle+7+D/5BhlvPn8prl76rEDYLsQAg= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.8.0 h1:pd9TJtTueMTVQXzk8E2XESSMQDj/U7OUu0PqJqPXQjQ= -golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE= -golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= -golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= -golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= -golang.org/x/exp 
v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= -golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= -golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= -golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= -golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= -golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= -golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U= +golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= +golang.org/x/exp v0.0.0-20240112132812-db7319d0e0e3 h1:hNQpMuAJe5CtcUqCXaWga3FHu+kQvCqcsoVaQgSV60o= +golang.org/x/exp v0.0.0-20240112132812-db7319d0e0e3/go.mod h1:idGWGoKP1toJGkd5/ig9ZLuPcZBC3ewk7SzmH0uou08= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.2/go.mod 
h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net 
v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= -golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.9.0 h1:aWJ/m6xSmxWBx+V0XRHTlrYrPG56jKsLdTFmsSsCzOM= -golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 h1:RerP+noqYHUQ8CMRcPlC2nvTa4dcBIjegkuWdcUDuqg= -golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac= +golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= +golang.org/x/oauth2 v0.18.0 h1:09qnuIAgzdx1XplqJvW6CQqMCtGZykZWcXzPMPUusvI= 
+golang.org/x/oauth2 v0.18.0/go.mod h1:Wf7knwG0MPoWIMMBgFlEaSUDaKskp0dCfrlJRJXbBi8= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
-golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.7.0 h1:3jlCCIQZPdOYu1h8BkNvLz8Kgwtae2cagcG/VamtZRU= -golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= +golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.7.0 h1:BEvjmm5fURWqcfbSKTdpkDXYBrUS1c0m8agp14W48vQ= -golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY= -golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q= +golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE= -golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= -golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 h1:vVKdlvoWBphwdxWKrFZEuM0kGgGLxUOYcY4U/2Vjg44= -golang.org/x/time v0.0.0-20220210224613-90d013bbcef8/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= +golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= +golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= +golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= +golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools 
v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= -golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools 
v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= -golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= -golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= -google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= -google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.29.0/go.mod 
h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= -google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= -google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= -google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= -google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= -google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= -google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= -google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= -google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4= -google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw= -google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU= -google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k= -google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= -google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= -google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= -google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod 
h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= -google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= -google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= -google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= -google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod 
h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= -google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= -google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= -google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= -google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= -google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w= -google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= -google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= -google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= -google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= -google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= -google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= -google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= -google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= -google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= -google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= -google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf 
v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= -google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= +google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw= -google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= +google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= -gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= -gopkg.in/ini.v1 v1.63.2 h1:tGK/CyBg7SMzb60vP1M03vNZ3VDu3wGQJwn7Sxi9r3c= -gopkg.in/ini.v1 v1.63.2/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= +gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod 
h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -k8s.io/api v0.25.9 h1:XuJ2bz2F52jZmp3YjUcp/pozH8kY1BlBHdXnoOXBP3U= -k8s.io/api v0.25.9/go.mod h1:9YRWzD0cRHzfsnf9e5OQsQ4Un6cbZ//Xv3jo44YKm2Y= +k8s.io/api v0.30.4 h1:XASIELmW8w8q0i1Y4124LqPoWMycLjyQti/fdYHYjCs= +k8s.io/api v0.30.4/go.mod h1:ZqniWRKu7WIeLijbbzetF4U9qZ03cg5IRwl8YVs8mX0= k8s.io/apiextensions-apiserver v0.25.9 h1:Pycd6lm2auABp9wKQHCFSEPG+NPdFSTJXPST6NJFzB8= k8s.io/apiextensions-apiserver v0.25.9/go.mod h1:ijGxmSG1GLOEaWhTuaEr0M7KUeia3mWCZa6FFQqpt1M= -k8s.io/apimachinery v0.25.9 h1:MPjgTz4dbAKJ/KiHIvDeYkFfIn7ueihqvT520HkV7v4= -k8s.io/apimachinery v0.25.9/go.mod h1:ZTl0drTQaFi5gMM3snYI5tWV1XJmRH1gfnDx2QCLsxk= -k8s.io/client-go v0.25.9 h1:U0S3nc71NRfHXiA0utyCkPt3Mv1SWpQw0g5VfBCv5xg= -k8s.io/client-go v0.25.9/go.mod h1:tmPyOtpbbkneXj65EYZ4sXun1BE/2F2XlRABVj9CBgc= -k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= -k8s.io/klog/v2 v2.70.1 h1:7aaoSdahviPmR+XkS7FyxlkkXs6tHISSG03RxleQAVQ= -k8s.io/klog/v2 v2.70.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= -k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1 h1:MQ8BAZPZlWk3S9K4a9NCkIFQtZShWqoha7snGixVgEA= -k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1/go.mod h1:C/N6wCaBHeBHkHUesQOQy2/MZqGgMAFPqGsGQLdbZBU= -k8s.io/utils v0.0.0-20220728103510-ee6ede2d64ed h1:jAne/RjBTyawwAy0utX5eqigAwz/lQhTmy+Hr/Cpue4= -k8s.io/utils v0.0.0-20220728103510-ee6ede2d64ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= -rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= -rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 h1:iXTIw73aPyC+oRdyqqvVJuloN1p0AC/kzH07hu3NE+k= -sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= -sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE= -sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E= -sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q= -sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= +k8s.io/apimachinery v0.30.4 h1:5QHQI2tInzr8LsT4kU/2+fSeibH1eIHswNx480cqIoY= +k8s.io/apimachinery v0.30.4/go.mod h1:iexa2somDaxdnj7bha06bhb43Zpa6eWH8N8dbqVjTUc= +k8s.io/client-go v0.30.4 h1:eculUe+HPQoPbixfwmaSZGsKcOf7D288tH6hDAdd+wY= +k8s.io/client-go v0.30.4/go.mod h1:IBS0R/Mt0LHkNHF4E6n+SUDPG7+m2po6RZU7YHeOpzc= +k8s.io/klog/v2 v2.120.1 h1:QXU6cPEOIslTGvZaXvFWiP9VKyeet3sawzTOvdXb4Vw= +k8s.io/klog/v2 
v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= +k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag= +k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98= +k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI= +k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= +sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= +sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= diff --git a/docker/logical-backup/Dockerfile b/logical-backup/Dockerfile similarity index 88% rename from docker/logical-backup/Dockerfile rename to logical-backup/Dockerfile index 08553fc5f..137f4efa8 100644 --- a/docker/logical-backup/Dockerfile +++ b/logical-backup/Dockerfile @@ -1,4 +1,5 @@ -FROM registry.opensource.zalan.do/library/ubuntu-18.04:latest +ARG BASE_IMAGE=registry.opensource.zalan.do/library/ubuntu-22.04:latest +FROM ${BASE_IMAGE} LABEL maintainer="Team ACID @ Zalando " SHELL ["/bin/bash", "-o", "pipefail", "-c"] @@ -24,12 +25,11 @@ RUN apt-get update \ && curl --silent https://www.postgresql.org/media/keys/ACCC4CF8.asc | apt-key add - \ && apt-get update \ && apt-get install --no-install-recommends -y \ + postgresql-client-17 \ + postgresql-client-16 \ postgresql-client-15 \ postgresql-client-14 \ postgresql-client-13 \ - postgresql-client-12 \ - postgresql-client-11 \ - postgresql-client-10 \ && apt-get clean \ && rm -rf /var/lib/apt/lists/* diff --git a/docker/logical-backup/dump.sh b/logical-backup/dump.sh similarity index 90% rename from docker/logical-backup/dump.sh rename to logical-backup/dump.sh index 3d2f60911..25641c3b5 100755 --- a/docker/logical-backup/dump.sh +++ b/logical-backup/dump.sh @@ -45,7 +45,7 @@ function compress { } function az_upload { - PATH_TO_BACKUP=$LOGICAL_BACKUP_S3_BUCKET"/spilo/"$SCOPE$LOGICAL_BACKUP_S3_BUCKET_SCOPE_SUFFIX"/logical_backups/"$(date +%s).sql.gz + PATH_TO_BACKUP=$LOGICAL_BACKUP_S3_BUCKET"/"$LOGICAL_BACKUP_S3_BUCKET_PREFIX"/"$SCOPE$LOGICAL_BACKUP_S3_BUCKET_SCOPE_SUFFIX"/logical_backups/"$(date +%s).sql.gz az storage blob upload --file "$1" --account-name "$LOGICAL_BACKUP_AZURE_STORAGE_ACCOUNT_NAME" --account-key "$LOGICAL_BACKUP_AZURE_STORAGE_ACCOUNT_KEY" -c "$LOGICAL_BACKUP_AZURE_STORAGE_CONTAINER" -n "$PATH_TO_BACKUP" } @@ -72,7 +72,7 @@ function aws_delete_outdated { cutoff_date=$(date -d "$LOGICAL_BACKUP_S3_RETENTION_TIME ago" +%F) # mimic bucket setup from Spilo - prefix="spilo/"$SCOPE$LOGICAL_BACKUP_S3_BUCKET_SCOPE_SUFFIX"/logical_backups/" + prefix=$LOGICAL_BACKUP_S3_BUCKET_PREFIX"/"$SCOPE$LOGICAL_BACKUP_S3_BUCKET_SCOPE_SUFFIX"/logical_backups/" args=( "--no-paginate" @@ -107,7 +107,7 @@ function aws_upload { # mimic bucket setup from Spilo # to keep logical backups at the same path as WAL # NB: $LOGICAL_BACKUP_S3_BUCKET_SCOPE_SUFFIX already contains the leading "/" when set by the Postgres Operator - 
PATH_TO_BACKUP=s3://$LOGICAL_BACKUP_S3_BUCKET"/spilo/"$SCOPE$LOGICAL_BACKUP_S3_BUCKET_SCOPE_SUFFIX"/logical_backups/"$(date +%s).sql.gz + PATH_TO_BACKUP=s3://$LOGICAL_BACKUP_S3_BUCKET"/"$LOGICAL_BACKUP_S3_BUCKET_PREFIX"/"$SCOPE$LOGICAL_BACKUP_S3_BUCKET_SCOPE_SUFFIX"/logical_backups/"$(date +%s).sql.gz args=() @@ -120,7 +120,7 @@ function aws_upload { } function gcs_upload { - PATH_TO_BACKUP=gs://$LOGICAL_BACKUP_S3_BUCKET"/spilo/"$SCOPE$LOGICAL_BACKUP_S3_BUCKET_SCOPE_SUFFIX"/logical_backups/"$(date +%s).sql.gz + PATH_TO_BACKUP=gs://$LOGICAL_BACKUP_S3_BUCKET"/"$LOGICAL_BACKUP_S3_BUCKET_PREFIX"/"$SCOPE$LOGICAL_BACKUP_S3_BUCKET_SCOPE_SUFFIX"/logical_backups/"$(date +%s).sql.gz gsutil -o Credentials:gs_service_key_file=$LOGICAL_BACKUP_GOOGLE_APPLICATION_CREDENTIALS cp - "$PATH_TO_BACKUP" } diff --git a/manifests/complete-postgres-manifest.yaml b/manifests/complete-postgres-manifest.yaml index aa94dab23..44d317123 100644 --- a/manifests/complete-postgres-manifest.yaml +++ b/manifests/complete-postgres-manifest.yaml @@ -10,7 +10,7 @@ metadata: # "delete-date": "2020-08-31" # can only be deleted on that day if "delete-date "key is configured # "delete-clustername": "acid-test-cluster" # can only be deleted when name matches if "delete-clustername" key is configured spec: - dockerImage: ghcr.io/zalando/spilo-15:3.0-p1 + dockerImage: ghcr.io/zalando/spilo-17:4.0-p2 teamId: "acid" numberOfInstances: 2 users: # Application/Robot users @@ -19,6 +19,8 @@ spec: - createdb foo_user: [] # flyway: [] +# usersIgnoringSecretRotation: +# - bar_user # usersWithSecretRotation: # - foo_user # usersWithInPlaceSecretRotation: @@ -46,7 +48,7 @@ spec: defaultRoles: true defaultUsers: false postgresql: - version: "15" + version: "17" parameters: # Expert section shared_buffers: "32MB" max_connections: "10" @@ -66,6 +68,8 @@ spec: # matchLabels: # environment: dev # service: postgres +# subPath: $(NODE_NAME)/$(POD_NAME) +# isSubPathExpr: true additionalVolumes: - name: empty mountPath: /opt/empty @@ -81,6 +85,16 @@ spec: # PersistentVolumeClaim: # claimName: pvc-postgresql-data-partitions # readyOnly: false +# - name: data +# mountPath: /home/postgres/pgdata/partitions +# subPath: $(NODE_NAME)/$(POD_NAME) +# isSubPathExpr: true +# targetContainers: +# - postgres +# volumeSource: +# PersistentVolumeClaim: +# claimName: pvc-postgresql-data-partitions +# readyOnly: false # - name: conf # mountPath: /etc/telegraf # subPath: telegraf.conf @@ -107,9 +121,13 @@ spec: requests: cpu: 10m memory: 100Mi +# hugepages-2Mi: 128Mi +# hugepages-1Gi: 1Gi limits: cpu: 500m memory: 500Mi +# hugepages-2Mi: 128Mi +# hugepages-1Gi: 1Gi patroni: failsafe_mode: false initdb: @@ -145,6 +163,7 @@ spec: # run periodic backups with k8s cron jobs # enableLogicalBackup: true +# logicalBackupRetention: "3 months" # logicalBackupSchedule: "30 00 * * *" # maintenanceWindows: diff --git a/manifests/configmap.yaml b/manifests/configmap.yaml index 3bba4c50e..9473ef5ec 100644 --- a/manifests/configmap.yaml +++ b/manifests/configmap.yaml @@ -13,57 +13,62 @@ data: cluster_history_entries: "1000" cluster_labels: application:spilo cluster_name_label: cluster-name - # connection_pooler_default_cpu_limit: "1" - # connection_pooler_default_cpu_request: "500m" - # connection_pooler_default_memory_limit: 100Mi - # connection_pooler_default_memory_request: 100Mi - connection_pooler_image: "registry.opensource.zalan.do/acid/pgbouncer:master-27" - # connection_pooler_max_db_connections: 60 - # connection_pooler_mode: "transaction" - # 
connection_pooler_number_of_instances: 2 - # connection_pooler_schema: "pooler" - # connection_pooler_user: "pooler" + connection_pooler_default_cpu_limit: "1" + connection_pooler_default_cpu_request: "500m" + connection_pooler_default_memory_limit: 100Mi + connection_pooler_default_memory_request: 100Mi + connection_pooler_image: "registry.opensource.zalan.do/acid/pgbouncer:master-32" + connection_pooler_max_db_connections: "60" + connection_pooler_mode: "transaction" + connection_pooler_number_of_instances: "2" + connection_pooler_schema: "pooler" + connection_pooler_user: "pooler" crd_categories: "all" # custom_service_annotations: "keyx:valuez,keya:valuea" # custom_pod_annotations: "keya:valuea,keyb:valueb" db_hosted_zone: db.example.com debug_logging: "true" - # default_cpu_limit: "1" - # default_cpu_request: 100m - # default_memory_limit: 500Mi - # default_memory_request: 100Mi + default_cpu_limit: "1" + default_cpu_request: 100m + default_memory_limit: 500Mi + default_memory_request: 100Mi # delete_annotation_date_key: delete-date # delete_annotation_name_key: delete-clustername - docker_image: ghcr.io/zalando/spilo-15:3.0-p1 + docker_image: ghcr.io/zalando/spilo-17:4.0-p2 # downscaler_annotations: "deployment-time,downscaler/*" - # enable_admin_role_for_users: "true" - # enable_crd_registration: "true" - # enable_cross_namespace_secret: "false" - # enable_database_access: "true" + enable_admin_role_for_users: "true" + enable_crd_registration: "true" + enable_crd_validation: "true" + enable_cross_namespace_secret: "false" + enable_finalizers: "false" + enable_database_access: "true" enable_ebs_gp3_migration: "false" - # enable_ebs_gp3_migration_max_size: "1000" - # enable_init_containers: "true" - # enable_lazy_spilo_upgrade: "false" + enable_ebs_gp3_migration_max_size: "1000" + enable_init_containers: "true" + enable_lazy_spilo_upgrade: "false" enable_master_load_balancer: "false" enable_master_pooler_load_balancer: "false" enable_password_rotation: "false" enable_patroni_failsafe_mode: "false" + enable_owner_references: "false" + enable_persistent_volume_claim_deletion: "true" enable_pgversion_env_var: "true" - # enable_pod_antiaffinity: "false" - # enable_pod_disruption_budget: "true" - # enable_postgres_team_crd: "false" - # enable_postgres_team_crd_superusers: "false" + enable_pod_antiaffinity: "false" + enable_pod_disruption_budget: "true" + enable_postgres_team_crd: "false" + enable_postgres_team_crd_superusers: "false" enable_readiness_probe: "false" enable_replica_load_balancer: "false" enable_replica_pooler_load_balancer: "false" - # enable_shm_volume: "true" - # enable_sidecars: "true" + enable_secrets_deletion: "true" + enable_shm_volume: "true" + enable_sidecars: "true" enable_spilo_wal_path_compat: "true" enable_team_id_clustername_prefix: "false" enable_team_member_deprecation: "false" - # enable_team_superuser: "false" + enable_team_superuser: "false" enable_teams_api: "false" - # etcd_host: "" + etcd_host: "" external_traffic_policy: "Cluster" # gcp_credentials: "" # ignored_annotations: "" @@ -73,53 +78,55 @@ data: # inherited_annotations: owned-by # inherited_labels: application,environment # kube_iam_role: "" - # kubernetes_use_configmaps: "false" + kubernetes_use_configmaps: "false" # log_s3_bucket: "" # logical_backup_azure_storage_account_name: "" # logical_backup_azure_storage_container: "" # logical_backup_azure_storage_account_key: "" # logical_backup_cpu_limit: "" # logical_backup_cpu_request: "" - logical_backup_docker_image: 
"registry.opensource.zalan.do/acid/logical-backup:v1.10.1" + logical_backup_cronjob_environment_secret: "" + logical_backup_docker_image: "ghcr.io/zalando/postgres-operator/logical-backup:v1.14.0" # logical_backup_google_application_credentials: "" logical_backup_job_prefix: "logical-backup-" # logical_backup_memory_limit: "" # logical_backup_memory_request: "" logical_backup_provider: "s3" - # logical_backup_s3_access_key_id: "" + logical_backup_s3_access_key_id: "" logical_backup_s3_bucket: "my-bucket-url" - # logical_backup_s3_region: "" - # logical_backup_s3_endpoint: "" - # logical_backup_s3_secret_access_key: "" + logical_backup_s3_bucket_prefix: "spilo" + logical_backup_s3_region: "" + logical_backup_s3_endpoint: "" + logical_backup_s3_secret_access_key: "" logical_backup_s3_sse: "AES256" - # logical_backup_s3_retention_time: "" + logical_backup_s3_retention_time: "" logical_backup_schedule: "30 00 * * *" major_version_upgrade_mode: "manual" # major_version_upgrade_team_allow_list: "" master_dns_name_format: "{cluster}.{namespace}.{hostedzone}" - # master_legacy_dns_name_format: "{cluster}.{team}.{hostedzone}" - # master_pod_move_timeout: 20m - # max_instances: "-1" - # min_instances: "-1" + master_legacy_dns_name_format: "{cluster}.{team}.{hostedzone}" + master_pod_move_timeout: 20m # max_cpu_request: "1" + max_instances: "-1" # max_memory_request: 4Gi - # min_cpu_limit: 250m - # min_memory_limit: 250Mi - # minimal_major_version: "11" + min_cpu_limit: 250m + min_instances: "-1" + min_memory_limit: 250Mi + minimal_major_version: "13" # node_readiness_label: "status:ready" # node_readiness_label_merge: "OR" - # oauth_token_secret_name: postgresql-operator - # pam_configuration: | - # https://info.example.com/oauth2/tokeninfo?access_token= uid realm=/employees - # pam_role_name: zalandos + oauth_token_secret_name: postgresql-operator + pam_configuration: "https://info.example.com/oauth2/tokeninfo?access_token= uid realm=/employees" + pam_role_name: zalandos patroni_api_check_interval: "1s" patroni_api_check_timeout: "5s" - # password_rotation_interval: "90" - # password_rotation_user_retention: "180" + password_rotation_interval: "90" + password_rotation_user_retention: "180" + pdb_master_label_selector: "true" pdb_name_format: "postgres-{cluster}-pdb" persistent_volume_claim_retention_policy: "when_deleted:retain,when_scaled:retain" - # pod_antiaffinity_preferred_during_scheduling: "false" - # pod_antiaffinity_topology_key: "kubernetes.io/hostname" + pod_antiaffinity_preferred_during_scheduling: "false" + pod_antiaffinity_topology_key: "kubernetes.io/hostname" pod_deletion_wait_timeout: 10m # pod_environment_configmap: "default/my-custom-config" # pod_environment_secret: "my-custom-secret" @@ -127,17 +134,17 @@ data: pod_management_policy: "ordered_ready" # pod_priority_class_name: "postgres-pod-priority" pod_role_label: spilo-role - # pod_service_account_definition: "" + pod_service_account_definition: "" pod_service_account_name: "postgres-pod" - # pod_service_account_role_binding_definition: "" + pod_service_account_role_binding_definition: "" pod_terminate_grace_period: 5m - # postgres_superuser_teams: "postgres_superusers" - # protected_role_names: "admin,cron_admin" + postgres_superuser_teams: "postgres_superusers" + protected_role_names: "admin,cron_admin" ready_wait_interval: 3s ready_wait_timeout: 30s repair_period: 5m replica_dns_name_format: "{cluster}-repl.{namespace}.{hostedzone}" - # replica_legacy_dns_name_format: "{cluster}-repl.{team}.{hostedzone}" + 
replica_legacy_dns_name_format: "{cluster}-repl.{team}.{hostedzone}" replication_username: standby resource_check_interval: 3s resource_check_timeout: 10m @@ -147,7 +154,7 @@ data: secret_name_template: "{username}.{cluster}.credentials.{tprkind}.{tprgroup}" share_pgsocket_with_sidecars: "false" # sidecar_docker_images: "" - # set_memory_request_to_limit: "false" + set_memory_request_to_limit: "false" spilo_allow_privilege_escalation: "true" # spilo_runasuser: 101 # spilo_runasgroup: 103 @@ -155,10 +162,10 @@ data: spilo_privileged: "false" storage_resize_mode: "pvc" super_username: postgres - # target_major_version: "15" - # team_admin_role: "admin" - # team_api_role_configuration: "log_statement:all" - # teams_api_url: http://fake-teams-api.default.svc.cluster.local + target_major_version: "17" + team_admin_role: "admin" + team_api_role_configuration: "log_statement:all" + teams_api_url: http://fake-teams-api.default.svc.cluster.local # toleration: "key:db-only,operator:Exists,effect:NoSchedule" # wal_az_storage_account: "" # wal_gs_bucket: "" diff --git a/manifests/fes.crd.yaml b/manifests/fes.crd.yaml new file mode 100644 index 000000000..70a8c9555 --- /dev/null +++ b/manifests/fes.crd.yaml @@ -0,0 +1,23 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: fabriceventstreams.zalando.org +spec: + group: zalando.org + names: + kind: FabricEventStream + listKind: FabricEventStreamList + plural: fabriceventstreams + singular: fabriceventstream + shortNames: + - fes + categories: + - all + scope: Namespaced + versions: + - name: v1 + served: true + storage: true + schema: + openAPIV3Schema: + type: object diff --git a/manifests/minimal-fake-pooler-deployment.yaml b/manifests/minimal-fake-pooler-deployment.yaml index 53332bad2..59a32ad0b 100644 --- a/manifests/minimal-fake-pooler-deployment.yaml +++ b/manifests/minimal-fake-pooler-deployment.yaml @@ -23,7 +23,7 @@ spec: serviceAccountName: postgres-operator containers: - name: postgres-operator - image: registry.opensource.zalan.do/acid/pgbouncer:master-27 + image: registry.opensource.zalan.do/acid/pgbouncer:master-32 imagePullPolicy: IfNotPresent resources: requests: diff --git a/manifests/minimal-master-replica-svcmonitor.yaml b/manifests/minimal-master-replica-svcmonitor.yaml index 67ed28c81..049ea12eb 100644 --- a/manifests/minimal-master-replica-svcmonitor.yaml +++ b/manifests/minimal-master-replica-svcmonitor.yaml @@ -31,11 +31,21 @@ spec: version: "13" sidecars: - name: "exporter" - image: "wrouesnel/postgres_exporter" + image: "quay.io/prometheuscommunity/postgres-exporter:v0.15.0" ports: - name: exporter containerPort: 9187 protocol: TCP + env: + - name: DATA_SOURCE_URI + value: ":5432/?sslmode=disable" + - name: DATA_SOURCE_USER + value: "postgres" + - name: DATA_SOURCE_PASS + valueFrom: + secretKeyRef: + name: postgres.test-pg.credentials.postgresql.acid.zalan.do + key: password resources: limits: cpu: 500m diff --git a/manifests/minimal-postgres-manifest-12.yaml b/manifests/minimal-postgres-lowest-version-manifest.yaml similarity index 95% rename from manifests/minimal-postgres-manifest-12.yaml rename to manifests/minimal-postgres-lowest-version-manifest.yaml index d578ac46d..40abf0c9c 100644 --- a/manifests/minimal-postgres-manifest-12.yaml +++ b/manifests/minimal-postgres-lowest-version-manifest.yaml @@ -17,4 +17,4 @@ spec: preparedDatabases: bar: {} postgresql: - version: "12" + version: "13" diff --git a/manifests/minimal-postgres-manifest.yaml b/manifests/minimal-postgres-manifest.yaml 
index 00f11ebf7..8b1ed275d 100644 --- a/manifests/minimal-postgres-manifest.yaml +++ b/manifests/minimal-postgres-manifest.yaml @@ -17,4 +17,4 @@ spec: preparedDatabases: bar: {} postgresql: - version: "15" + version: "17" diff --git a/manifests/operator-service-account-rbac-openshift.yaml b/manifests/operator-service-account-rbac-openshift.yaml index e0e45cc54..e716e82b7 100644 --- a/manifests/operator-service-account-rbac-openshift.yaml +++ b/manifests/operator-service-account-rbac-openshift.yaml @@ -94,6 +94,7 @@ rules: - create - delete - get + - patch - update # to check nodes for node readiness label - apiGroups: @@ -166,6 +167,7 @@ rules: - get - list - patch + - update # to CRUD cron jobs for logical backups - apiGroups: - batch diff --git a/manifests/operator-service-account-rbac.yaml b/manifests/operator-service-account-rbac.yaml index c10dc5fd7..bf27f99f1 100644 --- a/manifests/operator-service-account-rbac.yaml +++ b/manifests/operator-service-account-rbac.yaml @@ -102,6 +102,7 @@ rules: - delete - get - update + - patch # to check nodes for node readiness label - apiGroups: - "" @@ -173,6 +174,7 @@ rules: - get - list - patch + - update # to CRUD cron jobs for logical backups - apiGroups: - batch diff --git a/manifests/operatorconfiguration.crd.yaml b/manifests/operatorconfiguration.crd.yaml index e3eff4fca..ded2477d7 100644 --- a/manifests/operatorconfiguration.crd.yaml +++ b/manifests/operatorconfiguration.crd.yaml @@ -66,7 +66,7 @@ spec: type: string docker_image: type: string - default: "ghcr.io/zalando/spilo-15:3.0-p1" + default: "ghcr.io/zalando/spilo-17:4.0-p2" enable_crd_registration: type: boolean default: true @@ -158,17 +158,17 @@ spec: properties: major_version_upgrade_mode: type: string - default: "off" + default: "manual" major_version_upgrade_team_allow_list: type: array items: type: string minimal_major_version: type: string - default: "11" + default: "13" target_major_version: type: string - default: "15" + default: "17" kubernetes: type: object properties: @@ -203,9 +203,18 @@ spec: enable_cross_namespace_secret: type: boolean default: false + enable_finalizers: + type: boolean + default: false enable_init_containers: type: boolean default: true + enable_owner_references: + type: boolean + default: false + enable_persistent_volume_claim_deletion: + type: boolean + default: true enable_pod_antiaffinity: type: boolean default: false @@ -215,6 +224,9 @@ spec: enable_readiness_probe: type: boolean default: false + enable_secrets_deletion: + type: boolean + default: true enable_sidecars: type: boolean default: true @@ -273,6 +285,9 @@ spec: oauth_token_secret_name: type: string default: "postgresql-operator" + pdb_master_label_selector: + type: boolean + default: true pdb_name_format: type: string default: "postgres-{cluster}-pdb" @@ -359,34 +374,28 @@ spec: properties: default_cpu_limit: type: string - pattern: '^(\d+m|\d+(\.\d{1,3})?)$' - default: "1" + pattern: '^(\d+m|\d+(\.\d{1,3})?)$|^$' default_cpu_request: type: string - pattern: '^(\d+m|\d+(\.\d{1,3})?)$' - default: "100m" + pattern: '^(\d+m|\d+(\.\d{1,3})?)$|^$' default_memory_limit: type: string - pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$' - default: "500Mi" + pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$|^$' default_memory_request: type: string - pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$' - default: "100Mi" + pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$|^$' max_cpu_request: type: string - pattern: '^(\d+m|\d+(\.\d{1,3})?)$' + pattern: 
'^(\d+m|\d+(\.\d{1,3})?)$|^$' max_memory_request: type: string - pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$' + pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$|^$' min_cpu_limit: type: string - pattern: '^(\d+m|\d+(\.\d{1,3})?)$' - default: "250m" + pattern: '^(\d+m|\d+(\.\d{1,3})?)$|^$' min_memory_limit: type: string - pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$' - default: "250Mi" + pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$|^$' timeouts: type: object properties: @@ -461,7 +470,6 @@ spec: type: string additional_secret_mount_path: type: string - default: "/meta/credentials" aws_region: type: string default: "eu-central-1" @@ -500,7 +508,7 @@ spec: pattern: '^(\d+m|\d+(\.\d{1,3})?)$' logical_backup_docker_image: type: string - default: "registry.opensource.zalan.do/acid/logical-backup:v1.10.1" + default: "ghcr.io/zalando/postgres-operator/logical-backup:v1.14.0" logical_backup_google_application_credentials: type: string logical_backup_job_prefix: @@ -523,6 +531,8 @@ spec: type: string logical_backup_s3_bucket: type: string + logical_backup_s3_bucket_prefix: + type: string logical_backup_s3_endpoint: type: string logical_backup_s3_region: @@ -537,6 +547,8 @@ spec: type: string pattern: '^(\d+|\*)(/\d+)?(\s+(\d+|\*)(/\d+)?){4}$' default: "30 00 * * *" + logical_backup_cronjob_environment_secret: + type: string debug: type: object properties: @@ -648,7 +660,7 @@ spec: default: "pooler" connection_pooler_image: type: string - default: "registry.opensource.zalan.do/acid/pgbouncer:master-27" + default: "registry.opensource.zalan.do/acid/pgbouncer:master-32" connection_pooler_max_db_connections: type: integer default: 60 @@ -665,19 +677,15 @@ spec: connection_pooler_default_cpu_limit: type: string pattern: '^(\d+m|\d+(\.\d{1,3})?)$' - default: "1" connection_pooler_default_cpu_request: type: string pattern: '^(\d+m|\d+(\.\d{1,3})?)$' - default: "500m" connection_pooler_default_memory_limit: type: string pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$' - default: "100Mi" connection_pooler_default_memory_request: type: string pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$' - default: "100Mi" patroni: type: object properties: diff --git a/manifests/postgres-operator.yaml b/manifests/postgres-operator.yaml index 0ea7e3203..e3f77657e 100644 --- a/manifests/postgres-operator.yaml +++ b/manifests/postgres-operator.yaml @@ -19,7 +19,7 @@ spec: serviceAccountName: postgres-operator containers: - name: postgres-operator - image: registry.opensource.zalan.do/acid/postgres-operator:v1.10.1 + image: ghcr.io/zalando/postgres-operator:v1.14.0 imagePullPolicy: IfNotPresent resources: requests: diff --git a/manifests/postgresql-operator-default-configuration.yaml b/manifests/postgresql-operator-default-configuration.yaml index 3a43a87bd..570ebd338 100644 --- a/manifests/postgresql-operator-default-configuration.yaml +++ b/manifests/postgresql-operator-default-configuration.yaml @@ -3,7 +3,7 @@ kind: OperatorConfiguration metadata: name: postgresql-operator-default-configuration configuration: - docker_image: ghcr.io/zalando/spilo-15:3.0-p1 + docker_image: ghcr.io/zalando/spilo-17:4.0-p2 # enable_crd_registration: true # crd_categories: # - all @@ -36,11 +36,11 @@ configuration: replication_username: standby super_username: postgres major_version_upgrade: - major_version_upgrade_mode: "off" + major_version_upgrade_mode: "manual" # major_version_upgrade_team_allow_list: # - acid - minimal_major_version: "11" - target_major_version: "15" + minimal_major_version: 
"13" + target_major_version: "17" kubernetes: # additional_pod_capabilities: # - "SYS_NICE" @@ -57,10 +57,14 @@ configuration: # - deployment-time # - downscaler/* # enable_cross_namespace_secret: "false" + enable_finalizers: false enable_init_containers: true + enable_owner_references: false + enable_persistent_volume_claim_deletion: true enable_pod_antiaffinity: false enable_pod_disruption_budget: true enable_readiness_probe: false + enable_secrets_deletion: true enable_sidecars: true # ignored_annotations: # - k8s.v1.cni.cncf.io/network-status @@ -83,6 +87,7 @@ configuration: # status: ready # node_readiness_label_merge: "OR" oauth_token_secret_name: postgresql-operator + pdb_master_label_selector: true pdb_name_format: "postgres-{cluster}-pdb" persistent_volume_claim_retention_policy: when_deleted: "retain" @@ -163,18 +168,20 @@ configuration: # logical_backup_cpu_request: "" # logical_backup_memory_limit: "" # logical_backup_memory_request: "" - logical_backup_docker_image: "registry.opensource.zalan.do/acid/logical-backup:v1.10.1" + logical_backup_docker_image: "ghcr.io/zalando/postgres-operator/logical-backup:v1.14.0" # logical_backup_google_application_credentials: "" logical_backup_job_prefix: "logical-backup-" logical_backup_provider: "s3" # logical_backup_s3_access_key_id: "" logical_backup_s3_bucket: "my-bucket-url" + # logical_backup_s3_bucket_prefix: "spilo" # logical_backup_s3_endpoint: "" # logical_backup_s3_region: "" # logical_backup_s3_secret_access_key: "" logical_backup_s3_sse: "AES256" # logical_backup_s3_retention_time: "" logical_backup_schedule: "30 00 * * *" + # logical_backup_cronjob_environment_secret: "" debug: debug_logging: true enable_database_access: true @@ -206,7 +213,7 @@ configuration: connection_pooler_default_cpu_request: "500m" connection_pooler_default_memory_limit: 100Mi connection_pooler_default_memory_request: 100Mi - connection_pooler_image: "registry.opensource.zalan.do/acid/pgbouncer:master-27" + connection_pooler_image: "registry.opensource.zalan.do/acid/pgbouncer:master-32" # connection_pooler_max_db_connections: 60 connection_pooler_mode: "transaction" connection_pooler_number_of_instances: 2 diff --git a/manifests/postgresql.crd.yaml b/manifests/postgresql.crd.yaml index 5f5b6ff09..39d751cef 100644 --- a/manifests/postgresql.crd.yaml +++ b/manifests/postgresql.crd.yaml @@ -85,10 +85,14 @@ spec: - mountPath - volumeSource properties: + isSubPathExpr: + type: boolean name: type: string mountPath: type: string + subPath: + type: string targetContainers: type: array nullable: true @@ -97,8 +101,6 @@ spec: volumeSource: type: object x-kubernetes-preserve-unknown-fields: true - subPath: - type: string allowedSourceRanges: type: array nullable: true @@ -213,6 +215,8 @@ spec: items: type: object x-kubernetes-preserve-unknown-fields: true + logicalBackupRetention: + type: string logicalBackupSchedule: type: string pattern: '^(\d+|\*)(/\d+)?(\s+(\d+|\*)(/\d+)?){4}$' @@ -220,7 +224,7 @@ spec: type: array items: type: string - pattern: '^\ *((Mon|Tue|Wed|Thu|Fri|Sat|Sun):(2[0-3]|[01]?\d):([0-5]?\d)|(2[0-3]|[01]?\d):([0-5]?\d))-((Mon|Tue|Wed|Thu|Fri|Sat|Sun):(2[0-3]|[01]?\d):([0-5]?\d)|(2[0-3]|[01]?\d):([0-5]?\d))\ *$' + pattern: '^\ *((Mon|Tue|Wed|Thu|Fri|Sat|Sun):(2[0-3]|[01]?\d):([0-5]?\d)|(2[0-3]|[01]?\d):([0-5]?\d))-((2[0-3]|[01]?\d):([0-5]?\d)|(2[0-3]|[01]?\d):([0-5]?\d))\ *$' masterServiceAnnotations: type: object additionalProperties: @@ -369,12 +373,11 @@ spec: version: type: string enum: - - "10" - - "11" - - "12" - "13" - "14" - "15" + - 
"16" + - "17" parameters: type: object additionalProperties: @@ -439,6 +442,12 @@ spec: pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$' # Note: the value specified here must not be zero or be higher # than the corresponding limit. + hugepages-2Mi: + type: string + pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$' + hugepages-1Gi: + type: string + pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$' requests: type: object properties: @@ -448,6 +457,12 @@ spec: memory: type: string pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$' + hugepages-2Mi: + type: string + pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$' + hugepages-1Gi: + type: string + pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$' schedulerName: type: string serviceAnnotations: @@ -497,6 +512,9 @@ spec: type: string batchSize: type: integer + cpu: + type: string + pattern: '^(\d+m|\d+(\.\d{1,3})?)$' database: type: string enableRecovery: @@ -505,6 +523,9 @@ spec: type: object additionalProperties: type: string + memory: + type: string + pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$' tables: type: object additionalProperties: @@ -516,6 +537,8 @@ spec: type: string idColumn: type: string + ignoreRecovery: + type: boolean payloadColumn: type: string recoveryEventType: @@ -598,6 +621,11 @@ spec: - SUPERUSER - nosuperuser - NOSUPERUSER + usersIgnoringSecretRotation: + type: array + nullable: true + items: + type: string usersWithInPlaceSecretRotation: type: array nullable: true @@ -613,6 +641,8 @@ spec: required: - size properties: + isSubPathExpr: + type: boolean iops: type: integer selector: diff --git a/manifests/standby-manifest.yaml b/manifests/standby-manifest.yaml index 2db4d489b..eb90464a6 100644 --- a/manifests/standby-manifest.yaml +++ b/manifests/standby-manifest.yaml @@ -8,7 +8,7 @@ spec: size: 1Gi numberOfInstances: 1 postgresql: - version: "15" + version: "17" # Make this a standby cluster and provide either the s3 bucket path of source cluster or the remote primary host for continuous streaming. 
standby: # s3_wal_path: "s3://mybucket/spilo/acid-minimal-cluster/abcd1234-2a4b-4b2a-8c9c-c1234defg567/wal/14/" diff --git a/pkg/apis/acid.zalan.do/v1/const.go b/pkg/apis/acid.zalan.do/v1/const.go index 3cb1c1ade..4102ea3d3 100644 --- a/pkg/apis/acid.zalan.do/v1/const.go +++ b/pkg/apis/acid.zalan.do/v1/const.go @@ -1,6 +1,6 @@ package v1 -// ClusterStatusUnknown etc : status of a Postgres cluster known to the operator +// ClusterStatusUnknown etc : status of a Postgres cluster known to the operator const ( ClusterStatusUnknown = "" ClusterStatusCreating = "Creating" diff --git a/pkg/apis/acid.zalan.do/v1/crds.go b/pkg/apis/acid.zalan.do/v1/crds.go index 3d9f4f08d..3f6bf25d9 100644 --- a/pkg/apis/acid.zalan.do/v1/crds.go +++ b/pkg/apis/acid.zalan.do/v1/crds.go @@ -3,10 +3,11 @@ package v1 import ( "fmt" - acidzalando "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do" - "github.com/zalando/postgres-operator/pkg/util" apiextv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + acidzalando "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do" + "github.com/zalando/postgres-operator/pkg/util" ) // CRDResource* define names necesssary for the k8s CRD API @@ -145,12 +146,18 @@ var PostgresCRDResourceValidation = apiextv1.CustomResourceValidation{ Type: "object", Required: []string{"name", "mountPath", "volumeSource"}, Properties: map[string]apiextv1.JSONSchemaProps{ + "isSubPathExpr": { + Type: "boolean", + }, "name": { Type: "string", }, "mountPath": { Type: "string", }, + "subPath": { + Type: "string", + }, "targetContainers": { Type: "array", Nullable: true, @@ -164,9 +171,6 @@ var PostgresCRDResourceValidation = apiextv1.CustomResourceValidation{ Type: "object", XPreserveUnknownFields: util.True(), }, - "subPath": { - Type: "string", - }, }, }, }, @@ -342,6 +346,9 @@ var PostgresCRDResourceValidation = apiextv1.CustomResourceValidation{ }, }, }, + "logicalBackupRetention": { + Type: "string", + }, "logicalBackupSchedule": { Type: "string", Pattern: "^(\\d+|\\*)(/\\d+)?(\\s+(\\d+|\\*)(/\\d+)?){4}$", @@ -589,22 +596,19 @@ var PostgresCRDResourceValidation = apiextv1.CustomResourceValidation{ Type: "string", Enum: []apiextv1.JSON{ { - Raw: []byte(`"10"`), - }, - { - Raw: []byte(`"11"`), + Raw: []byte(`"13"`), }, { - Raw: []byte(`"12"`), + Raw: []byte(`"14"`), }, { - Raw: []byte(`"13"`), + Raw: []byte(`"15"`), }, { - Raw: []byte(`"14"`), + Raw: []byte(`"16"`), }, { - Raw: []byte(`"15"`), + Raw: []byte(`"17"`), }, }, }, @@ -684,6 +688,14 @@ var PostgresCRDResourceValidation = apiextv1.CustomResourceValidation{ Type: "string", Pattern: "^(\\d+(e\\d+)?|\\d+(\\.\\d+)?(e\\d+)?[EPTGMK]i?)$", }, + "hugepages-2Mi": { + Type: "string", + Pattern: "^(\\d+(e\\d+)?|\\d+(\\.\\d+)?(e\\d+)?[EPTGMK]i?)$", + }, + "hugepages-1Gi": { + Type: "string", + Pattern: "^(\\d+(e\\d+)?|\\d+(\\.\\d+)?(e\\d+)?[EPTGMK]i?)$", + }, }, }, "requests": { @@ -697,6 +709,14 @@ var PostgresCRDResourceValidation = apiextv1.CustomResourceValidation{ Type: "string", Pattern: "^(\\d+(e\\d+)?|\\d+(\\.\\d+)?(e\\d+)?[EPTGMK]i?)$", }, + "hugepages-2Mi": { + Type: "string", + Pattern: "^(\\d+(e\\d+)?|\\d+(\\.\\d+)?(e\\d+)?[EPTGMK]i?)$", + }, + "hugepages-1Gi": { + Type: "string", + Pattern: "^(\\d+(e\\d+)?|\\d+(\\.\\d+)?(e\\d+)?[EPTGMK]i?)$", + }, }, }, }, @@ -979,6 +999,15 @@ var PostgresCRDResourceValidation = apiextv1.CustomResourceValidation{ }, }, }, + "usersIgnoringSecretRotation": { + Type: "array", + Nullable: true, + Items: &apiextv1.JSONSchemaPropsOrArray{ 
+ Schema: &apiextv1.JSONSchemaProps{ + Type: "string", + }, + }, + }, "usersWithInPlaceSecretRotation": { Type: "array", Nullable: true, @@ -1001,6 +1030,9 @@ var PostgresCRDResourceValidation = apiextv1.CustomResourceValidation{ Type: "object", Required: []string{"size"}, Properties: map[string]apiextv1.JSONSchemaProps{ + "isSubPathExpr": { + Type: "boolean", + }, "iops": { Type: "integer", }, @@ -1132,7 +1164,8 @@ var OperatorConfigCRDResourceValidation = apiextv1.CustomResourceValidation{ Type: "boolean", }, "enable_spilo_wal_path_compat": { - Type: "boolean", + Type: "boolean", + Description: "deprecated", }, "enable_team_id_clustername_prefix": { Type: "boolean", @@ -1288,9 +1321,18 @@ var OperatorConfigCRDResourceValidation = apiextv1.CustomResourceValidation{ "enable_cross_namespace_secret": { Type: "boolean", }, + "enable_finalizers": { + Type: "boolean", + }, "enable_init_containers": { Type: "boolean", }, + "enable_owner_references": { + Type: "boolean", + }, + "enable_persistent_volume_claim_deletion": { + Type: "boolean", + }, "enable_pod_antiaffinity": { Type: "boolean", }, @@ -1300,6 +1342,9 @@ var OperatorConfigCRDResourceValidation = apiextv1.CustomResourceValidation{ "enable_readiness_probe": { Type: "boolean", }, + "enable_secrets_deletion": { + Type: "boolean", + }, "enable_sidecars": { Type: "boolean", }, @@ -1394,6 +1439,9 @@ var OperatorConfigCRDResourceValidation = apiextv1.CustomResourceValidation{ "pdb_name_format": { Type: "string", }, + "pdb_master_label_selector": { + Type: "boolean", + }, "persistent_volume_claim_retention_policy": { Type: "object", Properties: map[string]apiextv1.JSONSchemaProps{ @@ -1526,35 +1574,35 @@ var OperatorConfigCRDResourceValidation = apiextv1.CustomResourceValidation{ Properties: map[string]apiextv1.JSONSchemaProps{ "default_cpu_limit": { Type: "string", - Pattern: "^(\\d+m|\\d+(\\.\\d{1,3})?)$", + Pattern: "^(\\d+m|\\d+(\\.\\d{1,3})?)$|^$", }, "default_cpu_request": { Type: "string", - Pattern: "^(\\d+m|\\d+(\\.\\d{1,3})?)$", + Pattern: "^(\\d+m|\\d+(\\.\\d{1,3})?)$|^$", }, "default_memory_limit": { Type: "string", - Pattern: "^(\\d+(e\\d+)?|\\d+(\\.\\d+)?(e\\d+)?[EPTGMK]i?)$", + Pattern: "^(\\d+(e\\d+)?|\\d+(\\.\\d+)?(e\\d+)?[EPTGMK]i?)$|^$", }, "default_memory_request": { Type: "string", - Pattern: "^(\\d+(e\\d+)?|\\d+(\\.\\d+)?(e\\d+)?[EPTGMK]i?)$", + Pattern: "^(\\d+(e\\d+)?|\\d+(\\.\\d+)?(e\\d+)?[EPTGMK]i?)$|^$", }, "max_cpu_request": { Type: "string", - Pattern: "^(\\d+m|\\d+(\\.\\d{1,3})?)$", + Pattern: "^(\\d+m|\\d+(\\.\\d{1,3})?)$|^$", }, "max_memory_request": { Type: "string", - Pattern: "^(\\d+(e\\d+)?|\\d+(\\.\\d+)?(e\\d+)?[EPTGMK]i?)$", + Pattern: "^(\\d+(e\\d+)?|\\d+(\\.\\d+)?(e\\d+)?[EPTGMK]i?)$|^$", }, "min_cpu_limit": { Type: "string", - Pattern: "^(\\d+m|\\d+(\\.\\d{1,3})?)$", + Pattern: "^(\\d+m|\\d+(\\.\\d{1,3})?)$|^$", }, "min_memory_limit": { Type: "string", - Pattern: "^(\\d+(e\\d+)?|\\d+(\\.\\d+)?(e\\d+)?[EPTGMK]i?)$", + Pattern: "^(\\d+(e\\d+)?|\\d+(\\.\\d+)?(e\\d+)?[EPTGMK]i?)$|^$", }, }, }, @@ -1727,6 +1775,9 @@ var OperatorConfigCRDResourceValidation = apiextv1.CustomResourceValidation{ "logical_backup_s3_bucket": { Type: "string", }, + "logical_backup_s3_bucket_prefix": { + Type: "string", + }, "logical_backup_s3_endpoint": { Type: "string", }, @@ -1746,6 +1797,9 @@ var OperatorConfigCRDResourceValidation = apiextv1.CustomResourceValidation{ Type: "string", Pattern: "^(\\d+|\\*)(/\\d+)?(\\s+(\\d+|\\*)(/\\d+)?){4}$", }, + "logical_backup_cronjob_environment_secret": { + Type: "string", + }, }, }, 
"debug": { diff --git a/pkg/apis/acid.zalan.do/v1/operator_configuration_type.go b/pkg/apis/acid.zalan.do/v1/operator_configuration_type.go index afc22cb5d..cd11b9173 100644 --- a/pkg/apis/acid.zalan.do/v1/operator_configuration_type.go +++ b/pkg/apis/acid.zalan.do/v1/operator_configuration_type.go @@ -47,14 +47,15 @@ type PostgresUsersConfiguration struct { // MajorVersionUpgradeConfiguration defines how to execute major version upgrades of Postgres. type MajorVersionUpgradeConfiguration struct { - MajorVersionUpgradeMode string `json:"major_version_upgrade_mode" default:"off"` // off - no actions, manual - manifest triggers action, full - manifest and minimal version violation trigger upgrade + MajorVersionUpgradeMode string `json:"major_version_upgrade_mode" default:"manual"` // off - no actions, manual - manifest triggers action, full - manifest and minimal version violation trigger upgrade MajorVersionUpgradeTeamAllowList []string `json:"major_version_upgrade_team_allow_list,omitempty"` - MinimalMajorVersion string `json:"minimal_major_version" default:"11"` - TargetMajorVersion string `json:"target_major_version" default:"15"` + MinimalMajorVersion string `json:"minimal_major_version" default:"13"` + TargetMajorVersion string `json:"target_major_version" default:"17"` } // KubernetesMetaConfiguration defines k8s conf required for all Postgres clusters and the operator itself type KubernetesMetaConfiguration struct { + EnableOwnerReferences *bool `json:"enable_owner_references,omitempty"` PodServiceAccountName string `json:"pod_service_account_name,omitempty"` // TODO: change it to the proper json PodServiceAccountDefinition string `json:"pod_service_account_definition,omitempty"` @@ -68,6 +69,7 @@ type KubernetesMetaConfiguration struct { AdditionalPodCapabilities []string `json:"additional_pod_capabilities,omitempty"` WatchedNamespace string `json:"watched_namespace,omitempty"` PDBNameFormat config.StringTemplate `json:"pdb_name_format,omitempty"` + PDBMasterLabelSelector *bool `json:"pdb_master_label_selector,omitempty"` EnablePodDisruptionBudget *bool `json:"enable_pod_disruption_budget,omitempty"` StorageResizeMode string `json:"storage_resize_mode,omitempty"` EnableInitContainers *bool `json:"enable_init_containers,omitempty"` @@ -101,8 +103,11 @@ type KubernetesMetaConfiguration struct { PodAntiAffinityTopologyKey string `json:"pod_antiaffinity_topology_key,omitempty"` PodManagementPolicy string `json:"pod_management_policy,omitempty"` PersistentVolumeClaimRetentionPolicy map[string]string `json:"persistent_volume_claim_retention_policy,omitempty"` + EnableSecretsDeletion *bool `json:"enable_secrets_deletion,omitempty"` + EnablePersistentVolumeClaimDeletion *bool `json:"enable_persistent_volume_claim_deletion,omitempty"` EnableReadinessProbe bool `json:"enable_readiness_probe,omitempty"` EnableCrossNamespaceSecret bool `json:"enable_cross_namespace_secret,omitempty"` + EnableFinalizers *bool `json:"enable_finalizers,omitempty"` } // PostgresPodResourcesDefaults defines the spec of default resources @@ -155,7 +160,7 @@ type AWSGCPConfiguration struct { LogS3Bucket string `json:"log_s3_bucket,omitempty"` KubeIAMRole string `json:"kube_iam_role,omitempty"` AdditionalSecretMount string `json:"additional_secret_mount,omitempty"` - AdditionalSecretMountPath string `json:"additional_secret_mount_path" default:"/meta/credentials"` + AdditionalSecretMountPath string `json:"additional_secret_mount_path,omitempty"` EnableEBSGp3Migration bool `json:"enable_ebs_gp3_migration" 
default:"false"` EnableEBSGp3MigrationMaxSize int64 `json:"enable_ebs_gp3_migration_max_size" default:"1000"` } @@ -225,6 +230,7 @@ type OperatorLogicalBackupConfiguration struct { AzureStorageContainer string `json:"logical_backup_azure_storage_container,omitempty"` AzureStorageAccountKey string `json:"logical_backup_azure_storage_account_key,omitempty"` S3Bucket string `json:"logical_backup_s3_bucket,omitempty"` + S3BucketPrefix string `json:"logical_backup_s3_bucket_prefix,omitempty"` S3Region string `json:"logical_backup_s3_region,omitempty"` S3Endpoint string `json:"logical_backup_s3_endpoint,omitempty"` S3AccessKeyID string `json:"logical_backup_s3_access_key_id,omitempty"` @@ -233,6 +239,7 @@ type OperatorLogicalBackupConfiguration struct { RetentionTime string `json:"logical_backup_s3_retention_time,omitempty"` GoogleApplicationCredentials string `json:"logical_backup_google_application_credentials,omitempty"` JobPrefix string `json:"logical_backup_job_prefix,omitempty"` + CronjobEnvironmentSecret string `json:"logical_backup_cronjob_environment_secret,omitempty"` CPURequest string `json:"logical_backup_cpu_request,omitempty"` MemoryRequest string `json:"logical_backup_memory_request,omitempty"` CPULimit string `json:"logical_backup_cpu_limit,omitempty"` diff --git a/pkg/apis/acid.zalan.do/v1/postgresql_type.go b/pkg/apis/acid.zalan.do/v1/postgresql_type.go index 3504d615d..ef6dfe7ff 100644 --- a/pkg/apis/acid.zalan.do/v1/postgresql_type.go +++ b/pkg/apis/acid.zalan.do/v1/postgresql_type.go @@ -59,26 +59,28 @@ type PostgresSpec struct { AllowedSourceRanges []string `json:"allowedSourceRanges"` Users map[string]UserFlags `json:"users,omitempty"` + UsersIgnoringSecretRotation []string `json:"usersIgnoringSecretRotation,omitempty"` UsersWithSecretRotation []string `json:"usersWithSecretRotation,omitempty"` UsersWithInPlaceSecretRotation []string `json:"usersWithInPlaceSecretRotation,omitempty"` - NumberOfInstances int32 `json:"numberOfInstances"` - MaintenanceWindows []MaintenanceWindow `json:"maintenanceWindows,omitempty"` - Clone *CloneDescription `json:"clone,omitempty"` - Databases map[string]string `json:"databases,omitempty"` - PreparedDatabases map[string]PreparedDatabase `json:"preparedDatabases,omitempty"` - SchedulerName *string `json:"schedulerName,omitempty"` - NodeAffinity *v1.NodeAffinity `json:"nodeAffinity,omitempty"` - Tolerations []v1.Toleration `json:"tolerations,omitempty"` - Sidecars []Sidecar `json:"sidecars,omitempty"` - InitContainers []v1.Container `json:"initContainers,omitempty"` - PodPriorityClassName string `json:"podPriorityClassName,omitempty"` - ShmVolume *bool `json:"enableShmVolume,omitempty"` - EnableLogicalBackup bool `json:"enableLogicalBackup,omitempty"` - LogicalBackupSchedule string `json:"logicalBackupSchedule,omitempty"` - StandbyCluster *StandbyDescription `json:"standby,omitempty"` - PodAnnotations map[string]string `json:"podAnnotations,omitempty"` - ServiceAnnotations map[string]string `json:"serviceAnnotations,omitempty"` + NumberOfInstances int32 `json:"numberOfInstances"` + MaintenanceWindows []MaintenanceWindow `json:"maintenanceWindows,omitempty"` + Clone *CloneDescription `json:"clone,omitempty"` + Databases map[string]string `json:"databases,omitempty"` + PreparedDatabases map[string]PreparedDatabase `json:"preparedDatabases,omitempty"` + SchedulerName *string `json:"schedulerName,omitempty"` + NodeAffinity *v1.NodeAffinity `json:"nodeAffinity,omitempty"` + Tolerations []v1.Toleration `json:"tolerations,omitempty"` + Sidecars 
[]Sidecar `json:"sidecars,omitempty"` + InitContainers []v1.Container `json:"initContainers,omitempty"` + PodPriorityClassName string `json:"podPriorityClassName,omitempty"` + ShmVolume *bool `json:"enableShmVolume,omitempty"` + EnableLogicalBackup bool `json:"enableLogicalBackup,omitempty"` + LogicalBackupRetention string `json:"logicalBackupRetention,omitempty"` + LogicalBackupSchedule string `json:"logicalBackupSchedule,omitempty"` + StandbyCluster *StandbyDescription `json:"standby,omitempty"` + PodAnnotations map[string]string `json:"podAnnotations,omitempty"` + ServiceAnnotations map[string]string `json:"serviceAnnotations,omitempty"` // MasterServiceAnnotations takes precedence over ServiceAnnotations for master role if not empty MasterServiceAnnotations map[string]string `json:"masterServiceAnnotations,omitempty"` // ReplicaServiceAnnotations takes precedence over ServiceAnnotations for replica role if not empty @@ -127,13 +129,14 @@ type MaintenanceWindow struct { // Volume describes a single volume in the manifest. type Volume struct { - Selector *metav1.LabelSelector `json:"selector,omitempty"` - Size string `json:"size"` - StorageClass string `json:"storageClass,omitempty"` - SubPath string `json:"subPath,omitempty"` - Iops *int64 `json:"iops,omitempty"` - Throughput *int64 `json:"throughput,omitempty"` - VolumeType string `json:"type,omitempty"` + Selector *metav1.LabelSelector `json:"selector,omitempty"` + Size string `json:"size"` + StorageClass string `json:"storageClass,omitempty"` + SubPath string `json:"subPath,omitempty"` + IsSubPathExpr *bool `json:"isSubPathExpr,omitempty"` + Iops *int64 `json:"iops,omitempty"` + Throughput *int64 `json:"throughput,omitempty"` + VolumeType string `json:"type,omitempty"` } // AdditionalVolume specs additional optional volumes for statefulset @@ -141,6 +144,7 @@ type AdditionalVolume struct { Name string `json:"name"` MountPath string `json:"mountPath"` SubPath string `json:"subPath,omitempty"` + IsSubPathExpr *bool `json:"isSubPathExpr,omitempty"` TargetContainers []string `json:"targetContainers"` VolumeSource v1.VolumeSource `json:"volumeSource"` } @@ -153,8 +157,10 @@ type PostgresqlParam struct { // ResourceDescription describes CPU and memory resources defined for a cluster. type ResourceDescription struct { - CPU string `json:"cpu"` - Memory string `json:"memory"` + CPU *string `json:"cpu,omitempty"` + Memory *string `json:"memory,omitempty"` + HugePages2Mi *string `json:"hugepages-2Mi,omitempty"` + HugePages1Gi *string `json:"hugepages-1Gi,omitempty"` } // Resources describes requests and limits for the cluster resouces. 
@@ -214,6 +220,7 @@ type Sidecar struct { DockerImage string `json:"image,omitempty"` Ports []v1.ContainerPort `json:"ports,omitempty"` Env []v1.EnvVar `json:"env,omitempty"` + Command []string `json:"command,omitempty"` } // UserFlags defines flags (such as superuser, nologin) that could be assigned to individual users @@ -252,13 +259,16 @@ type Stream struct { Tables map[string]StreamTable `json:"tables"` Filter map[string]*string `json:"filter,omitempty"` BatchSize *uint32 `json:"batchSize,omitempty"` + CPU *string `json:"cpu,omitempty"` + Memory *string `json:"memory,omitempty"` EnableRecovery *bool `json:"enableRecovery,omitempty"` } // StreamTable defines properties of outbox tables for FabricEventStreams type StreamTable struct { EventType string `json:"eventType"` - RecoveryEventType string `json:"recoveryEventType"` + RecoveryEventType string `json:"recoveryEventType,omitempty"` + IgnoreRecovery *bool `json:"ignoreRecovery,omitempty"` IdColumn *string `json:"idColumn,omitempty"` PayloadColumn *string `json:"payloadColumn,omitempty"` } diff --git a/pkg/apis/acid.zalan.do/v1/util_test.go b/pkg/apis/acid.zalan.do/v1/util_test.go index 71a629f31..5e4913ffe 100644 --- a/pkg/apis/acid.zalan.do/v1/util_test.go +++ b/pkg/apis/acid.zalan.do/v1/util_test.go @@ -26,6 +26,10 @@ var parseTimeTests = []struct { {"expect error as minute is out of range", "23:69", metav1.Now(), errors.New(`parsing time "23:69": minute out of range`)}, } +func stringToPointer(str string) *string { + return &str +} + var parseWeekdayTests = []struct { about string in string @@ -119,6 +123,8 @@ var maintenanceWindows = []struct { {"expect error as weekday is empty", []byte(`":00:00-10:00"`), MaintenanceWindow{}, errors.New(`could not parse weekday: incorrect weekday`)}, {"expect error as maintenance window set seconds", []byte(`"Mon:00:00:00-10:00:00"`), MaintenanceWindow{}, errors.New(`incorrect maintenance window format`)}, {"expect error as 'To' time set seconds", []byte(`"Mon:00:00-00:00:00"`), MaintenanceWindow{}, errors.New("could not parse end time: incorrect time format")}, + // ideally, should be implemented + {"expect error as 'To' has a weekday", []byte(`"Mon:00:00-Fri:00:00"`), MaintenanceWindow{}, errors.New("could not parse end time: incorrect time format")}, {"expect error as 'To' time is missing", []byte(`"Mon:00:00"`), MaintenanceWindow{}, errors.New("incorrect maintenance window format")}} var postgresStatus = []struct { @@ -213,7 +219,7 @@ var unmarshalCluster = []struct { "127.0.0.1/32" ], "postgresql": { - "version": "15", + "version": "17", "parameters": { "shared_buffers": "32MB", "max_connections": "10", @@ -273,7 +279,7 @@ var unmarshalCluster = []struct { }, Spec: PostgresSpec{ PostgresqlParam: PostgresqlParam{ - PgVersion: "15", + PgVersion: "17", Parameters: map[string]string{ "shared_buffers": "32MB", "max_connections": "10", @@ -301,8 +307,8 @@ var unmarshalCluster = []struct { Slots: map[string]map[string]string{"permanent_logical_1": {"type": "logical", "database": "foo", "plugin": "pgoutput"}}, }, Resources: &Resources{ - ResourceRequests: ResourceDescription{CPU: "10m", Memory: "50Mi"}, - ResourceLimits: ResourceDescription{CPU: "300m", Memory: "3000Mi"}, + ResourceRequests: ResourceDescription{CPU: stringToPointer("10m"), Memory: stringToPointer("50Mi")}, + ResourceLimits: ResourceDescription{CPU: stringToPointer("300m"), Memory: stringToPointer("3000Mi")}, }, TeamID: "acid", @@ -333,7 +339,7 @@ var unmarshalCluster = []struct { }, Error: "", }, - marshal: 
[]byte(`{"kind":"Postgresql","apiVersion":"acid.zalan.do/v1","metadata":{"name":"acid-testcluster1","creationTimestamp":null},"spec":{"postgresql":{"version":"15","parameters":{"log_statement":"all","max_connections":"10","shared_buffers":"32MB"}},"pod_priority_class_name":"spilo-pod-priority","volume":{"size":"5Gi","storageClass":"SSD", "subPath": "subdir"},"enableShmVolume":false,"patroni":{"initdb":{"data-checksums":"true","encoding":"UTF8","locale":"en_US.UTF-8"},"pg_hba":["hostssl all all 0.0.0.0/0 md5","host all all 0.0.0.0/0 md5"],"ttl":30,"loop_wait":10,"retry_timeout":10,"maximum_lag_on_failover":33554432,"slots":{"permanent_logical_1":{"database":"foo","plugin":"pgoutput","type":"logical"}}},"resources":{"requests":{"cpu":"10m","memory":"50Mi"},"limits":{"cpu":"300m","memory":"3000Mi"}},"teamId":"acid","allowedSourceRanges":["127.0.0.1/32"],"numberOfInstances":2,"users":{"zalando":["superuser","createdb"]},"maintenanceWindows":["Mon:01:00-06:00","Sat:00:00-04:00","05:00-05:15"],"clone":{"cluster":"acid-batman"}},"status":{"PostgresClusterStatus":""}}`), + marshal: []byte(`{"kind":"Postgresql","apiVersion":"acid.zalan.do/v1","metadata":{"name":"acid-testcluster1","creationTimestamp":null},"spec":{"postgresql":{"version":"17","parameters":{"log_statement":"all","max_connections":"10","shared_buffers":"32MB"}},"pod_priority_class_name":"spilo-pod-priority","volume":{"size":"5Gi","storageClass":"SSD", "subPath": "subdir"},"enableShmVolume":false,"patroni":{"initdb":{"data-checksums":"true","encoding":"UTF8","locale":"en_US.UTF-8"},"pg_hba":["hostssl all all 0.0.0.0/0 md5","host all all 0.0.0.0/0 md5"],"ttl":30,"loop_wait":10,"retry_timeout":10,"maximum_lag_on_failover":33554432,"slots":{"permanent_logical_1":{"database":"foo","plugin":"pgoutput","type":"logical"}}},"resources":{"requests":{"cpu":"10m","memory":"50Mi"},"limits":{"cpu":"300m","memory":"3000Mi"}},"teamId":"acid","allowedSourceRanges":["127.0.0.1/32"],"numberOfInstances":2,"users":{"zalando":["superuser","createdb"]},"maintenanceWindows":["Mon:01:00-06:00","Sat:00:00-04:00","05:00-05:15"],"clone":{"cluster":"acid-batman"}},"status":{"PostgresClusterStatus":""}}`), err: nil}, { about: "example with clone", @@ -398,7 +404,7 @@ var postgresqlList = []struct { out PostgresqlList err error }{ - {"expect success", []byte(`{"apiVersion":"v1","items":[{"apiVersion":"acid.zalan.do/v1","kind":"Postgresql","metadata":{"labels":{"team":"acid"},"name":"acid-testcluster42","namespace":"default","resourceVersion":"30446957","selfLink":"/apis/acid.zalan.do/v1/namespaces/default/postgresqls/acid-testcluster42","uid":"857cd208-33dc-11e7-b20a-0699041e4b03"},"spec":{"allowedSourceRanges":["185.85.220.0/22"],"numberOfInstances":1,"postgresql":{"version":"15"},"teamId":"acid","volume":{"size":"10Gi"}},"status":{"PostgresClusterStatus":"Running"}}],"kind":"List","metadata":{},"resourceVersion":"","selfLink":""}`), + {"expect success", []byte(`{"apiVersion":"v1","items":[{"apiVersion":"acid.zalan.do/v1","kind":"Postgresql","metadata":{"labels":{"team":"acid"},"name":"acid-testcluster42","namespace":"default","resourceVersion":"30446957","selfLink":"/apis/acid.zalan.do/v1/namespaces/default/postgresqls/acid-testcluster42","uid":"857cd208-33dc-11e7-b20a-0699041e4b03"},"spec":{"allowedSourceRanges":["185.85.220.0/22"],"numberOfInstances":1,"postgresql":{"version":"17"},"teamId":"acid","volume":{"size":"10Gi"}},"status":{"PostgresClusterStatus":"Running"}}],"kind":"List","metadata":{},"resourceVersion":"","selfLink":""}`), PostgresqlList{ TypeMeta: 
metav1.TypeMeta{ Kind: "List", @@ -419,7 +425,7 @@ var postgresqlList = []struct { }, Spec: PostgresSpec{ ClusterName: "testcluster42", - PostgresqlParam: PostgresqlParam{PgVersion: "15"}, + PostgresqlParam: PostgresqlParam{PgVersion: "17"}, Volume: Volume{Size: "10Gi"}, TeamID: "acid", AllowedSourceRanges: []string{"185.85.220.0/22"}, diff --git a/pkg/apis/acid.zalan.do/v1/zz_generated.deepcopy.go b/pkg/apis/acid.zalan.do/v1/zz_generated.deepcopy.go index 536feec73..5d0a5b341 100644 --- a/pkg/apis/acid.zalan.do/v1/zz_generated.deepcopy.go +++ b/pkg/apis/acid.zalan.do/v1/zz_generated.deepcopy.go @@ -2,7 +2,7 @@ // +build !ignore_autogenerated /* -Copyright 2023 Compose, Zalando SE +Copyright 2025 Compose, Zalando SE Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal @@ -53,6 +53,11 @@ func (in *AWSGCPConfiguration) DeepCopy() *AWSGCPConfiguration { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *AdditionalVolume) DeepCopyInto(out *AdditionalVolume) { *out = *in + if in.IsSubPathExpr != nil { + in, out := &in.IsSubPathExpr, &out.IsSubPathExpr + *out = new(bool) + **out = **in + } if in.TargetContainers != nil { in, out := &in.TargetContainers, &out.TargetContainers *out = make([]string, len(*in)) @@ -109,7 +114,7 @@ func (in *ConnectionPooler) DeepCopyInto(out *ConnectionPooler) { if in.Resources != nil { in, out := &in.Resources, &out.Resources *out = new(Resources) - **out = **in + (*in).DeepCopyInto(*out) } return } @@ -153,6 +158,11 @@ func (in *ConnectionPoolerConfiguration) DeepCopy() *ConnectionPoolerConfigurati // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *KubernetesMetaConfiguration) DeepCopyInto(out *KubernetesMetaConfiguration) { *out = *in + if in.EnableOwnerReferences != nil { + in, out := &in.EnableOwnerReferences, &out.EnableOwnerReferences + *out = new(bool) + **out = **in + } if in.SpiloAllowPrivilegeEscalation != nil { in, out := &in.SpiloAllowPrivilegeEscalation, &out.SpiloAllowPrivilegeEscalation *out = new(bool) @@ -178,6 +188,11 @@ func (in *KubernetesMetaConfiguration) DeepCopyInto(out *KubernetesMetaConfigura *out = make([]string, len(*in)) copy(*out, *in) } + if in.PDBMasterLabelSelector != nil { + in, out := &in.PDBMasterLabelSelector, &out.PDBMasterLabelSelector + *out = new(bool) + **out = **in + } if in.EnablePodDisruptionBudget != nil { in, out := &in.EnablePodDisruptionBudget, &out.EnablePodDisruptionBudget *out = new(bool) @@ -267,6 +282,21 @@ func (in *KubernetesMetaConfiguration) DeepCopyInto(out *KubernetesMetaConfigura (*out)[key] = val } } + if in.EnableSecretsDeletion != nil { + in, out := &in.EnableSecretsDeletion, &out.EnableSecretsDeletion + *out = new(bool) + **out = **in + } + if in.EnablePersistentVolumeClaimDeletion != nil { + in, out := &in.EnablePersistentVolumeClaimDeletion, &out.EnablePersistentVolumeClaimDeletion + *out = new(bool) + **out = **in + } + if in.EnableFinalizers != nil { + in, out := &in.EnableFinalizers, &out.EnableFinalizers + *out = new(bool) + **out = **in + } return } @@ -626,7 +656,7 @@ func (in *PostgresSpec) DeepCopyInto(out *PostgresSpec) { if in.Resources != nil { in, out := &in.Resources, &out.Resources *out = new(Resources) - **out = **in + (*in).DeepCopyInto(*out) } if in.EnableConnectionPooler != nil { in, out := &in.EnableConnectionPooler, &out.EnableConnectionPooler @@ -708,6 +738,11 @@ func (in *PostgresSpec) DeepCopyInto(out *PostgresSpec) { (*out)[key] = outVal } } + if in.UsersIgnoringSecretRotation != nil { + in, out := &in.UsersIgnoringSecretRotation, &out.UsersIgnoringSecretRotation + *out = make([]string, len(*in)) + copy(*out, *in) + } if in.UsersWithSecretRotation != nil { in, out := &in.UsersWithSecretRotation, &out.UsersWithSecretRotation *out = make([]string, len(*in)) @@ -1155,6 +1190,26 @@ func (in *PreparedSchema) DeepCopy() *PreparedSchema { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ResourceDescription) DeepCopyInto(out *ResourceDescription) { *out = *in + if in.CPU != nil { + in, out := &in.CPU, &out.CPU + *out = new(string) + **out = **in + } + if in.Memory != nil { + in, out := &in.Memory, &out.Memory + *out = new(string) + **out = **in + } + if in.HugePages2Mi != nil { + in, out := &in.HugePages2Mi, &out.HugePages2Mi + *out = new(string) + **out = **in + } + if in.HugePages1Gi != nil { + in, out := &in.HugePages1Gi, &out.HugePages1Gi + *out = new(string) + **out = **in + } return } @@ -1171,8 +1226,8 @@ func (in *ResourceDescription) DeepCopy() *ResourceDescription { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *Resources) DeepCopyInto(out *Resources) { *out = *in - out.ResourceRequests = in.ResourceRequests - out.ResourceLimits = in.ResourceLimits + in.ResourceRequests.DeepCopyInto(&out.ResourceRequests) + in.ResourceLimits.DeepCopyInto(&out.ResourceLimits) return } @@ -1208,7 +1263,7 @@ func (in *Sidecar) DeepCopyInto(out *Sidecar) { if in.Resources != nil { in, out := &in.Resources, &out.Resources *out = new(Resources) - **out = **in + (*in).DeepCopyInto(*out) } if in.Ports != nil { in, out := &in.Ports, &out.Ports @@ -1222,6 +1277,11 @@ func (in *Sidecar) DeepCopyInto(out *Sidecar) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.Command != nil { + in, out := &in.Command, &out.Command + *out = make([]string, len(*in)) + copy(*out, *in) + } return } @@ -1281,6 +1341,16 @@ func (in *Stream) DeepCopyInto(out *Stream) { *out = new(uint32) **out = **in } + if in.CPU != nil { + in, out := &in.CPU, &out.CPU + *out = new(string) + **out = **in + } + if in.Memory != nil { + in, out := &in.Memory, &out.Memory + *out = new(string) + **out = **in + } if in.EnableRecovery != nil { in, out := &in.EnableRecovery, &out.EnableRecovery *out = new(bool) @@ -1302,6 +1372,11 @@ func (in *Stream) DeepCopy() *Stream { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *StreamTable) DeepCopyInto(out *StreamTable) { *out = *in + if in.IgnoreRecovery != nil { + in, out := &in.IgnoreRecovery, &out.IgnoreRecovery + *out = new(bool) + **out = **in + } if in.IdColumn != nil { in, out := &in.IdColumn, &out.IdColumn *out = new(string) @@ -1402,6 +1477,11 @@ func (in *Volume) DeepCopyInto(out *Volume) { *out = new(metav1.LabelSelector) (*in).DeepCopyInto(*out) } + if in.IsSubPathExpr != nil { + in, out := &in.IsSubPathExpr, &out.IsSubPathExpr + *out = new(bool) + **out = **in + } if in.Iops != nil { in, out := &in.Iops, &out.Iops *out = new(int64) diff --git a/pkg/apis/zalando.org/v1/fabriceventstream.go b/pkg/apis/zalando.org/v1/fabriceventstream.go index 609f3c9bc..41bb5e80c 100644 --- a/pkg/apis/zalando.org/v1/fabriceventstream.go +++ b/pkg/apis/zalando.org/v1/fabriceventstream.go @@ -1,6 +1,7 @@ package v1 import ( + acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -89,3 +90,8 @@ type DBAuth struct { UserKey string `json:"userKey,omitempty"` PasswordKey string `json:"passwordKey,omitempty"` } + +type Slot struct { + Slot map[string]string `json:"slot"` + Publication map[string]acidv1.StreamTable `json:"publication"` +} diff --git a/pkg/cluster/cluster.go b/pkg/cluster/cluster.go index 992d665b2..e9a691faa 100644 --- a/pkg/cluster/cluster.go +++ b/pkg/cluster/cluster.go @@ -3,7 +3,6 @@ package cluster // Postgres CustomResourceDefinition object i.e. 
Spilo import ( - "context" "database/sql" "encoding/json" "fmt" @@ -15,6 +14,7 @@ import ( "github.com/sirupsen/logrus" acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1" + zalandov1 "github.com/zalando/postgres-operator/pkg/apis/zalando.org/v1" "github.com/zalando/postgres-operator/pkg/generated/clientset/versioned/scheme" "github.com/zalando/postgres-operator/pkg/spec" @@ -28,6 +28,7 @@ import ( "github.com/zalando/postgres-operator/pkg/util/users" "github.com/zalando/postgres-operator/pkg/util/volumes" appsv1 "k8s.io/api/apps/v1" + batchv1 "k8s.io/api/batch/v1" v1 "k8s.io/api/core/v1" policyv1 "k8s.io/api/policy/v1" rbacv1 "k8s.io/api/rbac/v1" @@ -44,6 +45,7 @@ var ( databaseNameRegexp = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$") userRegexp = regexp.MustCompile(`^[a-z0-9]([-_a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-_a-z0-9]*[a-z0-9])?)*$`) patroniObjectSuffixes = []string{"leader", "config", "sync", "failover"} + finalizerName = "postgres-operator.acid.zalan.do" ) // Config contains operator-wide clients and configuration used from a cluster. TODO: remove struct duplication. @@ -57,13 +59,18 @@ type Config struct { } type kubeResources struct { - Services map[PostgresRole]*v1.Service - Endpoints map[PostgresRole]*v1.Endpoints - Secrets map[types.UID]*v1.Secret - Statefulset *appsv1.StatefulSet - PodDisruptionBudget *policyv1.PodDisruptionBudget + Services map[PostgresRole]*v1.Service + Endpoints map[PostgresRole]*v1.Endpoints + PatroniEndpoints map[string]*v1.Endpoints + PatroniConfigMaps map[string]*v1.ConfigMap + Secrets map[types.UID]*v1.Secret + Statefulset *appsv1.StatefulSet + VolumeClaims map[types.UID]*v1.PersistentVolumeClaim + PrimaryPodDisruptionBudget *policyv1.PodDisruptionBudget + CriticalOpPodDisruptionBudget *policyv1.PodDisruptionBudget + LogicalBackupJob *batchv1.CronJob + Streams map[string]*zalandov1.FabricEventStream //Pods are treated separately - //PVCs are treated separately } // Cluster describes postgresql cluster @@ -99,10 +106,17 @@ type Cluster struct { } type compareStatefulsetResult struct { - match bool - replace bool - rollingUpdate bool - reasons []string + match bool + replace bool + rollingUpdate bool + reasons []string + deletedPodAnnotations []string +} + +type compareLogicalBackupJobResult struct { + match bool + reasons []string + deletedPodAnnotations []string } // New creates a new cluster. This function should be called from a controller. 
@@ -129,9 +143,13 @@ func New(cfg Config, kubeClient k8sutil.KubernetesClient, pgSpec acidv1.Postgres systemUsers: make(map[string]spec.PgUser), podSubscribers: make(map[spec.NamespacedName]chan PodEvent), kubeResources: kubeResources{ - Secrets: make(map[types.UID]*v1.Secret), - Services: make(map[PostgresRole]*v1.Service), - Endpoints: make(map[PostgresRole]*v1.Endpoints)}, + Secrets: make(map[types.UID]*v1.Secret), + Services: make(map[PostgresRole]*v1.Service), + Endpoints: make(map[PostgresRole]*v1.Endpoints), + PatroniEndpoints: make(map[string]*v1.Endpoints), + PatroniConfigMaps: make(map[string]*v1.ConfigMap), + VolumeClaims: make(map[types.UID]*v1.PersistentVolumeClaim), + Streams: make(map[string]*zalandov1.FabricEventStream)}, userSyncStrategy: users.DefaultUserSyncStrategy{ PasswordEncryption: passwordEncryption, RoleDeletionSuffix: cfg.OpConfig.RoleDeletionSuffix, @@ -246,20 +264,42 @@ func (c *Cluster) Create() (err error) { defer c.mu.Unlock() var ( - service *v1.Service - ep *v1.Endpoints - ss *appsv1.StatefulSet + pgCreateStatus *acidv1.Postgresql + service *v1.Service + ep *v1.Endpoints + ss *appsv1.StatefulSet ) defer func() { + var ( + pgUpdatedStatus *acidv1.Postgresql + errStatus error + ) if err == nil { - c.KubeClient.SetPostgresCRDStatus(c.clusterName(), acidv1.ClusterStatusRunning) //TODO: are you sure it's running? + pgUpdatedStatus, errStatus = c.KubeClient.SetPostgresCRDStatus(c.clusterName(), acidv1.ClusterStatusRunning) //TODO: are you sure it's running? } else { - c.KubeClient.SetPostgresCRDStatus(c.clusterName(), acidv1.ClusterStatusAddFailed) + c.logger.Warningf("cluster creation failed: %v", err) + pgUpdatedStatus, errStatus = c.KubeClient.SetPostgresCRDStatus(c.clusterName(), acidv1.ClusterStatusAddFailed) + } + if errStatus != nil { + c.logger.Warningf("could not set cluster status: %v", errStatus) + } + if pgUpdatedStatus != nil { + c.setSpec(pgUpdatedStatus) } }() - c.KubeClient.SetPostgresCRDStatus(c.clusterName(), acidv1.ClusterStatusCreating) + pgCreateStatus, err = c.KubeClient.SetPostgresCRDStatus(c.clusterName(), acidv1.ClusterStatusCreating) + if err != nil { + return fmt.Errorf("could not set cluster status: %v", err) + } + c.setSpec(pgCreateStatus) + + if c.OpConfig.EnableFinalizers != nil && *c.OpConfig.EnableFinalizers { + if err = c.addFinalizer(); err != nil { + return fmt.Errorf("could not add finalizer: %v", err) + } + } c.eventRecorder.Event(c.GetReference(), v1.EventTypeNormal, "Create", "Started creation of new cluster resources") for _, role := range []PostgresRole{Master, Replica} { @@ -304,14 +344,10 @@ func (c *Cluster) Create() (err error) { c.logger.Infof("secrets have been successfully created") c.eventRecorder.Event(c.GetReference(), v1.EventTypeNormal, "Secrets", "The secrets have been successfully created") - if c.PodDisruptionBudget != nil { - return fmt.Errorf("pod disruption budget already exists in the cluster") - } - pdb, err := c.createPodDisruptionBudget() - if err != nil { - return fmt.Errorf("could not create pod disruption budget: %v", err) + if err = c.createPodDisruptionBudgets(); err != nil { + return fmt.Errorf("could not create pod disruption budgets: %v", err) } - c.logger.Infof("pod disruption budget %q has been successfully created", util.NameFromMeta(pdb.ObjectMeta)) + c.logger.Info("pod disruption budgets have been successfully created") if c.Statefulset != nil { return fmt.Errorf("statefulset already exists in the cluster") @@ -332,6 +368,16 @@ func (c *Cluster) Create() (err error) {
c.logger.Infof("pods are ready") c.eventRecorder.Event(c.GetReference(), v1.EventTypeNormal, "StatefulSet", "Pods are ready") + // sync volume may already transition volumes to gp3, if iops/throughput or type is specified + if err = c.syncVolumes(); err != nil { + return err + } + + // sync resources created by Patroni + if err = c.syncPatroniResources(); err != nil { + c.logger.Warnf("Patroni resources not yet synced: %v", err) + } + // create database objects unless we are running without pods or disabled // that feature explicitly if !(c.databaseAccessDisabled() || c.getNumberOfInstances(&c.Spec) <= 0 || c.Spec.StandbyCluster != nil) { @@ -357,10 +403,6 @@ func (c *Cluster) Create() (err error) { c.logger.Info("a k8s cron job for logical backup has been successfully created") } - if err := c.listResources(); err != nil { - c.logger.Errorf("could not list resources: %v", err) - } - // Create connection pooler deployment and services if necessary. Since we // need to perform some operations with the database itself (e.g. install // lookup function), do it as the last step, when everything is available. @@ -385,10 +427,15 @@ func (c *Cluster) Create() (err error) { } } + if err := c.listResources(); err != nil { + c.logger.Errorf("could not list resources: %v", err) + } + return nil } func (c *Cluster) compareStatefulSetWith(statefulSet *appsv1.StatefulSet) *compareStatefulsetResult { + deletedPodAnnotations := []string{} reasons := make([]string, 0) var match, needsRollUpdate, needsReplace bool @@ -398,7 +445,12 @@ func (c *Cluster) compareStatefulSetWith(statefulSet *appsv1.StatefulSet) *compa match = false reasons = append(reasons, "new statefulset's number of replicas does not match the current one") } - if changed, reason := c.compareAnnotations(c.Statefulset.Annotations, statefulSet.Annotations); changed { + if !reflect.DeepEqual(c.Statefulset.OwnerReferences, statefulSet.OwnerReferences) { + match = false + needsReplace = true + reasons = append(reasons, "new statefulset's ownerReferences do not match") + } + if changed, reason := c.compareAnnotations(c.Statefulset.Annotations, statefulSet.Annotations, nil); changed { match = false needsReplace = true reasons = append(reasons, "new statefulset's annotations do not match: "+reason) @@ -409,14 +461,20 @@ func (c *Cluster) compareStatefulSetWith(statefulSet *appsv1.StatefulSet) *compa reasons = append(reasons, "new statefulset's pod management policy do not match") } + if c.Statefulset.Spec.PersistentVolumeClaimRetentionPolicy == nil { + c.Statefulset.Spec.PersistentVolumeClaimRetentionPolicy = &appsv1.StatefulSetPersistentVolumeClaimRetentionPolicy{ + WhenDeleted: appsv1.RetainPersistentVolumeClaimRetentionPolicyType, + WhenScaled: appsv1.RetainPersistentVolumeClaimRetentionPolicyType, + } + } if !reflect.DeepEqual(c.Statefulset.Spec.PersistentVolumeClaimRetentionPolicy, statefulSet.Spec.PersistentVolumeClaimRetentionPolicy) { match = false needsReplace = true reasons = append(reasons, "new statefulset's persistent volume claim retention policy do not match") } - needsRollUpdate, reasons = c.compareContainers("initContainers", c.Statefulset.Spec.Template.Spec.InitContainers, statefulSet.Spec.Template.Spec.InitContainers, needsRollUpdate, reasons) - needsRollUpdate, reasons = c.compareContainers("containers", c.Statefulset.Spec.Template.Spec.Containers, statefulSet.Spec.Template.Spec.Containers, needsRollUpdate, reasons) + needsRollUpdate, reasons = c.compareContainers("statefulset initContainers", 
c.Statefulset.Spec.Template.Spec.InitContainers, statefulSet.Spec.Template.Spec.InitContainers, needsRollUpdate, reasons) + needsRollUpdate, reasons = c.compareContainers("statefulset containers", c.Statefulset.Spec.Template.Spec.Containers, statefulSet.Spec.Template.Spec.Containers, needsRollUpdate, reasons) if len(c.Statefulset.Spec.Template.Spec.Containers) == 0 { c.logger.Warningf("statefulset %q has no container", util.NameFromMeta(c.Statefulset.ObjectMeta)) @@ -466,10 +524,9 @@ func (c *Cluster) compareStatefulSetWith(statefulSet *appsv1.StatefulSet) *compa } } - if changed, reason := c.compareAnnotations(c.Statefulset.Spec.Template.Annotations, statefulSet.Spec.Template.Annotations); changed { + if changed, reason := c.compareAnnotations(c.Statefulset.Spec.Template.Annotations, statefulSet.Spec.Template.Annotations, &deletedPodAnnotations); changed { match = false needsReplace = true - needsRollUpdate = true reasons = append(reasons, "new statefulset's pod template metadata annotations does not match "+reason) } if !reflect.DeepEqual(c.Statefulset.Spec.Template.Spec.SecurityContext, statefulSet.Spec.Template.Spec.SecurityContext) { @@ -480,23 +537,24 @@ func (c *Cluster) compareStatefulSetWith(statefulSet *appsv1.StatefulSet) *compa if len(c.Statefulset.Spec.VolumeClaimTemplates) != len(statefulSet.Spec.VolumeClaimTemplates) { needsReplace = true reasons = append(reasons, "new statefulset's volumeClaimTemplates contains different number of volumes to the old one") - } - for i := 0; i < len(c.Statefulset.Spec.VolumeClaimTemplates); i++ { - name := c.Statefulset.Spec.VolumeClaimTemplates[i].Name - // Some generated fields like creationTimestamp make it not possible to use DeepCompare on ObjectMeta - if name != statefulSet.Spec.VolumeClaimTemplates[i].Name { - needsReplace = true - reasons = append(reasons, fmt.Sprintf("new statefulset's name for volume %d does not match the current one", i)) - continue - } - if !reflect.DeepEqual(c.Statefulset.Spec.VolumeClaimTemplates[i].Annotations, statefulSet.Spec.VolumeClaimTemplates[i].Annotations) { - needsReplace = true - reasons = append(reasons, fmt.Sprintf("new statefulset's annotations for volume %q does not match the current one", name)) - } - if !reflect.DeepEqual(c.Statefulset.Spec.VolumeClaimTemplates[i].Spec, statefulSet.Spec.VolumeClaimTemplates[i].Spec) { + } else { + for i := 0; i < len(c.Statefulset.Spec.VolumeClaimTemplates); i++ { name := c.Statefulset.Spec.VolumeClaimTemplates[i].Name - needsReplace = true - reasons = append(reasons, fmt.Sprintf("new statefulset's volumeClaimTemplates specification for volume %q does not match the current one", name)) + // Some generated fields like creationTimestamp make it not possible to use DeepCompare on ObjectMeta + if name != statefulSet.Spec.VolumeClaimTemplates[i].Name { + needsReplace = true + reasons = append(reasons, fmt.Sprintf("new statefulset's name for volume %d does not match the current one", i)) + continue + } + if changed, reason := c.compareAnnotations(c.Statefulset.Spec.VolumeClaimTemplates[i].Annotations, statefulSet.Spec.VolumeClaimTemplates[i].Annotations, nil); changed { + needsReplace = true + reasons = append(reasons, fmt.Sprintf("new statefulset's annotations for volume %q do not match the current ones: %s", name, reason)) + } + if !reflect.DeepEqual(c.Statefulset.Spec.VolumeClaimTemplates[i].Spec, statefulSet.Spec.VolumeClaimTemplates[i].Spec) { + name := c.Statefulset.Spec.VolumeClaimTemplates[i].Name + needsReplace = true + reasons = append(reasons, 
fmt.Sprintf("new statefulset's volumeClaimTemplates specification for volume %q does not match the current one", name)) + } } } @@ -526,7 +584,7 @@ func (c *Cluster) compareStatefulSetWith(statefulSet *appsv1.StatefulSet) *compa match = false } - return &compareStatefulsetResult{match: match, reasons: reasons, rollingUpdate: needsRollUpdate, replace: needsReplace} + return &compareStatefulsetResult{match: match, reasons: reasons, rollingUpdate: needsRollUpdate, replace: needsReplace, deletedPodAnnotations: deletedPodAnnotations} } type containerCondition func(a, b v1.Container) bool @@ -547,30 +605,30 @@ func newCheck(msg string, cond containerCondition) containerCheck { func (c *Cluster) compareContainers(description string, setA, setB []v1.Container, needsRollUpdate bool, reasons []string) (bool, []string) { if len(setA) != len(setB) { - return true, append(reasons, fmt.Sprintf("new statefulset %s's length does not match the current ones", description)) + return true, append(reasons, fmt.Sprintf("new %s's length does not match the current ones", description)) } checks := []containerCheck{ - newCheck("new statefulset %s's %s (index %d) name does not match the current one", + newCheck("new %s's %s (index %d) name does not match the current one", func(a, b v1.Container) bool { return a.Name != b.Name }), - newCheck("new statefulset %s's %s (index %d) readiness probe does not match the current one", + newCheck("new %s's %s (index %d) readiness probe does not match the current one", func(a, b v1.Container) bool { return !reflect.DeepEqual(a.ReadinessProbe, b.ReadinessProbe) }), - newCheck("new statefulset %s's %s (index %d) ports do not match the current one", + newCheck("new %s's %s (index %d) ports do not match the current one", func(a, b v1.Container) bool { return !comparePorts(a.Ports, b.Ports) }), - newCheck("new statefulset %s's %s (index %d) resources do not match the current ones", + newCheck("new %s's %s (index %d) resources do not match the current ones", func(a, b v1.Container) bool { return !compareResources(&a.Resources, &b.Resources) }), - newCheck("new statefulset %s's %s (index %d) environment does not match the current one", + newCheck("new %s's %s (index %d) environment does not match the current one", func(a, b v1.Container) bool { return !compareEnv(a.Env, b.Env) }), - newCheck("new statefulset %s's %s (index %d) environment sources do not match the current one", + newCheck("new %s's %s (index %d) environment sources do not match the current one", func(a, b v1.Container) bool { return !reflect.DeepEqual(a.EnvFrom, b.EnvFrom) }), - newCheck("new statefulset %s's %s (index %d) security context does not match the current one", + newCheck("new %s's %s (index %d) security context does not match the current one", func(a, b v1.Container) bool { return !reflect.DeepEqual(a.SecurityContext, b.SecurityContext) }), - newCheck("new statefulset %s's %s (index %d) volume mounts do not match the current one", - func(a, b v1.Container) bool { return !reflect.DeepEqual(a.VolumeMounts, b.VolumeMounts) }), + newCheck("new %s's %s (index %d) volume mounts do not match the current one", + func(a, b v1.Container) bool { return !compareVolumeMounts(a.VolumeMounts, b.VolumeMounts) }), } if !c.OpConfig.EnableLazySpiloUpgrade { - checks = append(checks, newCheck("new statefulset %s's %s (index %d) image does not match the current one", + checks = append(checks, newCheck("new %s's %s (index %d) image does not match the current one", func(a, b v1.Container) bool { return a.Image != b.Image })) } @@ 
-621,7 +679,7 @@ func compareEnv(a, b []v1.EnvVar) bool { if len(a) != len(b) { return false } - equal := true + var equal bool for _, enva := range a { hasmatch := false for _, envb := range b { @@ -707,7 +765,28 @@ func comparePorts(a, b []v1.ContainerPort) bool { return true } -func (c *Cluster) compareAnnotations(old, new map[string]string) (bool, string) { +func compareVolumeMounts(old, new []v1.VolumeMount) bool { + if len(old) != len(new) { + return false + } + for _, mount := range old { + if !volumeMountExists(mount, new) { + return false + } + } + return true +} + +func volumeMountExists(mount v1.VolumeMount, mounts []v1.VolumeMount) bool { + for _, m := range mounts { + if reflect.DeepEqual(mount, m) { + return true + } + } + return false +} + +func (c *Cluster) compareAnnotations(old, new map[string]string, removedList *[]string) (bool, string) { reason := "" ignoredAnnotations := make(map[string]bool) for _, ignore := range c.OpConfig.IgnoredAnnotations { @@ -720,6 +799,9 @@ func (c *Cluster) compareAnnotations(old, new map[string]string) (bool, string) } if _, ok := new[key]; !ok { reason += fmt.Sprintf(" Removed %q.", key) + if removedList != nil { + *removedList = append(*removedList, key) + } } } @@ -755,13 +837,127 @@ func (c *Cluster) compareServices(old, new *v1.Service) (bool, string) { } } - if changed, reason := c.compareAnnotations(old.Annotations, new.Annotations); changed { - return !changed, "new service's annotations does not match the current one:" + reason + if !reflect.DeepEqual(old.ObjectMeta.OwnerReferences, new.ObjectMeta.OwnerReferences) { + return false, "new service's owner references do not match the current ones" + } + + return true, "" +} + +func (c *Cluster) compareLogicalBackupJob(cur, new *batchv1.CronJob) *compareLogicalBackupJobResult { + deletedPodAnnotations := []string{} + reasons := make([]string, 0) + match := true + + if cur.Spec.Schedule != new.Spec.Schedule { + match = false + reasons = append(reasons, fmt.Sprintf("new job's schedule %q does not match the current one %q", new.Spec.Schedule, cur.Spec.Schedule)) + } + + newImage := new.Spec.JobTemplate.Spec.Template.Spec.Containers[0].Image + curImage := cur.Spec.JobTemplate.Spec.Template.Spec.Containers[0].Image + if newImage != curImage { + match = false + reasons = append(reasons, fmt.Sprintf("new job's image %q does not match the current one %q", newImage, curImage)) } + newPodAnnotation := new.Spec.JobTemplate.Spec.Template.Annotations + curPodAnnotation := cur.Spec.JobTemplate.Spec.Template.Annotations + if changed, reason := c.compareAnnotations(curPodAnnotation, newPodAnnotation, &deletedPodAnnotations); changed { + match = false + reasons = append(reasons, fmt.Sprint("new job's pod template metadata annotations do not match "+reason)) + } + + newPgVersion := getPgVersion(new) + curPgVersion := getPgVersion(cur) + if newPgVersion != curPgVersion { + match = false + reasons = append(reasons, fmt.Sprintf("new job's env PG_VERSION %q does not match the current one %q", newPgVersion, curPgVersion)) + } + + needsReplace := false + contReasons := make([]string, 0) + needsReplace, contReasons = c.compareContainers("cronjob container", cur.Spec.JobTemplate.Spec.Template.Spec.Containers, new.Spec.JobTemplate.Spec.Template.Spec.Containers, needsReplace, contReasons) + if needsReplace { + match = false + reasons = append(reasons, fmt.Sprintf("logical backup container specs do not match: %v", strings.Join(contReasons, `', '`))) + } + + return &compareLogicalBackupJobResult{match: match, 
reasons: reasons, deletedPodAnnotations: deletedPodAnnotations} +} + +func (c *Cluster) comparePodDisruptionBudget(cur, new *policyv1.PodDisruptionBudget) (bool, string) { + //TODO: improve comparison + if !reflect.DeepEqual(new.Spec, cur.Spec) { + return false, "new PDB's spec does not match the current one" + } + if !reflect.DeepEqual(new.ObjectMeta.OwnerReferences, cur.ObjectMeta.OwnerReferences) { + return false, "new PDB's owner references do not match the current ones" + } + if changed, reason := c.compareAnnotations(cur.Annotations, new.Annotations, nil); changed { + return false, "new PDB's annotations do not match the current ones:" + reason + } return true, "" } +func getPgVersion(cronJob *batchv1.CronJob) string { + envs := cronJob.Spec.JobTemplate.Spec.Template.Spec.Containers[0].Env + for _, env := range envs { + if env.Name == "PG_VERSION" { + return env.Value + } + } + return "" +} + +// addFinalizer patches the postgresql CR to add finalizer +func (c *Cluster) addFinalizer() error { + if c.hasFinalizer() { + return nil + } + + c.logger.Infof("adding finalizer %s", finalizerName) + finalizers := append(c.ObjectMeta.Finalizers, finalizerName) + newSpec, err := c.KubeClient.SetFinalizer(c.clusterName(), c.DeepCopy(), finalizers) + if err != nil { + return fmt.Errorf("error adding finalizer: %v", err) + } + + // update the spec, maintaining the new resourceVersion + c.setSpec(newSpec) + + return nil +} + +// removeFinalizer patches postgresql CR to remove finalizer +func (c *Cluster) removeFinalizer() error { + if !c.hasFinalizer() { + return nil + } + + c.logger.Infof("removing finalizer %s", finalizerName) + finalizers := util.RemoveString(c.ObjectMeta.Finalizers, finalizerName) + newSpec, err := c.KubeClient.SetFinalizer(c.clusterName(), c.DeepCopy(), finalizers) + if err != nil { + return fmt.Errorf("error removing finalizer: %v", err) + } + + // update the spec, maintaining the new resourceVersion. + c.setSpec(newSpec) + + return nil +} + +// hasFinalizer checks if finalizer is currently set or not +func (c *Cluster) hasFinalizer() bool { + for _, finalizer := range c.ObjectMeta.Finalizers { + if finalizer == finalizerName { + return true + } + } + return false +} + // Update changes Kubernetes objects according to the new specification. Unlike the sync case, the missing object // (i.e. 
service) is treated as an error // logical backup cron jobs are an exception: a user-initiated Update can enable a logical backup job @@ -769,19 +965,33 @@ func (c *Cluster) compareServices(old, new *v1.Service) (bool, string) { func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error { updateFailed := false userInitFailed := false - syncStatefulSet := false c.mu.Lock() defer c.mu.Unlock() c.KubeClient.SetPostgresCRDStatus(c.clusterName(), acidv1.ClusterStatusUpdating) + + if !isInMaintenanceWindow(newSpec.Spec.MaintenanceWindows) { + // do not apply any major version related changes yet + newSpec.Spec.PostgresqlParam.PgVersion = oldSpec.Spec.PostgresqlParam.PgVersion + } c.setSpec(newSpec) defer func() { + var ( + pgUpdatedStatus *acidv1.Postgresql + err error + ) if updateFailed { - c.KubeClient.SetPostgresCRDStatus(c.clusterName(), acidv1.ClusterStatusUpdateFailed) + pgUpdatedStatus, err = c.KubeClient.SetPostgresCRDStatus(c.clusterName(), acidv1.ClusterStatusUpdateFailed) } else { - c.KubeClient.SetPostgresCRDStatus(c.clusterName(), acidv1.ClusterStatusRunning) + pgUpdatedStatus, err = c.KubeClient.SetPostgresCRDStatus(c.clusterName(), acidv1.ClusterStatusRunning) + } + if err != nil { + c.logger.Warningf("could not set cluster status: %v", err) + } + if pgUpdatedStatus != nil { + c.setSpec(pgUpdatedStatus) } }() @@ -790,7 +1000,6 @@ func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error { if IsBiggerPostgresVersion(oldSpec.Spec.PostgresqlParam.PgVersion, c.GetDesiredMajorVersion()) { c.logger.Infof("postgresql version increased (%s -> %s), depending on config manual upgrade needed", oldSpec.Spec.PostgresqlParam.PgVersion, newSpec.Spec.PostgresqlParam.PgVersion) - syncStatefulSet = true } else { c.logger.Infof("postgresql major version unchanged or smaller, no changes needed") // sticking with old version, this will also advance GetDesiredVersion next time. 
@@ -798,12 +1007,15 @@ func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error { } // Service - if !reflect.DeepEqual(c.generateService(Master, &oldSpec.Spec), c.generateService(Master, &newSpec.Spec)) || - !reflect.DeepEqual(c.generateService(Replica, &oldSpec.Spec), c.generateService(Replica, &newSpec.Spec)) { - if err := c.syncServices(); err != nil { - c.logger.Errorf("could not sync services: %v", err) - updateFailed = true - } + if err := c.syncServices(); err != nil { + c.logger.Errorf("could not sync services: %v", err) + updateFailed = true + } + + // Patroni service and endpoints / config maps + if err := c.syncPatroniResources(); err != nil { + c.logger.Errorf("could not sync Patroni resources: %v", err) + updateFailed = true } // Users @@ -822,8 +1034,19 @@ func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error { // only when streams were not specified in oldSpec but in newSpec needStreamUser := len(oldSpec.Spec.Streams) == 0 && len(newSpec.Spec.Streams) > 0 - if !sameUsers || !sameRotatedUsers || needPoolerUser || needStreamUser { - c.logger.Debugf("initialize users") + initUsers := !sameUsers || !sameRotatedUsers || needPoolerUser || needStreamUser + + // if inherited annotations differ, secrets have to be synced on update + newAnnotations := c.annotationsSet(nil) + oldAnnotations := make(map[string]string) + for _, secret := range c.Secrets { + oldAnnotations = secret.ObjectMeta.Annotations + break + } + annotationsChanged, _ := c.compareAnnotations(oldAnnotations, newAnnotations, nil) + + if initUsers || annotationsChanged { + c.logger.Debug("initialize users") if err := c.initUsers(); err != nil { c.logger.Errorf("could not init users - skipping sync of secrets and databases: %v", err) userInitFailed = true @@ -831,7 +1054,7 @@ func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error { return } - c.logger.Debugf("syncing secrets") + c.logger.Debug("syncing secrets") //TODO: mind the secrets of the deleted/new users if err := c.syncSecrets(); err != nil { c.logger.Errorf("could not sync secrets: %v", err) @@ -844,56 +1067,36 @@ func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error { if c.OpConfig.StorageResizeMode != "off" { c.syncVolumes() } else { - c.logger.Infof("Storage resize is disabled (storage_resize_mode is off). Skipping volume sync.") - } - - // streams configuration - if len(oldSpec.Spec.Streams) == 0 && len(newSpec.Spec.Streams) > 0 { - syncStatefulSet = true + c.logger.Infof("Storage resize is disabled (storage_resize_mode is off).
Skipping volume size sync.") } // Statefulset func() { - oldSs, err := c.generateStatefulSet(&oldSpec.Spec) - if err != nil { - c.logger.Errorf("could not generate old statefulset spec: %v", err) + if err := c.syncStatefulSet(); err != nil { + c.logger.Errorf("could not sync statefulsets: %v", err) updateFailed = true - return - } - - newSs, err := c.generateStatefulSet(&newSpec.Spec) - if err != nil { - c.logger.Errorf("could not generate new statefulset spec: %v", err) - updateFailed = true - return - } - - if syncStatefulSet || !reflect.DeepEqual(oldSs, newSs) { - c.logger.Debugf("syncing statefulsets") - syncStatefulSet = false - // TODO: avoid generating the StatefulSet object twice by passing it to syncStatefulSet - if err := c.syncStatefulSet(); err != nil { - c.logger.Errorf("could not sync statefulsets: %v", err) - updateFailed = true - } } }() - // pod disruption budget - if oldSpec.Spec.NumberOfInstances != newSpec.Spec.NumberOfInstances { - c.logger.Debug("syncing pod disruption budgets") - if err := c.syncPodDisruptionBudget(true); err != nil { - c.logger.Errorf("could not sync pod disruption budget: %v", err) - updateFailed = true + // add or remove standby_cluster section from Patroni config depending on changes in standby section + if !reflect.DeepEqual(oldSpec.Spec.StandbyCluster, newSpec.Spec.StandbyCluster) { + if err := c.syncStandbyClusterConfiguration(); err != nil { + return fmt.Errorf("could not set StandbyCluster configuration options: %v", err) } } + // pod disruption budgets + if err := c.syncPodDisruptionBudgets(true); err != nil { + c.logger.Errorf("could not sync pod disruption budgets: %v", err) + updateFailed = true + } + // logical backup job func() { // create if it did not exist if !oldSpec.Spec.EnableLogicalBackup && newSpec.Spec.EnableLogicalBackup { - c.logger.Debugf("creating backup cron job") + c.logger.Debug("creating backup cron job") if err := c.createLogicalBackupJob(); err != nil { c.logger.Errorf("could not create a k8s cron job for logical backups: %v", err) updateFailed = true @@ -903,7 +1106,7 @@ func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error { // delete if no longer needed if oldSpec.Spec.EnableLogicalBackup && !newSpec.Spec.EnableLogicalBackup { - c.logger.Debugf("deleting backup cron job") + c.logger.Debug("deleting backup cron job") if err := c.deleteLogicalBackupJob(); err != nil { c.logger.Errorf("could not delete a k8s cron job for logical backups: %v", err) updateFailed = true @@ -912,11 +1115,7 @@ func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error { } - // apply schedule changes - // this is the only parameter of logical backups a user can overwrite in the cluster manifest - if (oldSpec.Spec.EnableLogicalBackup && newSpec.Spec.EnableLogicalBackup) && - (newSpec.Spec.LogicalBackupSchedule != oldSpec.Spec.LogicalBackupSchedule) { - c.logger.Debugf("updating schedule of the backup cron job") + if oldSpec.Spec.EnableLogicalBackup && newSpec.Spec.EnableLogicalBackup { if err := c.syncLogicalBackupJob(); err != nil { c.logger.Errorf("could not sync logical backup jobs: %v", err) updateFailed = true @@ -927,7 +1126,7 @@ func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error { // Roles and Databases if !userInitFailed && !(c.databaseAccessDisabled() || c.getNumberOfInstances(&c.Spec) <= 0 || c.Spec.StandbyCluster != nil) { - c.logger.Debugf("syncing roles") + c.logger.Debug("syncing roles") if err := c.syncRoles(); err != nil { c.logger.Errorf("could not sync roles: %v", err) updateFailed = 
true @@ -960,7 +1159,8 @@ func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error { } // streams - if len(newSpec.Spec.Streams) > 0 { + if len(newSpec.Spec.Streams) > 0 || len(oldSpec.Spec.Streams) != len(newSpec.Spec.Streams) { + c.logger.Debug("syncing streams") if err := c.syncStreams(); err != nil { c.logger.Errorf("could not sync streams: %v", err) updateFailed = true @@ -997,48 +1197,68 @@ func syncResources(a, b *v1.ResourceRequirements) bool { // DCS, reuses the master's endpoint to store the leader related metadata. If we remove the endpoint // before the pods, it will be re-created by the current master pod and will remain, obstructing the // creation of the new cluster with the same name. Therefore, the endpoints should be deleted last. -func (c *Cluster) Delete() { +func (c *Cluster) Delete() error { + var anyErrors = false c.mu.Lock() defer c.mu.Unlock() - c.eventRecorder.Event(c.GetReference(), v1.EventTypeNormal, "Delete", "Started deletion of new cluster resources") + c.eventRecorder.Event(c.GetReference(), v1.EventTypeNormal, "Delete", "Started deletion of cluster resources") if err := c.deleteStreams(); err != nil { + anyErrors = true c.logger.Warningf("could not delete event streams: %v", err) + c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeWarning, "Delete", "could not delete event streams: %v", err) } // delete the backup job before the stateful set of the cluster to prevent connections to non-existing pods // deleting the cron job also removes pods and batch jobs it created if err := c.deleteLogicalBackupJob(); err != nil { + anyErrors = true c.logger.Warningf("could not remove the logical backup k8s cron job; %v", err) + c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeWarning, "Delete", "could not remove the logical backup k8s cron job; %v", err) } if err := c.deleteStatefulSet(); err != nil { + anyErrors = true c.logger.Warningf("could not delete statefulset: %v", err) + c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeWarning, "Delete", "could not delete statefulset: %v", err) } - if err := c.deleteSecrets(); err != nil { - c.logger.Warningf("could not delete secrets: %v", err) + if c.OpConfig.EnableSecretsDeletion != nil && *c.OpConfig.EnableSecretsDeletion { + if err := c.deleteSecrets(); err != nil { + anyErrors = true + c.logger.Warningf("could not delete secrets: %v", err) + c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeWarning, "Delete", "could not delete secrets: %v", err) + } + } else { + c.logger.Info("not deleting secrets because disabled in configuration") } - if err := c.deletePodDisruptionBudget(); err != nil { - c.logger.Warningf("could not delete pod disruption budget: %v", err) + if err := c.deletePodDisruptionBudgets(); err != nil { + anyErrors = true + c.logger.Warningf("could not delete pod disruption budgets: %v", err) + c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeWarning, "Delete", "could not delete pod disruption budgets: %v", err) } for _, role := range []PostgresRole{Master, Replica} { - if !c.patroniKubernetesUseConfigMaps() { if err := c.deleteEndpoint(role); err != nil { + anyErrors = true c.logger.Warningf("could not delete %s endpoint: %v", role, err) + c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeWarning, "Delete", "could not delete %s endpoint: %v", role, err) } } if err := c.deleteService(role); err != nil { + anyErrors = true c.logger.Warningf("could not delete %s service: %v", role, err) + c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeWarning, "Delete", "could not delete %s 
service: %v", role, err) } } - if err := c.deletePatroniClusterObjects(); err != nil { - c.logger.Warningf("could not remove leftover patroni objects; %v", err) + if err := c.deletePatroniResources(); err != nil { + anyErrors = true + c.logger.Warningf("could not delete all Patroni resources: %v", err) + c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeWarning, "Delete", "could not delete all Patroni resources: %v", err) } // Delete connection pooler objects anyway, even if it's not mentioned in the @@ -1046,10 +1266,22 @@ func (c *Cluster) Delete() { // wrong for _, role := range [2]PostgresRole{Master, Replica} { if err := c.deleteConnectionPooler(role); err != nil { + anyErrors = true c.logger.Warningf("could not remove connection pooler: %v", err) + c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeWarning, "Delete", "could not remove connection pooler: %v", err) } } + // If we are done deleting our various resources we remove the finalizer to let K8S finally delete the Postgres CR + if anyErrors { + c.eventRecorder.Event(c.GetReference(), v1.EventTypeWarning, "Delete", "some resources could be successfully deleted yet") + return fmt.Errorf("some error(s) occured when deleting resources, NOT removing finalizer yet") + } + if err := c.removeFinalizer(); err != nil { + return fmt.Errorf("done cleaning up, but error when removing finalizer: %v", err) + } + + return nil } // NeedsRepair returns true if the cluster should be included in the repair scan (based on its in-memory status). @@ -1067,7 +1299,7 @@ func (c *Cluster) ReceivePodEvent(event PodEvent) { } } -func (c *Cluster) processPodEvent(obj interface{}) error { +func (c *Cluster) processPodEvent(obj interface{}, isInInitialList bool) error { event, ok := obj.(PodEvent) if !ok { return fmt.Errorf("could not cast to PodEvent") @@ -1190,18 +1422,18 @@ func (c *Cluster) initPreparedDatabaseRoles() error { preparedSchemas = map[string]acidv1.PreparedSchema{"data": {DefaultRoles: util.True()}} } - var searchPath strings.Builder - searchPath.WriteString(constants.DefaultSearchPath) + searchPathArr := []string{constants.DefaultSearchPath} for preparedSchemaName := range preparedSchemas { - searchPath.WriteString(", " + preparedSchemaName) + searchPathArr = append(searchPathArr, fmt.Sprintf("%q", preparedSchemaName)) } + searchPath := strings.Join(searchPathArr, ", ") // default roles per database - if err := c.initDefaultRoles(defaultRoles, "admin", preparedDbName, searchPath.String(), preparedDB.SecretNamespace); err != nil { + if err := c.initDefaultRoles(defaultRoles, "admin", preparedDbName, searchPath, preparedDB.SecretNamespace); err != nil { return fmt.Errorf("could not initialize default roles for database %s: %v", preparedDbName, err) } if preparedDB.DefaultUsers { - if err := c.initDefaultRoles(defaultUsers, "admin", preparedDbName, searchPath.String(), preparedDB.SecretNamespace); err != nil { + if err := c.initDefaultRoles(defaultUsers, "admin", preparedDbName, searchPath, preparedDB.SecretNamespace); err != nil { return fmt.Errorf("could not initialize default roles for database %s: %v", preparedDbName, err) } } @@ -1212,14 +1444,16 @@ func (c *Cluster) initPreparedDatabaseRoles() error { if err := c.initDefaultRoles(defaultRoles, preparedDbName+constants.OwnerRoleNameSuffix, preparedDbName+"_"+preparedSchemaName, - constants.DefaultSearchPath+", "+preparedSchemaName, preparedDB.SecretNamespace); err != nil { + fmt.Sprintf("%s, %q", constants.DefaultSearchPath, preparedSchemaName), + preparedDB.SecretNamespace); err != 
nil { return fmt.Errorf("could not initialize default roles for database schema %s: %v", preparedSchemaName, err) } if preparedSchema.DefaultUsers { if err := c.initDefaultRoles(defaultUsers, preparedDbName+constants.OwnerRoleNameSuffix, preparedDbName+"_"+preparedSchemaName, - constants.DefaultSearchPath+", "+preparedSchemaName, preparedDB.SecretNamespace); err != nil { + fmt.Sprintf("%s, %q", constants.DefaultSearchPath, preparedSchemaName), + preparedDB.SecretNamespace); err != nil { return fmt.Errorf("could not initialize default users for database schema %s: %v", preparedSchemaName, err) } } @@ -1501,16 +1735,17 @@ func (c *Cluster) GetCurrentProcess() Process { // GetStatus provides status of the cluster func (c *Cluster) GetStatus() *ClusterStatus { status := &ClusterStatus{ - Cluster: c.Name, - Namespace: c.Namespace, - Team: c.Spec.TeamID, - Status: c.Status, - Spec: c.Spec, - MasterService: c.GetServiceMaster(), - ReplicaService: c.GetServiceReplica(), - StatefulSet: c.GetStatefulSet(), - PodDisruptionBudget: c.GetPodDisruptionBudget(), - CurrentProcess: c.GetCurrentProcess(), + Cluster: c.Name, + Namespace: c.Namespace, + Team: c.Spec.TeamID, + Status: c.Status, + Spec: c.Spec, + MasterService: c.GetServiceMaster(), + ReplicaService: c.GetServiceReplica(), + StatefulSet: c.GetStatefulSet(), + PrimaryPodDisruptionBudget: c.GetPrimaryPodDisruptionBudget(), + CriticalOpPodDisruptionBudget: c.GetCriticalOpPodDisruptionBudget(), + CurrentProcess: c.GetCurrentProcess(), Error: fmt.Errorf("error: %s", c.Error), } @@ -1523,18 +1758,58 @@ func (c *Cluster) GetStatus() *ClusterStatus { return status } -// Switchover does a switchover (via Patroni) to a candidate pod -func (c *Cluster) Switchover(curMaster *v1.Pod, candidate spec.NamespacedName) error { +func (c *Cluster) GetSwitchoverSchedule() string { + var possibleSwitchover, schedule time.Time + + now := time.Now().UTC() + for _, window := range c.Spec.MaintenanceWindows { + // in the best case it is possible today + possibleSwitchover = time.Date(now.Year(), now.Month(), now.Day(), window.StartTime.Hour(), window.StartTime.Minute(), 0, 0, time.UTC) + if window.Everyday { + if now.After(possibleSwitchover) { + // we are already past the time for today, try tomorrow + possibleSwitchover = possibleSwitchover.AddDate(0, 0, 1) + } + } else { + if now.Weekday() != window.Weekday { + // get closest possible time for this window + possibleSwitchover = possibleSwitchover.AddDate(0, 0, int((7+window.Weekday-now.Weekday())%7)) + } else if now.After(possibleSwitchover) { + // we are already past the time for today, try next week + possibleSwitchover = possibleSwitchover.AddDate(0, 0, 7) + } + } + if (schedule == time.Time{}) || possibleSwitchover.Before(schedule) { + schedule = possibleSwitchover + } + } + return schedule.Format("2006-01-02T15:04+00") +} + +// Switchover does a switchover (via Patroni) to a candidate pod +func (c *Cluster) Switchover(curMaster *v1.Pod, candidate spec.NamespacedName, scheduled bool) error { var err error - c.logger.Debugf("switching over from %q to %q", curMaster.Name, candidate) - c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeNormal, "Switchover", "Switching over from %q to %q", curMaster.Name, candidate) + stopCh := make(chan struct{}) ch := c.registerPodSubscriber(candidate) defer c.unregisterPodSubscriber(candidate) defer close(stopCh) - if err = c.patroni.Switchover(curMaster, candidate.Name); err == nil { + var scheduled_at string + if scheduled { + scheduled_at = c.GetSwitchoverSchedule() + } else { 
+ c.logger.Debugf("switching over from %q to %q", curMaster.Name, candidate) + c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeNormal, "Switchover", "Switching over from %q to %q", curMaster.Name, candidate) + scheduled_at = "" + } + + if err = c.patroni.Switchover(curMaster, candidate.Name, scheduled_at); err == nil { + if scheduled { + c.logger.Infof("switchover from %q to %q is scheduled at %s", curMaster.Name, candidate, scheduled_at) + return nil + } c.logger.Debugf("successfully switched over from %q to %q", curMaster.Name, candidate) c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeNormal, "Switchover", "Successfully switched over from %q to %q", curMaster.Name, candidate) _, err = c.waitForPodLabel(ch, stopCh, nil) @@ -1542,6 +1817,9 @@ func (c *Cluster) Switchover(curMaster *v1.Pod, candidate spec.NamespacedName) e err = fmt.Errorf("could not get master pod label: %v", err) } } else { + if scheduled { + return fmt.Errorf("could not schedule switchover: %v", err) + } err = fmt.Errorf("could not switch over from %q to %q: %v", curMaster.Name, candidate, err) c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeNormal, "Switchover", "Switchover from %q to %q FAILED: %v", curMaster.Name, candidate, err) } @@ -1558,96 +1836,3 @@ func (c *Cluster) Lock() { func (c *Cluster) Unlock() { c.mu.Unlock() } - -type simpleActionWithResult func() - -type clusterObjectGet func(name string) (spec.NamespacedName, error) - -type clusterObjectDelete func(name string) error - -func (c *Cluster) deletePatroniClusterObjects() error { - // TODO: figure out how to remove leftover patroni objects in other cases - var actionsList []simpleActionWithResult - - if !c.patroniUsesKubernetes() { - c.logger.Infof("not cleaning up Etcd Patroni objects on cluster delete") - } - - actionsList = append(actionsList, c.deletePatroniClusterServices) - if c.patroniKubernetesUseConfigMaps() { - actionsList = append(actionsList, c.deletePatroniClusterConfigMaps) - } else { - actionsList = append(actionsList, c.deletePatroniClusterEndpoints) - } - - c.logger.Debugf("removing leftover Patroni objects (endpoints / services and configmaps)") - for _, deleter := range actionsList { - deleter() - } - return nil -} - -func deleteClusterObject( - get clusterObjectGet, - del clusterObjectDelete, - objType string, - clusterName string, - logger *logrus.Entry) { - for _, suffix := range patroniObjectSuffixes { - name := fmt.Sprintf("%s-%s", clusterName, suffix) - - namespacedName, err := get(name) - if err == nil { - logger.Debugf("deleting %s %q", - objType, namespacedName) - - if err = del(name); err != nil { - logger.Warningf("could not delete %s %q: %v", - objType, namespacedName, err) - } - - } else if !k8sutil.ResourceNotFound(err) { - logger.Warningf("could not fetch %s %q: %v", - objType, namespacedName, err) - } - } -} - -func (c *Cluster) deletePatroniClusterServices() { - get := func(name string) (spec.NamespacedName, error) { - svc, err := c.KubeClient.Services(c.Namespace).Get(context.TODO(), name, metav1.GetOptions{}) - return util.NameFromMeta(svc.ObjectMeta), err - } - - deleteServiceFn := func(name string) error { - return c.KubeClient.Services(c.Namespace).Delete(context.TODO(), name, c.deleteOptions) - } - - deleteClusterObject(get, deleteServiceFn, "service", c.Name, c.logger) -} - -func (c *Cluster) deletePatroniClusterEndpoints() { - get := func(name string) (spec.NamespacedName, error) { - ep, err := c.KubeClient.Endpoints(c.Namespace).Get(context.TODO(), name, metav1.GetOptions{}) - return 
util.NameFromMeta(ep.ObjectMeta), err - } - - deleteEndpointFn := func(name string) error { - return c.KubeClient.Endpoints(c.Namespace).Delete(context.TODO(), name, c.deleteOptions) - } - - deleteClusterObject(get, deleteEndpointFn, "endpoint", c.Name, c.logger) -} - -func (c *Cluster) deletePatroniClusterConfigMaps() { - get := func(name string) (spec.NamespacedName, error) { - cm, err := c.KubeClient.ConfigMaps(c.Namespace).Get(context.TODO(), name, metav1.GetOptions{}) - return util.NameFromMeta(cm.ObjectMeta), err - } - - deleteConfigMapFn := func(name string) error { - return c.KubeClient.ConfigMaps(c.Namespace).Delete(context.TODO(), name, c.deleteOptions) - } - - deleteClusterObject(get, deleteConfigMapFn, "configmap", c.Name, c.logger) -} diff --git a/pkg/cluster/cluster_test.go b/pkg/cluster/cluster_test.go index 29272eac9..09d9df972 100644 --- a/pkg/cluster/cluster_test.go +++ b/pkg/cluster/cluster_test.go @@ -1,11 +1,13 @@ package cluster import ( + "context" "fmt" "net/http" "reflect" "strings" "testing" + "time" "github.com/sirupsen/logrus" "github.com/stretchr/testify/assert" @@ -16,8 +18,11 @@ import ( "github.com/zalando/postgres-operator/pkg/util/config" "github.com/zalando/postgres-operator/pkg/util/constants" "github.com/zalando/postgres-operator/pkg/util/k8sutil" + "github.com/zalando/postgres-operator/pkg/util/patroni" "github.com/zalando/postgres-operator/pkg/util/teams" + batchv1 "k8s.io/api/batch/v1" v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes/fake" "k8s.io/client-go/tools/record" @@ -26,18 +31,23 @@ import ( const ( superUserName = "postgres" replicationUserName = "standby" - exampleSpiloConfig = `{"postgresql":{"bin_dir":"/usr/lib/postgresql/12/bin","parameters":{"autovacuum_analyze_scale_factor":"0.1"},"pg_hba":["hostssl all all 0.0.0.0/0 md5","host all all 0.0.0.0/0 md5"]},"bootstrap":{"initdb":[{"auth-host":"md5"},{"auth-local":"trust"},"data-checksums",{"encoding":"UTF8"},{"locale":"en_US.UTF-8"}],"users":{"test":{"password":"","options":["CREATEDB","NOLOGIN"]}},"dcs":{"ttl":30,"loop_wait":10,"retry_timeout":10,"maximum_lag_on_failover":33554432,"postgresql":{"parameters":{"max_connections":"100","max_locks_per_transaction":"64","max_worker_processes":"4"}}}}}` - spiloConfigDiff = `{"postgresql":{"bin_dir":"/usr/lib/postgresql/12/bin","parameters":{"autovacuum_analyze_scale_factor":"0.1"},"pg_hba":["hostssl all all 0.0.0.0/0 md5","host all all 0.0.0.0/0 md5"]},"bootstrap":{"initdb":[{"auth-host":"md5"},{"auth-local":"trust"},"data-checksums",{"encoding":"UTF8"},{"locale":"en_US.UTF-8"}],"users":{"test":{"password":"","options":["CREATEDB","NOLOGIN"]}},"dcs":{"loop_wait":10,"retry_timeout":10,"maximum_lag_on_failover":33554432,"postgresql":{"parameters":{"max_locks_per_transaction":"64","max_worker_processes":"4"}}}}}` + poolerUserName = "pooler" + adminUserName = "admin" + exampleSpiloConfig = `{"postgresql":{"bin_dir":"/usr/lib/postgresql/12/bin","parameters":{"autovacuum_analyze_scale_factor":"0.1"},"pg_hba":["hostssl all all 0.0.0.0/0 md5","host all all 0.0.0.0/0 md5"]},"bootstrap":{"initdb":[{"auth-host":"md5"},{"auth-local":"trust"},"data-checksums",{"encoding":"UTF8"},{"locale":"en_US.UTF-8"}],"dcs":{"ttl":30,"loop_wait":10,"retry_timeout":10,"maximum_lag_on_failover":33554432,"postgresql":{"parameters":{"max_connections":"100","max_locks_per_transaction":"64","max_worker_processes":"4"}}}}}` + spiloConfigDiff = 
`{"postgresql":{"bin_dir":"/usr/lib/postgresql/12/bin","parameters":{"autovacuum_analyze_scale_factor":"0.1"},"pg_hba":["hostssl all all 0.0.0.0/0 md5","host all all 0.0.0.0/0 md5"]},"bootstrap":{"initdb":[{"auth-host":"md5"},{"auth-local":"trust"},"data-checksums",{"encoding":"UTF8"},{"locale":"en_US.UTF-8"}],"dcs":{"loop_wait":10,"retry_timeout":10,"maximum_lag_on_failover":33554432,"postgresql":{"parameters":{"max_locks_per_transaction":"64","max_worker_processes":"4"}}}}}` ) var logger = logrus.New().WithField("test", "cluster") -var eventRecorder = record.NewFakeRecorder(1) + +// eventRecorder needs buffer for TestCreate which emit events for +// 1 cluster, primary endpoint, 2 services, the secrets, the statefulset and pods being ready +var eventRecorder = record.NewFakeRecorder(7) var cl = New( Config{ OpConfig: config.Config{ PodManagementPolicy: "ordered_ready", - ProtectedRoles: []string{"admin", "cron_admin", "part_man"}, + ProtectedRoles: []string{adminUserName, "cron_admin", "part_man"}, Auth: config.Auth{ SuperUsername: superUserName, ReplicationUsername: replicationUserName, @@ -46,6 +56,9 @@ var cl = New( Resources: config.Resources{ DownscalerAnnotations: []string{"downscaler/*"}, }, + ConnectionPooler: config.ConnectionPooler{ + User: poolerUserName, + }, }, }, k8sutil.NewMockKubernetesClient(), @@ -55,17 +68,106 @@ var cl = New( Namespace: "test", Annotations: map[string]string{"downscaler/downtime_replicas": "0"}, }, + Spec: acidv1.PostgresSpec{ + EnableConnectionPooler: util.True(), + Streams: []acidv1.Stream{ + { + ApplicationId: "test-app", + Database: "test_db", + Tables: map[string]acidv1.StreamTable{ + "test_table": { + EventType: "test-app.test", + }, + }, + }, + }, + }, }, logger, eventRecorder, ) +func TestCreate(t *testing.T) { + clientSet := fake.NewSimpleClientset() + acidClientSet := fakeacidv1.NewSimpleClientset() + clusterName := "cluster-with-finalizer" + clusterNamespace := "test" + + client := k8sutil.KubernetesClient{ + DeploymentsGetter: clientSet.AppsV1(), + CronJobsGetter: clientSet.BatchV1(), + EndpointsGetter: clientSet.CoreV1(), + PersistentVolumeClaimsGetter: clientSet.CoreV1(), + PodDisruptionBudgetsGetter: clientSet.PolicyV1(), + PodsGetter: clientSet.CoreV1(), + PostgresqlsGetter: acidClientSet.AcidV1(), + ServicesGetter: clientSet.CoreV1(), + SecretsGetter: clientSet.CoreV1(), + StatefulSetsGetter: clientSet.AppsV1(), + } + + pg := acidv1.Postgresql{ + ObjectMeta: metav1.ObjectMeta{ + Name: clusterName, + Namespace: clusterNamespace, + }, + Spec: acidv1.PostgresSpec{ + EnableLogicalBackup: true, + Volume: acidv1.Volume{ + Size: "1Gi", + }, + }, + } + + pod := v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-0", clusterName), + Namespace: clusterNamespace, + Labels: map[string]string{ + "application": "spilo", + "cluster-name": clusterName, + "spilo-role": "master", + }, + }, + } + + // manually create resources which must be found by further API calls and are not created by cluster.Create() + client.Postgresqls(clusterNamespace).Create(context.TODO(), &pg, metav1.CreateOptions{}) + client.Pods(clusterNamespace).Create(context.TODO(), &pod, metav1.CreateOptions{}) + + var cluster = New( + Config{ + OpConfig: config.Config{ + PodManagementPolicy: "ordered_ready", + Resources: config.Resources{ + ClusterLabels: map[string]string{"application": "spilo"}, + ClusterNameLabel: "cluster-name", + DefaultCPURequest: "300m", + DefaultCPULimit: "300m", + DefaultMemoryRequest: "300Mi", + DefaultMemoryLimit: "300Mi", + PodRoleLabel: 
"spilo-role", + ResourceCheckInterval: time.Duration(3), + ResourceCheckTimeout: time.Duration(10), + }, + EnableFinalizers: util.True(), + }, + }, client, pg, logger, eventRecorder) + + err := cluster.Create() + assert.NoError(t, err) + + if !cluster.hasFinalizer() { + t.Errorf("%s - expected finalizer not found on cluster", t.Name()) + } +} + func TestStatefulSetAnnotations(t *testing.T) { spec := acidv1.PostgresSpec{ TeamID: "myapp", NumberOfInstances: 1, Resources: &acidv1.Resources{ - ResourceRequests: acidv1.ResourceDescription{CPU: "1", Memory: "10"}, - ResourceLimits: acidv1.ResourceDescription{CPU: "1", Memory: "10"}, + ResourceRequests: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("1"), Memory: k8sutil.StringToPointer("10")}, + ResourceLimits: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("1"), Memory: k8sutil.StringToPointer("10")}, }, Volume: acidv1.Volume{ Size: "1G", @@ -87,8 +189,8 @@ func TestStatefulSetUpdateWithEnv(t *testing.T) { oldSpec := &acidv1.PostgresSpec{ TeamID: "myapp", NumberOfInstances: 1, Resources: &acidv1.Resources{ - ResourceRequests: acidv1.ResourceDescription{CPU: "1", Memory: "10"}, - ResourceLimits: acidv1.ResourceDescription{CPU: "1", Memory: "10"}, + ResourceRequests: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("1"), Memory: k8sutil.StringToPointer("10")}, + ResourceLimits: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("1"), Memory: k8sutil.StringToPointer("10")}, }, Volume: acidv1.Volume{ Size: "1G", @@ -127,56 +229,85 @@ func TestStatefulSetUpdateWithEnv(t *testing.T) { func TestInitRobotUsers(t *testing.T) { tests := []struct { + testCase string manifestUsers map[string]acidv1.UserFlags infraRoles map[string]spec.PgUser result map[string]spec.PgUser err error }{ { + testCase: "manifest user called like infrastructure role - latter should take percedence", manifestUsers: map[string]acidv1.UserFlags{"foo": {"superuser", "createdb"}}, infraRoles: map[string]spec.PgUser{"foo": {Origin: spec.RoleOriginInfrastructure, Name: "foo", Namespace: cl.Namespace, Password: "bar"}}, result: map[string]spec.PgUser{"foo": {Origin: spec.RoleOriginInfrastructure, Name: "foo", Namespace: cl.Namespace, Password: "bar"}}, err: nil, }, { + testCase: "manifest user with forbidden characters", manifestUsers: map[string]acidv1.UserFlags{"!fooBar": {"superuser", "createdb"}}, err: fmt.Errorf(`invalid username: "!fooBar"`), }, { + testCase: "manifest user with unknown privileges (should be catched by CRD, too)", manifestUsers: map[string]acidv1.UserFlags{"foobar": {"!superuser", "createdb"}}, err: fmt.Errorf(`invalid flags for user "foobar": ` + `user flag "!superuser" is not alphanumeric`), }, { + testCase: "manifest user with unknown privileges - part 2 (should be catched by CRD, too)", manifestUsers: map[string]acidv1.UserFlags{"foobar": {"superuser1", "createdb"}}, err: fmt.Errorf(`invalid flags for user "foobar": ` + `user flag "SUPERUSER1" is not valid`), }, { + testCase: "manifest user with conflicting flags", manifestUsers: map[string]acidv1.UserFlags{"foobar": {"inherit", "noinherit"}}, err: fmt.Errorf(`invalid flags for user "foobar": ` + `conflicting user flags: "NOINHERIT" and "INHERIT"`), }, { - manifestUsers: map[string]acidv1.UserFlags{"admin": {"superuser"}, superUserName: {"createdb"}}, + testCase: "manifest user called like Spilo system users", + manifestUsers: map[string]acidv1.UserFlags{superUserName: {"createdb"}, replicationUserName: {"replication"}}, + infraRoles: map[string]spec.PgUser{}, + result: 
map[string]spec.PgUser{}, + err: nil, + }, + { + testCase: "manifest user called like protected user name", + manifestUsers: map[string]acidv1.UserFlags{adminUserName: {"superuser"}}, + infraRoles: map[string]spec.PgUser{}, + result: map[string]spec.PgUser{}, + err: nil, + }, + { + testCase: "manifest user called like pooler system user", + manifestUsers: map[string]acidv1.UserFlags{poolerUserName: {}}, + infraRoles: map[string]spec.PgUser{}, + result: map[string]spec.PgUser{}, + err: nil, + }, + { + testCase: "manifest user called like stream system user", + manifestUsers: map[string]acidv1.UserFlags{"fes_user": {"replication"}}, infraRoles: map[string]spec.PgUser{}, result: map[string]spec.PgUser{}, err: nil, }, } + cl.initSystemUsers() for _, tt := range tests { cl.Spec.Users = tt.manifestUsers cl.pgUsers = tt.infraRoles if err := cl.initRobotUsers(); err != nil { if tt.err == nil { - t.Errorf("%s got an unexpected error: %v", t.Name(), err) + t.Errorf("%s - %s: got an unexpected error: %v", tt.testCase, t.Name(), err) } if err.Error() != tt.err.Error() { - t.Errorf("%s expected error %v, got %v", t.Name(), tt.err, err) + t.Errorf("%s - %s: expected error %v, got %v", tt.testCase, t.Name(), tt.err, err) } } else { if !reflect.DeepEqual(cl.pgUsers, tt.result) { - t.Errorf("%s expected: %#v, got %#v", t.Name(), tt.result, cl.pgUsers) + t.Errorf("%s - %s: expected: %#v, got %#v", tt.testCase, t.Name(), tt.result, cl.pgUsers) } } } @@ -269,7 +400,7 @@ func TestInitHumanUsers(t *testing.T) { }, { existingRoles: map[string]spec.PgUser{}, - teamRoles: []string{"admin", replicationUserName}, + teamRoles: []string{adminUserName, replicationUserName}, result: map[string]spec.PgUser{}, err: nil, }, @@ -896,6 +1027,11 @@ func TestServiceAnnotations(t *testing.T) { } func TestInitSystemUsers(t *testing.T) { + // reset system users, pooler and stream section + cl.systemUsers = make(map[string]spec.PgUser) + cl.Spec.EnableConnectionPooler = boolToPointer(false) + cl.Spec.Streams = []acidv1.Stream{} + // default cluster without connection pooler and event streams cl.initSystemUsers() if _, exist := cl.systemUsers[constants.ConnectionPoolerUserKeyName]; exist { @@ -914,35 +1050,35 @@ func TestInitSystemUsers(t *testing.T) { // superuser is not allowed as connection pool user cl.Spec.ConnectionPooler = &acidv1.ConnectionPooler{ - User: "postgres", + User: superUserName, } - cl.OpConfig.SuperUsername = "postgres" - cl.OpConfig.ConnectionPooler.User = "pooler" + cl.OpConfig.SuperUsername = superUserName + cl.OpConfig.ConnectionPooler.User = poolerUserName cl.initSystemUsers() - if _, exist := cl.systemUsers["pooler"]; !exist { + if _, exist := cl.systemUsers[poolerUserName]; !exist { t.Errorf("%s, Superuser is not allowed to be a connection pool user", t.Name()) } // neither protected users are - delete(cl.systemUsers, "pooler") + delete(cl.systemUsers, poolerUserName) cl.Spec.ConnectionPooler = &acidv1.ConnectionPooler{ - User: "admin", + User: adminUserName, } - cl.OpConfig.ProtectedRoles = []string{"admin"} + cl.OpConfig.ProtectedRoles = []string{adminUserName} cl.initSystemUsers() - if _, exist := cl.systemUsers["pooler"]; !exist { + if _, exist := cl.systemUsers[poolerUserName]; !exist { t.Errorf("%s, Protected user are not allowed to be a connection pool user", t.Name()) } - delete(cl.systemUsers, "pooler") + delete(cl.systemUsers, poolerUserName) cl.Spec.ConnectionPooler = &acidv1.ConnectionPooler{ - User: "standby", + User: replicationUserName, } cl.initSystemUsers() - if _, exist := 
cl.systemUsers["pooler"]; !exist { + if _, exist := cl.systemUsers[poolerUserName]; !exist { t.Errorf("%s, System users are not allowed to be a connection pool user", t.Name()) } @@ -960,8 +1096,8 @@ func TestInitSystemUsers(t *testing.T) { ApplicationId: "test-app", Database: "test_db", Tables: map[string]acidv1.StreamTable{ - "data.test_table": { - EventType: "test_event", + "test_table": { + EventType: "test-app.test", }, }, }, @@ -1017,7 +1153,7 @@ func TestPreparedDatabases(t *testing.T) { subTest: "Test admin role of owner", role: "foo_owner", memberOf: "", - admin: "admin", + admin: adminUserName, }, { subTest: "Test writer is a member of reader", @@ -1062,17 +1198,13 @@ func TestCompareSpiloConfiguration(t *testing.T) { ExpectedResult bool }{ { - `{"postgresql":{"bin_dir":"/usr/lib/postgresql/12/bin","parameters":{"autovacuum_analyze_scale_factor":"0.1"},"pg_hba":["hostssl all all 0.0.0.0/0 md5","host all all 0.0.0.0/0 md5"]},"bootstrap":{"initdb":[{"auth-host":"md5"},{"auth-local":"trust"},"data-checksums",{"encoding":"UTF8"},{"locale":"en_US.UTF-8"}],"users":{"test":{"password":"","options":["CREATEDB","NOLOGIN"]}},"dcs":{"ttl":30,"loop_wait":10,"retry_timeout":10,"maximum_lag_on_failover":33554432,"postgresql":{"parameters":{"max_connections":"100","max_locks_per_transaction":"64","max_worker_processes":"4"}}}}}`, + `{"postgresql":{"bin_dir":"/usr/lib/postgresql/12/bin","parameters":{"autovacuum_analyze_scale_factor":"0.1"},"pg_hba":["hostssl all all 0.0.0.0/0 md5","host all all 0.0.0.0/0 md5"]},"bootstrap":{"initdb":[{"auth-host":"md5"},{"auth-local":"trust"},"data-checksums",{"encoding":"UTF8"},{"locale":"en_US.UTF-8"}],"dcs":{"ttl":30,"loop_wait":10,"retry_timeout":10,"maximum_lag_on_failover":33554432,"postgresql":{"parameters":{"max_connections":"100","max_locks_per_transaction":"64","max_worker_processes":"4"}}}}}`, true, }, { - `{"postgresql":{"bin_dir":"/usr/lib/postgresql/12/bin","parameters":{"autovacuum_analyze_scale_factor":"0.1"},"pg_hba":["hostssl all all 0.0.0.0/0 md5","host all all 0.0.0.0/0 md5"]},"bootstrap":{"initdb":[{"auth-host":"md5"},{"auth-local":"trust"},"data-checksums",{"encoding":"UTF8"},{"locale":"en_US.UTF-8"}],"users":{"test":{"password":"","options":["CREATEDB","NOLOGIN"]}},"dcs":{"ttl":30,"loop_wait":10,"retry_timeout":10,"maximum_lag_on_failover":33554432,"postgresql":{"parameters":{"max_connections":"200","max_locks_per_transaction":"64","max_worker_processes":"4"}}}}}`, + `{"postgresql":{"bin_dir":"/usr/lib/postgresql/12/bin","parameters":{"autovacuum_analyze_scale_factor":"0.1"},"pg_hba":["hostssl all all 0.0.0.0/0 md5","host all all 0.0.0.0/0 md5"]},"bootstrap":{"initdb":[{"auth-host":"md5"},{"auth-local":"trust"},"data-checksums",{"encoding":"UTF8"},{"locale":"en_US.UTF-8"}],"dcs":{"ttl":30,"loop_wait":10,"retry_timeout":10,"maximum_lag_on_failover":33554432,"postgresql":{"parameters":{"max_connections":"200","max_locks_per_transaction":"64","max_worker_processes":"4"}}}}}`, true, }, - { - `{"postgresql":{"bin_dir":"/usr/lib/postgresql/12/bin","parameters":{"autovacuum_analyze_scale_factor":"0.1"},"pg_hba":["hostssl all all 0.0.0.0/0 md5","host all all 0.0.0.0/0 
md5"]},"bootstrap":{"initdb":[{"auth-host":"md5"},{"auth-local":"trust"},"data-checksums",{"encoding":"UTF8"},{"locale":"en_US.UTF-8"}],"users":{"test":{"password":"","options":["CREATEDB"]}},"dcs":{"ttl":30,"loop_wait":10,"retry_timeout":10,"maximum_lag_on_failover":33554432,"postgresql":{"parameters":{"max_connections":"200","max_locks_per_transaction":"64","max_worker_processes":"4"}}}}}`, - false, - }, { `{}`, false, @@ -1233,6 +1365,23 @@ func TestCompareServices(t *testing.T) { }, } + serviceWithOwnerReference := newService( + map[string]string{ + constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do", + constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue, + }, + v1.ServiceTypeClusterIP, + []string{"128.141.0.0/16", "137.138.0.0/16"}) + + ownerRef := metav1.OwnerReference{ + APIVersion: "acid.zalan.do/v1", + Controller: boolToPointer(true), + Kind: "Postgresql", + Name: "clstr", + } + + serviceWithOwnerReference.ObjectMeta.OwnerReferences = append(serviceWithOwnerReference.ObjectMeta.OwnerReferences, ownerRef) + tests := []struct { about string current *v1.Service @@ -1316,222 +1465,235 @@ func TestCompareServices(t *testing.T) { reason: `new service's LoadBalancerSourceRange does not match the current one`, }, { - about: "services differ on DNS annotation", + about: "new service doesn't have owner references", current: newService( map[string]string{ constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do", constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue, }, - v1.ServiceTypeLoadBalancer, - []string{"128.141.0.0/16", "137.138.0.0/16"}), - new: newService( - map[string]string{ - constants.ZalandoDNSNameAnnotation: "new_clstr.acid.zalan.do", - constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue, - }, - v1.ServiceTypeLoadBalancer, + v1.ServiceTypeClusterIP, []string{"128.141.0.0/16", "137.138.0.0/16"}), - match: false, - reason: `new service's annotations does not match the current one: "external-dns.alpha.kubernetes.io/hostname" changed from "clstr.acid.zalan.do" to "new_clstr.acid.zalan.do".`, + new: serviceWithOwnerReference, + match: false, }, - { - about: "services differ on AWS ELB annotation", - current: newService( - map[string]string{ - constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do", - constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue, - }, - v1.ServiceTypeLoadBalancer, - []string{"128.141.0.0/16", "137.138.0.0/16"}), - new: newService( - map[string]string{ - constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do", - constants.ElbTimeoutAnnotationName: "1800", + } + + for _, tt := range tests { + t.Run(tt.about, func(t *testing.T) { + match, reason := cluster.compareServices(tt.current, tt.new) + if match && !tt.match { + t.Logf("match=%v current=%v, old=%v reason=%s", match, tt.current.Annotations, tt.new.Annotations, reason) + t.Errorf("%s - expected services to do not match: %q and %q", t.Name(), tt.current, tt.new) + } + if !match && tt.match { + t.Errorf("%s - expected services to be the same: %q and %q", t.Name(), tt.current, tt.new) + } + if !match && !tt.match { + if !strings.HasPrefix(reason, tt.reason) { + t.Errorf("%s - expected reason prefix %s, found %s", t.Name(), tt.reason, reason) + } + } + }) + } +} + +func newCronJob(image, schedule string, vars []v1.EnvVar, mounts []v1.VolumeMount) *batchv1.CronJob { + cron := &batchv1.CronJob{ + Spec: batchv1.CronJobSpec{ + Schedule: schedule, + JobTemplate: batchv1.JobTemplateSpec{ + Spec: batchv1.JobSpec{ + Template: 
v1.PodTemplateSpec{ + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "logical-backup", + Image: image, + Env: vars, + Ports: []v1.ContainerPort{ + { + ContainerPort: patroni.ApiPort, + Protocol: v1.ProtocolTCP, + }, + { + ContainerPort: pgPort, + Protocol: v1.ProtocolTCP, + }, + { + ContainerPort: operatorPort, + Protocol: v1.ProtocolTCP, + }, + }, + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceCPU: resource.MustParse("100m"), + v1.ResourceMemory: resource.MustParse("100Mi"), + }, + Limits: v1.ResourceList{ + v1.ResourceCPU: resource.MustParse("100m"), + v1.ResourceMemory: resource.MustParse("100Mi"), + }, + }, + SecurityContext: &v1.SecurityContext{ + AllowPrivilegeEscalation: nil, + Privileged: util.False(), + ReadOnlyRootFilesystem: util.False(), + Capabilities: nil, + }, + VolumeMounts: mounts, + }, + }, + }, + }, }, - v1.ServiceTypeLoadBalancer, - []string{"128.141.0.0/16", "137.138.0.0/16"}), - match: false, - reason: `new service's annotations does not match the current one: "service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout" changed from "3600" to "1800".`, + }, }, - { - about: "service changes existing annotation", - current: newService( - map[string]string{ - constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do", - constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue, - "foo": "bar", - }, - v1.ServiceTypeLoadBalancer, - []string{"128.141.0.0/16", "137.138.0.0/16"}), - new: newService( - map[string]string{ - constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do", - constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue, - "foo": "baz", - }, - v1.ServiceTypeLoadBalancer, - []string{"128.141.0.0/16", "137.138.0.0/16"}), - match: false, - reason: `new service's annotations does not match the current one: "foo" changed from "bar" to "baz".`, + } + return cron +} + +func TestCompareLogicalBackupJob(t *testing.T) { + + img1 := "registry.opensource.zalan.do/acid/logical-backup:v1.0" + img2 := "registry.opensource.zalan.do/acid/logical-backup:v2.0" + + clientSet := fake.NewSimpleClientset() + acidClientSet := fakeacidv1.NewSimpleClientset() + namespace := "default" + + client := k8sutil.KubernetesClient{ + CronJobsGetter: clientSet.BatchV1(), + PostgresqlsGetter: acidClientSet.AcidV1(), + } + pg := acidv1.Postgresql{ + ObjectMeta: metav1.ObjectMeta{ + Name: "acid-cron-cluster", + Namespace: namespace, }, - { - about: "service changes multiple existing annotations", - current: newService( - map[string]string{ - constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do", - constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue, - "foo": "bar", - "bar": "foo", - }, - v1.ServiceTypeLoadBalancer, - []string{"128.141.0.0/16", "137.138.0.0/16"}), - new: newService( - map[string]string{ - constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do", - constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue, - "foo": "baz", - "bar": "fooz", - }, - v1.ServiceTypeLoadBalancer, - []string{"128.141.0.0/16", "137.138.0.0/16"}), - match: false, - // Test just the prefix to avoid flakiness and map sorting - reason: `new service's annotations does not match the current one:`, + Spec: acidv1.PostgresSpec{ + Volume: acidv1.Volume{ + Size: "1Gi", + }, + EnableLogicalBackup: true, + LogicalBackupSchedule: "0 0 * * *", + LogicalBackupRetention: "3 months", }, - { - about: "service adds a new custom annotation", - current: newService( - map[string]string{ - 
constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do", - constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue, + } + + var cluster = New( + Config{ + OpConfig: config.Config{ + PodManagementPolicy: "ordered_ready", + Resources: config.Resources{ + ClusterLabels: map[string]string{"application": "spilo"}, + ClusterNameLabel: "cluster-name", + DefaultCPURequest: "300m", + DefaultCPULimit: "300m", + DefaultMemoryRequest: "300Mi", + DefaultMemoryLimit: "300Mi", + PodRoleLabel: "spilo-role", }, - v1.ServiceTypeLoadBalancer, - []string{"128.141.0.0/16", "137.138.0.0/16"}), - new: newService( - map[string]string{ - constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do", - constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue, - "foo": "bar", + LogicalBackup: config.LogicalBackup{ + LogicalBackupSchedule: "30 00 * * *", + LogicalBackupDockerImage: img1, + LogicalBackupJobPrefix: "logical-backup-", + LogicalBackupCPURequest: "100m", + LogicalBackupCPULimit: "100m", + LogicalBackupMemoryRequest: "100Mi", + LogicalBackupMemoryLimit: "100Mi", + LogicalBackupProvider: "s3", + LogicalBackupS3Bucket: "testBucket", + LogicalBackupS3BucketPrefix: "spilo", + LogicalBackupS3Region: "eu-central-1", + LogicalBackupS3Endpoint: "https://s3.amazonaws.com", + LogicalBackupS3AccessKeyID: "access", + LogicalBackupS3SecretAccessKey: "secret", + LogicalBackupS3SSE: "aws:kms", + LogicalBackupS3RetentionTime: "3 months", + LogicalBackupCronjobEnvironmentSecret: "", }, - v1.ServiceTypeLoadBalancer, - []string{"128.141.0.0/16", "137.138.0.0/16"}), - match: false, - reason: `new service's annotations does not match the current one: Added "foo" with value "bar".`, - }, + }, + }, client, pg, logger, eventRecorder) + + desiredCronJob, err := cluster.generateLogicalBackupJob() + if err != nil { + t.Errorf("Could not generate logical backup job with error: %v", err) + } + + err = cluster.createLogicalBackupJob() + if err != nil { + t.Errorf("Could not create logical backup job with error: %v", err) + } + + currentCronJob, err := cluster.KubeClient.CronJobs(namespace).Get(context.TODO(), cluster.getLogicalBackupJobName(), metav1.GetOptions{}) + if err != nil { + t.Errorf("Could not create logical backup job with error: %v", err) + } + + tests := []struct { + about string + cronjob *batchv1.CronJob + match bool + reason string + }{ { - about: "service removes a custom annotation", - current: newService( - map[string]string{ - constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do", - constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue, - "foo": "bar", - }, - v1.ServiceTypeLoadBalancer, - []string{"128.141.0.0/16", "137.138.0.0/16"}), - new: newService( - map[string]string{ - constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do", - constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue, - }, - v1.ServiceTypeLoadBalancer, - []string{"128.141.0.0/16", "137.138.0.0/16"}), - match: false, - reason: `new service's annotations does not match the current one: Removed "foo".`, + about: "two equal cronjobs", + cronjob: newCronJob(img1, "0 0 * * *", []v1.EnvVar{}, []v1.VolumeMount{}), + match: true, }, { - about: "service removes a custom annotation and adds a new one", - current: newService( - map[string]string{ - constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do", - constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue, - "foo": "bar", - }, - v1.ServiceTypeLoadBalancer, - []string{"128.141.0.0/16", "137.138.0.0/16"}), - new: 
newService( - map[string]string{ - constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do", - constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue, - "bar": "foo", - }, - v1.ServiceTypeLoadBalancer, - []string{"128.141.0.0/16", "137.138.0.0/16"}), - match: false, - reason: `new service's annotations does not match the current one: Removed "foo". Added "bar" with value "foo".`, + about: "two cronjobs with different image", + cronjob: newCronJob(img2, "0 0 * * *", []v1.EnvVar{}, []v1.VolumeMount{}), + match: false, + reason: fmt.Sprintf("new job's image %q does not match the current one %q", img2, img1), }, { - about: "service removes a custom annotation, adds a new one and change another", - current: newService( - map[string]string{ - constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do", - constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue, - "foo": "bar", - "zalan": "do", - }, - v1.ServiceTypeLoadBalancer, - []string{"128.141.0.0/16", "137.138.0.0/16"}), - new: newService( - map[string]string{ - constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do", - constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue, - "bar": "foo", - "zalan": "do.com", - }, - v1.ServiceTypeLoadBalancer, - []string{"128.141.0.0/16", "137.138.0.0/16"}), - match: false, - // Test just the prefix to avoid flakiness and map sorting - reason: `new service's annotations does not match the current one: Removed "foo".`, + about: "two cronjobs with different schedule", + cronjob: newCronJob(img1, "0 * * * *", []v1.EnvVar{}, []v1.VolumeMount{}), + match: false, + reason: fmt.Sprintf("new job's schedule %q does not match the current one %q", "0 * * * *", "0 0 * * *"), }, { - about: "service add annotations", - current: newService( - map[string]string{}, - v1.ServiceTypeLoadBalancer, - []string{"128.141.0.0/16", "137.138.0.0/16"}), - new: newService( - map[string]string{ - constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do", - constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue, - }, - v1.ServiceTypeLoadBalancer, - []string{"128.141.0.0/16", "137.138.0.0/16"}), - match: false, - // Test just the prefix to avoid flakiness and map sorting - reason: `new service's annotations does not match the current one: Added `, + about: "two cronjobs with empty and nil volume mounts", + cronjob: newCronJob(img1, "0 0 * * *", []v1.EnvVar{}, nil), + match: true, }, { - about: "ignored annotations", - current: newService( - map[string]string{}, - v1.ServiceTypeLoadBalancer, - []string{"128.141.0.0/16", "137.138.0.0/16"}), - new: newService( - map[string]string{ - "k8s.v1.cni.cncf.io/network-status": "up", - }, - v1.ServiceTypeLoadBalancer, - []string{"128.141.0.0/16", "137.138.0.0/16"}), - match: true, + about: "two cronjobs with different environment variables", + cronjob: newCronJob(img1, "0 0 * * *", []v1.EnvVar{{Name: "LOGICAL_BACKUP_S3_BUCKET_PREFIX", Value: "logical-backup"}}, []v1.VolumeMount{}), + match: false, + reason: "logical backup container specs do not match: new cronjob container's logical-backup (index 0) environment does not match the current one", }, } for _, tt := range tests { t.Run(tt.about, func(t *testing.T) { - match, reason := cluster.compareServices(tt.current, tt.new) - if match && !tt.match { - t.Logf("match=%v current=%v, old=%v reason=%s", match, tt.current.Annotations, tt.new.Annotations, reason) - t.Errorf("%s - expected services to do not match: %q and %q", t.Name(), tt.current, tt.new) - return - } - if !match && 
tt.match { - t.Errorf("%s - expected services to be the same: %q and %q", t.Name(), tt.current, tt.new) - return + desiredCronJob.Spec.Schedule = tt.cronjob.Spec.Schedule + desiredCronJob.Spec.JobTemplate.Spec.Template.Spec.Containers[0].Image = tt.cronjob.Spec.JobTemplate.Spec.Template.Spec.Containers[0].Image + desiredCronJob.Spec.JobTemplate.Spec.Template.Spec.Containers[0].VolumeMounts = tt.cronjob.Spec.JobTemplate.Spec.Template.Spec.Containers[0].VolumeMounts + + for _, testEnv := range tt.cronjob.Spec.JobTemplate.Spec.Template.Spec.Containers[0].Env { + for i, env := range desiredCronJob.Spec.JobTemplate.Spec.Template.Spec.Containers[0].Env { + if env.Name == testEnv.Name { + desiredCronJob.Spec.JobTemplate.Spec.Template.Spec.Containers[0].Env[i] = testEnv + } + } } - if !match && !tt.match { - if !strings.HasPrefix(reason, tt.reason) { - t.Errorf("%s - expected reason prefix %s, found %s", t.Name(), tt.reason, reason) - return + + cmp := cluster.compareLogicalBackupJob(currentCronJob, desiredCronJob) + if cmp.match != tt.match { + t.Errorf("%s - unexpected match result %t when comparing cronjobs %#v and %#v", t.Name(), cmp.match, currentCronJob, desiredCronJob) + } else if !cmp.match { + found := false + for _, reason := range cmp.reasons { + if strings.HasPrefix(reason, tt.reason) { + found = true + break + } + found = false + } + if !found { + t.Errorf("%s - expected reason prefix %s, not found in %#v", t.Name(), tt.reason, cmp.reasons) } } }) @@ -1723,3 +1885,271 @@ func TestComparePorts(t *testing.T) { }) } } + +func TestCompareVolumeMounts(t *testing.T) { + testCases := []struct { + name string + mountsA []v1.VolumeMount + mountsB []v1.VolumeMount + expected bool + }{ + { + name: "empty vs nil", + mountsA: []v1.VolumeMount{}, + mountsB: nil, + expected: true, + }, + { + name: "both empty", + mountsA: []v1.VolumeMount{}, + mountsB: []v1.VolumeMount{}, + expected: true, + }, + { + name: "same mounts", + mountsA: []v1.VolumeMount{ + { + Name: "data", + ReadOnly: false, + MountPath: "/data", + SubPath: "subdir", + }, + }, + mountsB: []v1.VolumeMount{ + { + Name: "data", + ReadOnly: false, + MountPath: "/data", + SubPath: "subdir", + }, + }, + expected: true, + }, + { + name: "different mounts", + mountsA: []v1.VolumeMount{ + { + Name: "data", + ReadOnly: false, + MountPath: "/data", + SubPathExpr: "$(POD_NAME)", + }, + }, + mountsB: []v1.VolumeMount{ + { + Name: "data", + ReadOnly: false, + MountPath: "/data", + SubPath: "subdir", + }, + }, + expected: false, + }, + { + name: "one equal mount one different", + mountsA: []v1.VolumeMount{ + { + Name: "data", + ReadOnly: false, + MountPath: "/data", + SubPath: "subdir", + }, + { + Name: "poddata", + ReadOnly: false, + MountPath: "/poddata", + SubPathExpr: "$(POD_NAME)", + }, + }, + mountsB: []v1.VolumeMount{ + { + Name: "data", + ReadOnly: false, + MountPath: "/data", + SubPath: "subdir", + }, + { + Name: "etc", + ReadOnly: true, + MountPath: "/etc", + }, + }, + expected: false, + }, + { + name: "same mounts, different order", + mountsA: []v1.VolumeMount{ + { + Name: "data", + ReadOnly: false, + MountPath: "/data", + SubPath: "subdir", + }, + { + Name: "etc", + ReadOnly: true, + MountPath: "/etc", + }, + }, + mountsB: []v1.VolumeMount{ + { + Name: "etc", + ReadOnly: true, + MountPath: "/etc", + }, + { + Name: "data", + ReadOnly: false, + MountPath: "/data", + SubPath: "subdir", + }, + }, + expected: true, + }, + { + name: "new mounts added", + mountsA: []v1.VolumeMount{ + { + Name: "data", + ReadOnly: false, + MountPath: "/data", + 
SubPath: "subdir", + }, + }, + mountsB: []v1.VolumeMount{ + { + Name: "etc", + ReadOnly: true, + MountPath: "/etc", + }, + { + Name: "data", + ReadOnly: false, + MountPath: "/data", + SubPath: "subdir", + }, + }, + expected: false, + }, + { + name: "one mount removed", + mountsA: []v1.VolumeMount{ + { + Name: "data", + ReadOnly: false, + MountPath: "/data", + SubPath: "subdir", + }, + { + Name: "etc", + ReadOnly: true, + MountPath: "/etc", + }, + }, + mountsB: []v1.VolumeMount{ + { + Name: "data", + ReadOnly: false, + MountPath: "/data", + SubPath: "subdir", + }, + }, + expected: false, + }, + } + + for _, tt := range testCases { + t.Run(tt.name, func(t *testing.T) { + got := compareVolumeMounts(tt.mountsA, tt.mountsB) + assert.Equal(t, tt.expected, got) + }) + } +} + +func TestGetSwitchoverSchedule(t *testing.T) { + now := time.Now() + + futureTimeStart := now.Add(1 * time.Hour) + futureWindowTimeStart := futureTimeStart.Format("15:04") + futureWindowTimeEnd := now.Add(2 * time.Hour).Format("15:04") + pastTimeStart := now.Add(-2 * time.Hour) + pastWindowTimeStart := pastTimeStart.Format("15:04") + pastWindowTimeEnd := now.Add(-1 * time.Hour).Format("15:04") + + tests := []struct { + name string + windows []acidv1.MaintenanceWindow + expected string + }{ + { + name: "everyday maintenance windows is later today", + windows: []acidv1.MaintenanceWindow{ + { + Everyday: true, + StartTime: mustParseTime(futureWindowTimeStart), + EndTime: mustParseTime(futureWindowTimeEnd), + }, + }, + expected: futureTimeStart.Format("2006-01-02T15:04+00"), + }, + { + name: "everyday maintenance window is tomorrow", + windows: []acidv1.MaintenanceWindow{ + { + Everyday: true, + StartTime: mustParseTime(pastWindowTimeStart), + EndTime: mustParseTime(pastWindowTimeEnd), + }, + }, + expected: pastTimeStart.AddDate(0, 0, 1).Format("2006-01-02T15:04+00"), + }, + { + name: "weekday maintenance windows is later today", + windows: []acidv1.MaintenanceWindow{ + { + Weekday: now.Weekday(), + StartTime: mustParseTime(futureWindowTimeStart), + EndTime: mustParseTime(futureWindowTimeEnd), + }, + }, + expected: futureTimeStart.Format("2006-01-02T15:04+00"), + }, + { + name: "weekday maintenance windows is passed for today", + windows: []acidv1.MaintenanceWindow{ + { + Weekday: now.Weekday(), + StartTime: mustParseTime(pastWindowTimeStart), + EndTime: mustParseTime(pastWindowTimeEnd), + }, + }, + expected: pastTimeStart.AddDate(0, 0, 7).Format("2006-01-02T15:04+00"), + }, + { + name: "choose the earliest window", + windows: []acidv1.MaintenanceWindow{ + { + Weekday: now.AddDate(0, 0, 2).Weekday(), + StartTime: mustParseTime(futureWindowTimeStart), + EndTime: mustParseTime(futureWindowTimeEnd), + }, + { + Everyday: true, + StartTime: mustParseTime(pastWindowTimeStart), + EndTime: mustParseTime(pastWindowTimeEnd), + }, + }, + expected: pastTimeStart.AddDate(0, 0, 1).Format("2006-01-02T15:04+00"), + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + cluster.Spec.MaintenanceWindows = tt.windows + schedule := cluster.GetSwitchoverSchedule() + if schedule != tt.expected { + t.Errorf("Expected GetSwitchoverSchedule to return %s, returned: %s", tt.expected, schedule) + } + }) + } +} diff --git a/pkg/cluster/connection_pooler.go b/pkg/cluster/connection_pooler.go index c551f0a8f..ac4ce67d8 100644 --- a/pkg/cluster/connection_pooler.go +++ b/pkg/cluster/connection_pooler.go @@ -2,7 +2,9 @@ package cluster import ( "context" + "encoding/json" "fmt" + "reflect" "strings" "time" @@ -590,7 +592,7 @@ func (c 
*Cluster) deleteConnectionPooler(role PostgresRole) (err error) { // Lack of connection pooler objects is not a fatal error, just log it if // it was present before in the manifest if c.ConnectionPooler[role] == nil || role == "" { - c.logger.Debugf("no connection pooler to delete") + c.logger.Debug("no connection pooler to delete") return nil } @@ -610,7 +612,7 @@ func (c *Cluster) deleteConnectionPooler(role PostgresRole) (err error) { Delete(context.TODO(), deployment.Name, options) if k8sutil.ResourceNotFound(err) { - c.logger.Debugf("connection pooler deployment was already deleted") + c.logger.Debugf("connection pooler deployment %s for role %s has already been deleted", deployment.Name, role) } else if err != nil { return fmt.Errorf("could not delete connection pooler deployment: %v", err) } @@ -621,7 +623,7 @@ func (c *Cluster) deleteConnectionPooler(role PostgresRole) (err error) { // Repeat the same for the service object service := c.ConnectionPooler[role].Service if service == nil { - c.logger.Debugf("no connection pooler service object to delete") + c.logger.Debug("no connection pooler service object to delete") } else { err = c.KubeClient. @@ -629,7 +631,7 @@ func (c *Cluster) deleteConnectionPooler(role PostgresRole) (err error) { Delete(context.TODO(), service.Name, options) if k8sutil.ResourceNotFound(err) { - c.logger.Debugf("connection pooler service was already deleted") + c.logger.Debugf("connection pooler service %s for role %s has already been already deleted", service.Name, role) } else if err != nil { return fmt.Errorf("could not delete connection pooler service: %v", err) } @@ -654,7 +656,7 @@ func (c *Cluster) deleteConnectionPoolerSecret() (err error) { if err != nil { c.logger.Debugf("could not get connection pooler secret %s: %v", secretName, err) } else { - if err = c.deleteSecret(secret.UID, *secret); err != nil { + if err = c.deleteSecret(secret.UID); err != nil { return fmt.Errorf("could not delete pooler secret: %v", err) } } @@ -663,11 +665,19 @@ func (c *Cluster) deleteConnectionPoolerSecret() (err error) { // Perform actual patching of a connection pooler deployment, assuming that all // the check were already done before. 
-func updateConnectionPoolerDeployment(KubeClient k8sutil.KubernetesClient, newDeployment *appsv1.Deployment) (*appsv1.Deployment, error) { +func updateConnectionPoolerDeployment(KubeClient k8sutil.KubernetesClient, newDeployment *appsv1.Deployment, doUpdate bool) (*appsv1.Deployment, error) { if newDeployment == nil { return nil, fmt.Errorf("there is no connection pooler in the cluster") } + if doUpdate { + updatedDeployment, err := KubeClient.Deployments(newDeployment.Namespace).Update(context.TODO(), newDeployment, metav1.UpdateOptions{}) + if err != nil { + return nil, fmt.Errorf("could not update pooler deployment to match desired state: %v", err) + } + return updatedDeployment, nil + } + patchData, err := specPatch(newDeployment.Spec) if err != nil { return nil, fmt.Errorf("could not form patch for the connection pooler deployment: %v", err) @@ -691,8 +701,8 @@ func updateConnectionPoolerDeployment(KubeClient k8sutil.KubernetesClient, newDe return deployment, nil } -// updateConnectionPoolerAnnotations updates the annotations of connection pooler deployment -func updateConnectionPoolerAnnotations(KubeClient k8sutil.KubernetesClient, deployment *appsv1.Deployment, annotations map[string]string) (*appsv1.Deployment, error) { +// patchConnectionPoolerAnnotations updates the annotations of connection pooler deployment +func patchConnectionPoolerAnnotations(KubeClient k8sutil.KubernetesClient, deployment *appsv1.Deployment, annotations map[string]string) (*appsv1.Deployment, error) { patchData, err := metaAnnotationsPatch(annotations) if err != nil { return nil, fmt.Errorf("could not form patch for the connection pooler deployment metadata: %v", err) @@ -751,6 +761,7 @@ func (c *Cluster) needSyncConnectionPoolerDefaults(Config *Config, spec *acidv1. if spec == nil { spec = &acidv1.ConnectionPooler{} } + if spec.NumberOfInstances == nil && *deployment.Spec.Replicas != *config.NumberOfInstances { @@ -821,12 +832,12 @@ func (c *Cluster) needSyncConnectionPoolerDefaults(Config *Config, spec *acidv1. func makeDefaultConnectionPoolerResources(config *config.Config) acidv1.Resources { defaultRequests := acidv1.ResourceDescription{ - CPU: config.ConnectionPooler.ConnectionPoolerDefaultCPURequest, - Memory: config.ConnectionPooler.ConnectionPoolerDefaultMemoryRequest, + CPU: &config.ConnectionPooler.ConnectionPoolerDefaultCPURequest, + Memory: &config.ConnectionPooler.ConnectionPoolerDefaultMemoryRequest, } defaultLimits := acidv1.ResourceDescription{ - CPU: config.ConnectionPooler.ConnectionPoolerDefaultCPULimit, - Memory: config.ConnectionPooler.ConnectionPoolerDefaultMemoryLimit, + CPU: &config.ConnectionPooler.ConnectionPoolerDefaultCPULimit, + Memory: &config.ConnectionPooler.ConnectionPoolerDefaultMemoryLimit, } return acidv1.Resources{ @@ -967,6 +978,7 @@ func (c *Cluster) syncConnectionPoolerWorker(oldSpec, newSpec *acidv1.Postgresql err error ) + updatedPodAnnotations := map[string]*string{} syncReason := make([]string, 0) deployment, err = c.KubeClient. Deployments(c.Namespace). 
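// Illustrative note on updateConnectionPoolerDeployment above (not part of the
// change set): when doUpdate is true the whole Deployment is written back with
// an Update call, which also refreshes metadata such as owner references;
// otherwise only the spec is patched, as before. The flag is set by the sync
// worker below when the owner references no longer match c.ownerReferences().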
@@ -1014,18 +1026,48 @@ func (c *Cluster) syncConnectionPoolerWorker(oldSpec, newSpec *acidv1.Postgresql newConnectionPooler = &acidv1.ConnectionPooler{} } - var specSync bool + var specSync, updateDeployment bool var specReason []string + if !reflect.DeepEqual(deployment.ObjectMeta.OwnerReferences, c.ownerReferences()) { + c.logger.Info("new connection pooler owner references do not match the current ones") + updateDeployment = true + } + if oldSpec != nil { specSync, specReason = needSyncConnectionPoolerSpecs(oldConnectionPooler, newConnectionPooler, c.logger) syncReason = append(syncReason, specReason...) } + newPodAnnotations := c.annotationsSet(c.generatePodAnnotations(&c.Spec)) + deletedPodAnnotations := []string{} + if changed, reason := c.compareAnnotations(deployment.Spec.Template.Annotations, newPodAnnotations, &deletedPodAnnotations); changed { + specSync = true + syncReason = append(syncReason, []string{"new connection pooler's pod template annotations do not match the current ones: " + reason}...) + + for _, anno := range deletedPodAnnotations { + updatedPodAnnotations[anno] = nil + } + templateMetadataReq := map[string]map[string]map[string]map[string]map[string]*string{ + "spec": {"template": {"metadata": {"annotations": updatedPodAnnotations}}}} + patch, err := json.Marshal(templateMetadataReq) + if err != nil { + return nil, fmt.Errorf("could not marshal ObjectMeta for %s connection pooler's pod template: %v", role, err) + } + deployment, err = c.KubeClient.Deployments(c.Namespace).Patch(context.TODO(), + deployment.Name, types.StrategicMergePatchType, patch, metav1.PatchOptions{}, "") + if err != nil { + c.logger.Errorf("failed to patch %s connection pooler's pod template: %v", role, err) + return nil, err + } + + deployment.Spec.Template.Annotations = newPodAnnotations + } + defaultsSync, defaultsReason := c.needSyncConnectionPoolerDefaults(&c.Config, newConnectionPooler, deployment) syncReason = append(syncReason, defaultsReason...) 
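	// Sketch of the strategic merge patch marshalled just above when pod
	// template annotations were removed (annotation name assumed for
	// illustration):
	//
	//   {"spec":{"template":{"metadata":{"annotations":{"obsolete-annotation":null}}}}}
	//
	// A null entry asks the API server to drop that key; the same
	// updatedPodAnnotations map is reused further down, together with the new
	// values, to patch the running pooler pods directly.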
- if specSync || defaultsSync { + if specSync || defaultsSync || updateDeployment { c.logger.Infof("update connection pooler deployment %s, reason: %+v", c.connectionPoolerName(role), syncReason) newDeployment, err = c.generateConnectionPoolerDeployment(c.ConnectionPooler[role]) @@ -1033,22 +1075,22 @@ func (c *Cluster) syncConnectionPoolerWorker(oldSpec, newSpec *acidv1.Postgresql return syncReason, fmt.Errorf("could not generate deployment for connection pooler: %v", err) } - deployment, err = updateConnectionPoolerDeployment(c.KubeClient, newDeployment) + deployment, err = updateConnectionPoolerDeployment(c.KubeClient, newDeployment, updateDeployment) if err != nil { return syncReason, err } c.ConnectionPooler[role].Deployment = deployment } - } - newAnnotations := c.AnnotationsToPropagate(c.annotationsSet(c.ConnectionPooler[role].Deployment.Annotations)) - if newAnnotations != nil { - deployment, err = updateConnectionPoolerAnnotations(c.KubeClient, c.ConnectionPooler[role].Deployment, newAnnotations) - if err != nil { - return nil, err + newAnnotations := c.AnnotationsToPropagate(c.annotationsSet(nil)) // including the downscaling annotations + if changed, _ := c.compareAnnotations(deployment.Annotations, newAnnotations, nil); changed { + deployment, err = patchConnectionPoolerAnnotations(c.KubeClient, deployment, newAnnotations) + if err != nil { + return nil, err + } + c.ConnectionPooler[role].Deployment = deployment } - c.ConnectionPooler[role].Deployment = deployment } // check if pooler pods must be replaced due to secret update @@ -1076,22 +1118,32 @@ func (c *Cluster) syncConnectionPoolerWorker(oldSpec, newSpec *acidv1.Postgresql if err != nil { return nil, fmt.Errorf("could not delete pooler pod: %v", err) } + } else if changed, _ := c.compareAnnotations(pod.Annotations, deployment.Spec.Template.Annotations, nil); changed { + metadataReq := map[string]map[string]map[string]*string{"metadata": {}} + + for anno, val := range deployment.Spec.Template.Annotations { + updatedPodAnnotations[anno] = &val + } + metadataReq["metadata"]["annotations"] = updatedPodAnnotations + patch, err := json.Marshal(metadataReq) + if err != nil { + return nil, fmt.Errorf("could not marshal ObjectMeta for %s connection pooler's pods: %v", role, err) + } + _, err = c.KubeClient.Pods(pod.Namespace).Patch(context.TODO(), pod.Name, types.StrategicMergePatchType, patch, metav1.PatchOptions{}) + if err != nil { + return nil, fmt.Errorf("could not patch annotations for %s connection pooler's pod %q: %v", role, pod.Name, err) + } } } if service, err = c.KubeClient.Services(c.Namespace).Get(context.TODO(), c.connectionPoolerName(role), metav1.GetOptions{}); err == nil { c.ConnectionPooler[role].Service = service desiredSvc := c.generateConnectionPoolerService(c.ConnectionPooler[role]) - if match, reason := c.compareServices(service, desiredSvc); !match { - syncReason = append(syncReason, reason) - c.logServiceChanges(role, service, desiredSvc, false, reason) - newService, err = c.updateService(role, service, desiredSvc) - if err != nil { - return syncReason, fmt.Errorf("could not update %s service to match desired state: %v", role, err) - } - c.ConnectionPooler[role].Service = newService - c.logger.Infof("%s service %q is in the desired state now", role, util.NameFromMeta(desiredSvc.ObjectMeta)) + newService, err = c.updateService(role, service, desiredSvc) + if err != nil { + return syncReason, fmt.Errorf("could not update %s service to match desired state: %v", role, err) } + 
c.ConnectionPooler[role].Service = newService return NoSync, nil } diff --git a/pkg/cluster/connection_pooler_test.go b/pkg/cluster/connection_pooler_test.go index f83d369b5..78d1c2527 100644 --- a/pkg/cluster/connection_pooler_test.go +++ b/pkg/cluster/connection_pooler_test.go @@ -2,7 +2,6 @@ package cluster import ( "context" - "errors" "fmt" "strings" "testing" @@ -711,47 +710,42 @@ func TestConnectionPoolerPodSpec(t *testing.T) { noCheck := func(cluster *Cluster, podSpec *v1.PodTemplateSpec, role PostgresRole) error { return nil } tests := []struct { - subTest string - spec *acidv1.PostgresSpec - expected error - cluster *Cluster - check func(cluster *Cluster, podSpec *v1.PodTemplateSpec, role PostgresRole) error + subTest string + spec *acidv1.PostgresSpec + cluster *Cluster + check func(cluster *Cluster, podSpec *v1.PodTemplateSpec, role PostgresRole) error }{ { subTest: "default configuration", spec: &acidv1.PostgresSpec{ ConnectionPooler: &acidv1.ConnectionPooler{}, }, - expected: nil, - cluster: cluster, - check: noCheck, + cluster: cluster, + check: noCheck, }, { subTest: "pooler uses pod service account", spec: &acidv1.PostgresSpec{ ConnectionPooler: &acidv1.ConnectionPooler{}, }, - expected: nil, - cluster: cluster, - check: testServiceAccount, + cluster: cluster, + check: testServiceAccount, }, { subTest: "no default resources", spec: &acidv1.PostgresSpec{ ConnectionPooler: &acidv1.ConnectionPooler{}, }, - expected: errors.New(`could not generate resource requirements: could not fill resource requests: could not parse default CPU quantity: quantities must match the regular expression '^([+-]?[0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$'`), - cluster: clusterNoDefaultRes, - check: noCheck, + cluster: clusterNoDefaultRes, + check: noCheck, }, { subTest: "default resources are set", spec: &acidv1.PostgresSpec{ ConnectionPooler: &acidv1.ConnectionPooler{}, }, - expected: nil, - cluster: cluster, - check: testResources, + cluster: cluster, + check: testResources, }, { subTest: "labels for service", @@ -759,30 +753,23 @@ func TestConnectionPoolerPodSpec(t *testing.T) { ConnectionPooler: &acidv1.ConnectionPooler{}, EnableReplicaConnectionPooler: boolToPointer(true), }, - expected: nil, - cluster: cluster, - check: testLabels, + cluster: cluster, + check: testLabels, }, { subTest: "required envs", spec: &acidv1.PostgresSpec{ ConnectionPooler: &acidv1.ConnectionPooler{}, }, - expected: nil, - cluster: cluster, - check: testEnvs, + cluster: cluster, + check: testEnvs, }, } for _, role := range [2]PostgresRole{Master, Replica} { for _, tt := range tests { - podSpec, err := tt.cluster.generateConnectionPoolerPodTemplate(role) + podSpec, _ := tt.cluster.generateConnectionPoolerPodTemplate(role) - if err != tt.expected && err.Error() != tt.expected.Error() { - t.Errorf("%s [%s]: Could not generate pod template,\n %+v, expected\n %+v", - testName, tt.subTest, err, tt.expected) - } - - err = tt.check(cluster, podSpec, role) + err := tt.check(cluster, podSpec, role) if err != nil { t.Errorf("%s [%s]: Pod spec is incorrect, %+v", testName, tt.subTest, err) @@ -973,8 +960,8 @@ func TestPoolerTLS(t *testing.T) { TeamID: "myapp", NumberOfInstances: 1, EnableConnectionPooler: util.True(), Resources: &acidv1.Resources{ - ResourceRequests: acidv1.ResourceDescription{CPU: "1", Memory: "10"}, - ResourceLimits: acidv1.ResourceDescription{CPU: "1", Memory: "10"}, + ResourceRequests: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("1"), Memory: k8sutil.StringToPointer("10")}, + ResourceLimits: 
acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("1"), Memory: k8sutil.StringToPointer("10")}, }, Volume: acidv1.Volume{ Size: "1G", @@ -982,7 +969,7 @@ func TestPoolerTLS(t *testing.T) { TLS: &acidv1.TLSDescription{ SecretName: tlsSecretName, CAFile: "ca.crt"}, AdditionalVolumes: []acidv1.AdditionalVolume{ - acidv1.AdditionalVolume{ + { Name: tlsSecretName, MountPath: mountPath, VolumeSource: v1.VolumeSource{ @@ -1090,6 +1077,9 @@ func TestConnectionPoolerServiceSpec(t *testing.T) { ConnectionPoolerDefaultMemoryRequest: "100Mi", ConnectionPoolerDefaultMemoryLimit: "100Mi", }, + Resources: config.Resources{ + EnableOwnerReferences: util.True(), + }, }, }, k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger, eventRecorder) cluster.Statefulset = &appsv1.StatefulSet{ diff --git a/pkg/cluster/database.go b/pkg/cluster/database.go index 26e29ef56..aac877bcf 100644 --- a/pkg/cluster/database.go +++ b/pkg/cluster/database.go @@ -46,12 +46,15 @@ const ( createExtensionSQL = `CREATE EXTENSION IF NOT EXISTS "%s" SCHEMA "%s"` alterExtensionSQL = `ALTER EXTENSION "%s" SET SCHEMA "%s"` - getPublicationsSQL = `SELECT p.pubname, string_agg(pt.schemaname || '.' || pt.tablename, ', ' ORDER BY pt.schemaname, pt.tablename) + getPublicationsSQL = `SELECT p.pubname, COALESCE(string_agg(pt.schemaname || '.' || pt.tablename, ', ' ORDER BY pt.schemaname, pt.tablename), '') AS pubtables FROM pg_publication p LEFT JOIN pg_publication_tables pt ON pt.pubname = p.pubname + WHERE p.pubowner = 'postgres'::regrole + AND p.pubname LIKE 'fes_%' GROUP BY p.pubname;` createPublicationSQL = `CREATE PUBLICATION "%s" FOR TABLE %s WITH (publish = 'insert, update');` alterPublicationSQL = `ALTER PUBLICATION "%s" SET TABLE %s;` + dropPublicationSQL = `DROP PUBLICATION "%s";` globalDefaultPrivilegesSQL = `SET ROLE TO "%s"; ALTER DEFAULT PRIVILEGES GRANT USAGE ON SCHEMAS TO "%s","%s"; @@ -108,7 +111,7 @@ func (c *Cluster) pgConnectionString(dbname string) string { func (c *Cluster) databaseAccessDisabled() bool { if !c.OpConfig.EnableDBAccess { - c.logger.Debugf("database access is disabled") + c.logger.Debug("database access is disabled") } return !c.OpConfig.EnableDBAccess @@ -205,7 +208,11 @@ func (c *Cluster) readPgUsersFromDatabase(userNames []string) (users spec.PgUser } defer func() { if err2 := rows.Close(); err2 != nil { - err = fmt.Errorf("error when closing query cursor: %v", err2) + if err != nil { + err = fmt.Errorf("error when closing query cursor: %v, previous error: %v", err2, err) + } else { + err = fmt.Errorf("error when closing query cursor: %v", err2) + } } }() @@ -252,7 +259,11 @@ func findUsersFromRotation(rotatedUsers []string, db *sql.DB) (map[string]string } defer func() { if err2 := rows.Close(); err2 != nil { - err = fmt.Errorf("error when closing query cursor: %v", err2) + if err != nil { + err = fmt.Errorf("error when closing query cursor: %v, previous error: %v", err2, err) + } else { + err = fmt.Errorf("error when closing query cursor: %v", err2) + } } }() @@ -620,6 +631,14 @@ func (c *Cluster) getPublications() (publications map[string]string, err error) return dbPublications, err } +func (c *Cluster) executeDropPublication(pubName string) error { + c.logger.Infof("dropping publication %q", pubName) + if _, err := c.pgDb.Exec(fmt.Sprintf(dropPublicationSQL, pubName)); err != nil { + return fmt.Errorf("could not execute drop publication: %v", err) + } + return nil +} + // executeCreatePublication creates new publication for given tables // The caller is responsible for opening and closing 
the database connection. func (c *Cluster) executeCreatePublication(pubName, tableList string) error { diff --git a/pkg/cluster/exec.go b/pkg/cluster/exec.go index 8b5089b4e..5605a70f6 100644 --- a/pkg/cluster/exec.go +++ b/pkg/cluster/exec.go @@ -15,7 +15,7 @@ import ( "github.com/zalando/postgres-operator/pkg/util/constants" ) -//ExecCommand executes arbitrary command inside the pod +// ExecCommand executes arbitrary command inside the pod func (c *Cluster) ExecCommand(podName *spec.NamespacedName, command ...string) (string, error) { c.setProcessName("executing command %q", strings.Join(command, " ")) @@ -59,7 +59,7 @@ func (c *Cluster) ExecCommand(podName *spec.NamespacedName, command ...string) ( return "", fmt.Errorf("failed to init executor: %v", err) } - err = exec.Stream(remotecommand.StreamOptions{ + err = exec.StreamWithContext(context.TODO(), remotecommand.StreamOptions{ Stdout: &execOut, Stderr: &execErr, Tty: false, diff --git a/pkg/cluster/k8sres.go b/pkg/cluster/k8sres.go index 502886854..fedd6a917 100644 --- a/pkg/cluster/k8sres.go +++ b/pkg/cluster/k8sres.go @@ -20,6 +20,11 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/intstr" + "golang.org/x/exp/maps" + "golang.org/x/exp/slices" + batchv1 "k8s.io/api/batch/v1" + "k8s.io/apimachinery/pkg/labels" + acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1" "github.com/zalando/postgres-operator/pkg/spec" "github.com/zalando/postgres-operator/pkg/util" @@ -28,9 +33,6 @@ import ( "github.com/zalando/postgres-operator/pkg/util/k8sutil" "github.com/zalando/postgres-operator/pkg/util/patroni" "github.com/zalando/postgres-operator/pkg/util/retryutil" - "golang.org/x/exp/maps" - batchv1 "k8s.io/api/batch/v1" - "k8s.io/apimachinery/pkg/labels" ) const ( @@ -45,11 +47,6 @@ const ( operatorPort = 8080 ) -type pgUser struct { - Password string `json:"password"` - Options []string `json:"options"` -} - type patroniDCS struct { TTL uint32 `json:"ttl,omitempty"` LoopWait uint32 `json:"loop_wait,omitempty"` @@ -64,9 +61,8 @@ type patroniDCS struct { } type pgBootstrap struct { - Initdb []interface{} `json:"initdb"` - Users map[string]pgUser `json:"users"` - DCS patroniDCS `json:"dcs,omitempty"` + Initdb []interface{} `json:"initdb"` + DCS patroniDCS `json:"dcs,omitempty"` } type spiloConfiguration struct { @@ -78,19 +74,13 @@ func (c *Cluster) statefulSetName() string { return c.Name } -func (c *Cluster) endpointName(role PostgresRole) string { - name := c.Name - if role == Replica { - name = fmt.Sprintf("%s-%s", name, "repl") - } - - return name -} - func (c *Cluster) serviceName(role PostgresRole) string { name := c.Name - if role == Replica { + switch role { + case Replica: name = fmt.Sprintf("%s-%s", name, "repl") + case Patroni: + name = fmt.Sprintf("%s-%s", name, "config") } return name @@ -119,19 +109,24 @@ func (c *Cluster) servicePort(role PostgresRole) int32 { return pgPort } -func (c *Cluster) podDisruptionBudgetName() string { +func (c *Cluster) PrimaryPodDisruptionBudgetName() string { return c.OpConfig.PDBNameFormat.Format("cluster", c.Name) } +func (c *Cluster) criticalOpPodDisruptionBudgetName() string { + pdbTemplate := config.StringTemplate("postgres-{cluster}-critical-op-pdb") + return pdbTemplate.Format("cluster", c.Name) +} + func makeDefaultResources(config *config.Config) acidv1.Resources { defaultRequests := acidv1.ResourceDescription{ - CPU: config.Resources.DefaultCPURequest, - Memory: config.Resources.DefaultMemoryRequest, + CPU: &config.Resources.DefaultCPURequest, + 
Memory: &config.Resources.DefaultMemoryRequest, } defaultLimits := acidv1.ResourceDescription{ - CPU: config.Resources.DefaultCPULimit, - Memory: config.Resources.DefaultMemoryLimit, + CPU: &config.Resources.DefaultCPULimit, + Memory: &config.Resources.DefaultMemoryLimit, } return acidv1.Resources{ @@ -143,12 +138,12 @@ func makeDefaultResources(config *config.Config) acidv1.Resources { func makeLogicalBackupResources(config *config.Config) acidv1.Resources { logicalBackupResourceRequests := acidv1.ResourceDescription{ - CPU: config.LogicalBackup.LogicalBackupCPURequest, - Memory: config.LogicalBackup.LogicalBackupMemoryRequest, + CPU: &config.LogicalBackup.LogicalBackupCPURequest, + Memory: &config.LogicalBackup.LogicalBackupMemoryRequest, } logicalBackupResourceLimits := acidv1.ResourceDescription{ - CPU: config.LogicalBackup.LogicalBackupCPULimit, - Memory: config.LogicalBackup.LogicalBackupMemoryLimit, + CPU: &config.LogicalBackup.LogicalBackupCPULimit, + Memory: &config.LogicalBackup.LogicalBackupMemoryLimit, } return acidv1.Resources{ @@ -214,7 +209,9 @@ func (c *Cluster) enforceMaxResourceRequests(resources *v1.ResourceRequirements) return fmt.Errorf("could not compare defined CPU request %s for %q container with configured maximum value %s: %v", cpuRequest.String(), constants.PostgresContainerName, maxCPURequest, err) } - resources.Requests[v1.ResourceCPU] = maxCPU + if !maxCPU.IsZero() { + resources.Requests[v1.ResourceCPU] = maxCPU + } memoryRequest := resources.Requests[v1.ResourceMemory] maxMemoryRequest := c.OpConfig.MaxMemoryRequest @@ -223,7 +220,9 @@ func (c *Cluster) enforceMaxResourceRequests(resources *v1.ResourceRequirements) return fmt.Errorf("could not compare defined memory request %s for %q container with configured maximum value %s: %v", memoryRequest.String(), constants.PostgresContainerName, maxMemoryRequest, err) } - resources.Requests[v1.ResourceMemory] = maxMemory + if !maxMemory.IsZero() { + resources.Requests[v1.ResourceMemory] = maxMemory + } return nil } @@ -240,30 +239,66 @@ func setMemoryRequestToLimit(resources *v1.ResourceRequirements, containerName s } } +func matchLimitsWithRequestsIfSmaller(resources *v1.ResourceRequirements, containerName string, logger *logrus.Entry) { + requests := resources.Requests + limits := resources.Limits + requestCPU, cpuRequestsExists := requests[v1.ResourceCPU] + limitCPU, cpuLimitExists := limits[v1.ResourceCPU] + if cpuRequestsExists && cpuLimitExists && limitCPU.Cmp(requestCPU) == -1 { + logger.Warningf("CPU limit of %s for %q container is increased to match CPU requests of %s", limitCPU.String(), containerName, requestCPU.String()) + resources.Limits[v1.ResourceCPU] = requestCPU + } + + requestMemory, memoryRequestsExists := requests[v1.ResourceMemory] + limitMemory, memoryLimitExists := limits[v1.ResourceMemory] + if memoryRequestsExists && memoryLimitExists && limitMemory.Cmp(requestMemory) == -1 { + logger.Warningf("memory limit of %s for %q container is increased to match memory requests of %s", limitMemory.String(), containerName, requestMemory.String()) + resources.Limits[v1.ResourceMemory] = requestMemory + } +} + func fillResourceList(spec acidv1.ResourceDescription, defaults acidv1.ResourceDescription) (v1.ResourceList, error) { var err error requests := v1.ResourceList{} + emptyResourceExamples := []string{"", "0", "null"} - if spec.CPU != "" { - requests[v1.ResourceCPU], err = resource.ParseQuantity(spec.CPU) + if spec.CPU != nil && !slices.Contains(emptyResourceExamples, *spec.CPU) { + 
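		// a real CPU value from the manifest (anything other than "", "0" or "null")
		// takes precedence and is parsed here; empty placeholders fall through to the
		// configured operator default handled in the else branch below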
requests[v1.ResourceCPU], err = resource.ParseQuantity(*spec.CPU) if err != nil { return nil, fmt.Errorf("could not parse CPU quantity: %v", err) } } else { - requests[v1.ResourceCPU], err = resource.ParseQuantity(defaults.CPU) - if err != nil { - return nil, fmt.Errorf("could not parse default CPU quantity: %v", err) + if defaults.CPU != nil && !slices.Contains(emptyResourceExamples, *defaults.CPU) { + requests[v1.ResourceCPU], err = resource.ParseQuantity(*defaults.CPU) + if err != nil { + return nil, fmt.Errorf("could not parse default CPU quantity: %v", err) + } } } - if spec.Memory != "" { - requests[v1.ResourceMemory], err = resource.ParseQuantity(spec.Memory) + if spec.Memory != nil && !slices.Contains(emptyResourceExamples, *spec.Memory) { + requests[v1.ResourceMemory], err = resource.ParseQuantity(*spec.Memory) if err != nil { return nil, fmt.Errorf("could not parse memory quantity: %v", err) } } else { - requests[v1.ResourceMemory], err = resource.ParseQuantity(defaults.Memory) + if defaults.Memory != nil && !slices.Contains(emptyResourceExamples, *defaults.Memory) { + requests[v1.ResourceMemory], err = resource.ParseQuantity(*defaults.Memory) + if err != nil { + return nil, fmt.Errorf("could not parse default memory quantity: %v", err) + } + } + } + + if spec.HugePages2Mi != nil { + requests[v1.ResourceHugePagesPrefix+"2Mi"], err = resource.ParseQuantity(*spec.HugePages2Mi) + if err != nil { + return nil, fmt.Errorf("could not parse hugepages-2Mi quantity: %v", err) + } + } + if spec.HugePages1Gi != nil { + requests[v1.ResourceHugePagesPrefix+"1Gi"], err = resource.ParseQuantity(*spec.HugePages1Gi) if err != nil { - return nil, fmt.Errorf("could not parse default memory quantity: %v", err) + return nil, fmt.Errorf("could not parse hugepages-1Gi quantity: %v", err) } } @@ -301,6 +336,10 @@ func (c *Cluster) generateResourceRequirements( } } + // make sure after reflecting default and enforcing min limit values we don't have requests > limits + matchLimitsWithRequestsIfSmaller(&result, containerName, c.logger) + + // vice versa set memory requests to limit if option is enabled if c.OpConfig.SetMemoryRequestToLimit { setMemoryRequestToLimit(&result, containerName, c.logger) } @@ -430,13 +469,6 @@ PatroniInitDBParams: config.PgLocalConfiguration[patroniPGHBAConfParameterName] = patroni.PgHba } - config.Bootstrap.Users = map[string]pgUser{ - opConfig.PamRoleName: { - Password: "", - Options: []string{constants.RoleFlagCreateDB, constants.RoleFlagNoLogin}, - }, - } - res, err := json.Marshal(config) return string(res), err } @@ -630,13 +662,19 @@ func isBootstrapOnlyParameter(param string) bool { } func generateVolumeMounts(volume acidv1.Volume) []v1.VolumeMount { - return []v1.VolumeMount{ + volumeMount := []v1.VolumeMount{ { Name: constants.DataVolumeName, MountPath: constants.PostgresDataMount, //TODO: fetch from manifest - SubPath: volume.SubPath, }, } + + if volume.IsSubPathExpr != nil && *volume.IsSubPathExpr { + volumeMount[0].SubPathExpr = volume.SubPath + } else { + volumeMount[0].SubPath = volume.SubPath + } + return volumeMount } func generateContainer( @@ -706,7 +744,7 @@ func (c *Cluster) generateSidecarContainers(sidecars []acidv1.Sidecar, } // adds common fields to sidecars -func patchSidecarContainers(in []v1.Container, volumeMounts []v1.VolumeMount, superUserName string, credentialsSecretName string, logger *logrus.Entry) []v1.Container { +func patchSidecarContainers(in []v1.Container, volumeMounts []v1.VolumeMount, superUserName string, credentialsSecretName string) 
[]v1.Container { result := []v1.Container{} for _, container := range in { @@ -848,7 +886,7 @@ func (c *Cluster) generatePodTemplate( addSecretVolume(&podSpec, additionalSecretMount, additionalSecretMountPath) } - if additionalVolumes != nil { + if len(additionalVolumes) > 0 { c.addAdditionalVolumes(&podSpec, additionalVolumes) } @@ -972,6 +1010,9 @@ func (c *Cluster) generateSpiloPodEnvVars( if c.patroniUsesKubernetes() { envVars = append(envVars, v1.EnvVar{Name: "DCS_ENABLE_KUBERNETES_API", Value: "true"}) + if c.OpConfig.EnablePodDisruptionBudget != nil && *c.OpConfig.EnablePodDisruptionBudget { + envVars = append(envVars, v1.EnvVar{Name: "KUBERNETES_BOOTSTRAP_LABELS", Value: "{\"critical-operation\":\"true\"}"}) + } } else { envVars = append(envVars, v1.EnvVar{Name: "ETCD_HOST", Value: c.OpConfig.EtcdHost}) } @@ -1145,6 +1186,37 @@ func (c *Cluster) getPodEnvironmentSecretVariables() ([]v1.EnvVar, error) { return secretPodEnvVarsList, nil } +// Return list of variables the cronjob received from the configured Secret +func (c *Cluster) getCronjobEnvironmentSecretVariables() ([]v1.EnvVar, error) { + secretCronjobEnvVarsList := make([]v1.EnvVar, 0) + + if c.OpConfig.LogicalBackupCronjobEnvironmentSecret == "" { + return secretCronjobEnvVarsList, nil + } + + secret, err := c.KubeClient.Secrets(c.Namespace).Get( + context.TODO(), + c.OpConfig.LogicalBackupCronjobEnvironmentSecret, + metav1.GetOptions{}) + if err != nil { + return nil, fmt.Errorf("could not read Secret CronjobEnvironmentSecretName: %v", err) + } + + for k := range secret.Data { + secretCronjobEnvVarsList = append(secretCronjobEnvVarsList, + v1.EnvVar{Name: k, ValueFrom: &v1.EnvVarSource{ + SecretKeyRef: &v1.SecretKeySelector{ + LocalObjectReference: v1.LocalObjectReference{ + Name: c.OpConfig.LogicalBackupCronjobEnvironmentSecret, + }, + Key: k, + }, + }}) + } + + return secretCronjobEnvVarsList, nil +} + func getSidecarContainer(sidecar acidv1.Sidecar, index int, resources *v1.ResourceRequirements) *v1.Container { name := sidecar.Name if name == "" { @@ -1158,6 +1230,7 @@ func getSidecarContainer(sidecar acidv1.Sidecar, index int, resources *v1.Resour Resources: *resources, Env: sidecar.Env, Ports: sidecar.Ports, + Command: sidecar.Command, } } @@ -1171,12 +1244,12 @@ func getBucketScopeSuffix(uid string) string { func makeResources(cpuRequest, memoryRequest, cpuLimit, memoryLimit string) acidv1.Resources { return acidv1.Resources{ ResourceRequests: acidv1.ResourceDescription{ - CPU: cpuRequest, - Memory: memoryRequest, + CPU: &cpuRequest, + Memory: &memoryRequest, }, ResourceLimits: acidv1.ResourceDescription{ - CPU: cpuLimit, - Memory: memoryLimit, + CPU: &cpuLimit, + Memory: &memoryLimit, }, } } @@ -1380,7 +1453,7 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef containerName, containerName) } - sidecarContainers = patchSidecarContainers(sidecarContainers, volumeMounts, c.OpConfig.SuperUsername, c.credentialSecretName(c.OpConfig.SuperUsername), c.logger) + sidecarContainers = patchSidecarContainers(sidecarContainers, volumeMounts, c.OpConfig.SuperUsername, c.credentialSecretName(c.OpConfig.SuperUsername)) tolerationSpec := tolerations(&spec.Tolerations, c.OpConfig.PodToleration) effectivePodPriorityClassName := util.Coalesce(spec.PodPriorityClassName, c.OpConfig.PodPriorityClassName) @@ -1455,10 +1528,11 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef statefulSet := &appsv1.StatefulSet{ ObjectMeta: metav1.ObjectMeta{ - Name: c.statefulSetName(), - 
Namespace: c.Namespace, - Labels: c.labelsSet(true), - Annotations: c.AnnotationsToPropagate(c.annotationsSet(nil)), + Name: c.statefulSetName(), + Namespace: c.Namespace, + Labels: c.labelsSet(true), + Annotations: c.AnnotationsToPropagate(c.annotationsSet(nil)), + OwnerReferences: c.ownerReferences(), }, Spec: appsv1.StatefulSetSpec{ Replicas: &numberOfInstances, @@ -1533,7 +1607,7 @@ func (c *Cluster) generatePodAnnotations(spec *acidv1.PostgresSpec) map[string]s for k, v := range c.OpConfig.CustomPodAnnotations { annotations[k] = v } - if spec != nil || spec.PodAnnotations != nil { + if spec.PodAnnotations != nil { for k, v := range spec.PodAnnotations { annotations[k] = v } @@ -1751,11 +1825,18 @@ func (c *Cluster) addAdditionalVolumes(podSpec *v1.PodSpec, for _, additionalVolume := range additionalVolumes { for _, target := range additionalVolume.TargetContainers { if podSpec.Containers[i].Name == target || target == "all" { - mounts = append(mounts, v1.VolumeMount{ + v := v1.VolumeMount{ Name: additionalVolume.Name, MountPath: additionalVolume.MountPath, - SubPath: additionalVolume.SubPath, - }) + } + + if additionalVolume.IsSubPathExpr != nil && *additionalVolume.IsSubPathExpr { + v.SubPathExpr = additionalVolume.SubPath + } else { + v.SubPath = additionalVolume.SubPath + } + + mounts = append(mounts, v) } } } @@ -1787,7 +1868,7 @@ func (c *Cluster) generatePersistentVolumeClaimTemplate(volumeSize, volumeStorag }, Spec: v1.PersistentVolumeClaimSpec{ AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}, - Resources: v1.ResourceRequirements{ + Resources: v1.VolumeResourceRequirements{ Requests: v1.ResourceList{ v1.ResourceStorage: quantity, }, @@ -1803,18 +1884,16 @@ func (c *Cluster) generatePersistentVolumeClaimTemplate(volumeSize, volumeStorag func (c *Cluster) generateUserSecrets() map[string]*v1.Secret { secrets := make(map[string]*v1.Secret, len(c.pgUsers)+len(c.systemUsers)) - namespace := c.Namespace for username, pgUser := range c.pgUsers { //Skip users with no password i.e. human users (they'll be authenticated using pam) - secret := c.generateSingleUserSecret(pgUser.Namespace, pgUser) + secret := c.generateSingleUserSecret(pgUser) if secret != nil { secrets[username] = secret } - namespace = pgUser.Namespace } /* special case for the system user */ for _, systemUser := range c.systemUsers { - secret := c.generateSingleUserSecret(namespace, systemUser) + secret := c.generateSingleUserSecret(systemUser) if secret != nil { secrets[systemUser.Name] = secret } @@ -1823,7 +1902,7 @@ func (c *Cluster) generateUserSecrets() map[string]*v1.Secret { return secrets } -func (c *Cluster) generateSingleUserSecret(namespace string, pgUser spec.PgUser) *v1.Secret { +func (c *Cluster) generateSingleUserSecret(pgUser spec.PgUser) *v1.Secret { //Skip users with no password i.e. 
human users (they'll be authenticated using pam) if pgUser.Password == "" { if pgUser.Origin != spec.RoleOriginTeamsAPI { @@ -1847,12 +1926,21 @@ func (c *Cluster) generateSingleUserSecret(namespace string, pgUser spec.PgUser) lbls = c.connectionPoolerLabels("", false).MatchLabels } + // if secret lives in another namespace we cannot set ownerReferences + var ownerReferences []metav1.OwnerReference + if c.Config.OpConfig.EnableCrossNamespaceSecret && strings.Contains(username, ".") { + ownerReferences = nil + } else { + ownerReferences = c.ownerReferences() + } + secret := v1.Secret{ ObjectMeta: metav1.ObjectMeta{ - Name: c.credentialSecretName(username), - Namespace: pgUser.Namespace, - Labels: lbls, - Annotations: c.annotationsSet(nil), + Name: c.credentialSecretName(username), + Namespace: pgUser.Namespace, + Labels: lbls, + Annotations: c.annotationsSet(nil), + OwnerReferences: ownerReferences, }, Type: v1.SecretTypeOpaque, Data: map[string][]byte{ @@ -1910,10 +1998,11 @@ func (c *Cluster) generateService(role PostgresRole, spec *acidv1.PostgresSpec) service := &v1.Service{ ObjectMeta: metav1.ObjectMeta{ - Name: c.serviceName(role), - Namespace: c.Namespace, - Labels: c.roleLabelsSet(true, role), - Annotations: c.annotationsSet(c.generateServiceAnnotations(role, spec)), + Name: c.serviceName(role), + Namespace: c.Namespace, + Labels: c.roleLabelsSet(true, role), + Annotations: c.annotationsSet(c.generateServiceAnnotations(role, spec)), + OwnerReferences: c.ownerReferences(), }, Spec: serviceSpec, } @@ -1979,9 +2068,11 @@ func (c *Cluster) getCustomServiceAnnotations(role PostgresRole, spec *acidv1.Po func (c *Cluster) generateEndpoint(role PostgresRole, subsets []v1.EndpointSubset) *v1.Endpoints { endpoints := &v1.Endpoints{ ObjectMeta: metav1.ObjectMeta{ - Name: c.endpointName(role), - Namespace: c.Namespace, - Labels: c.roleLabelsSet(true, role), + Name: c.serviceName(role), + Namespace: c.Namespace, + Annotations: c.annotationsSet(nil), + Labels: c.roleLabelsSet(true, role), + OwnerReferences: c.ownerReferences(), }, } if len(subsets) > 0 { @@ -2124,26 +2215,63 @@ func (c *Cluster) generateStandbyEnvironment(description *acidv1.StandbyDescript return result } -func (c *Cluster) generatePodDisruptionBudget() *policyv1.PodDisruptionBudget { +func (c *Cluster) generatePrimaryPodDisruptionBudget() *policyv1.PodDisruptionBudget { minAvailable := intstr.FromInt(1) pdbEnabled := c.OpConfig.EnablePodDisruptionBudget + pdbMasterLabelSelector := c.OpConfig.PDBMasterLabelSelector // if PodDisruptionBudget is disabled or if there are no DB pods, set the budget to 0. 
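	// otherwise the default minAvailable of 1 keeps at least one pod (the primary,
	// unless PDBMasterLabelSelector is disabled) protected from voluntary disruptions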
if (pdbEnabled != nil && !(*pdbEnabled)) || c.Spec.NumberOfInstances <= 0 { minAvailable = intstr.FromInt(0) } + // define label selector and add the master role selector if enabled + labels := c.labelsSet(false) + if pdbMasterLabelSelector == nil || *c.OpConfig.PDBMasterLabelSelector { + labels[c.OpConfig.PodRoleLabel] = string(Master) + } + return &policyv1.PodDisruptionBudget{ ObjectMeta: metav1.ObjectMeta{ - Name: c.podDisruptionBudgetName(), - Namespace: c.Namespace, - Labels: c.labelsSet(true), - Annotations: c.annotationsSet(nil), + Name: c.PrimaryPodDisruptionBudgetName(), + Namespace: c.Namespace, + Labels: c.labelsSet(true), + Annotations: c.annotationsSet(nil), + OwnerReferences: c.ownerReferences(), + }, + Spec: policyv1.PodDisruptionBudgetSpec{ + MinAvailable: &minAvailable, + Selector: &metav1.LabelSelector{ + MatchLabels: labels, + }, + }, + } +} + +func (c *Cluster) generateCriticalOpPodDisruptionBudget() *policyv1.PodDisruptionBudget { + minAvailable := intstr.FromInt32(c.Spec.NumberOfInstances) + pdbEnabled := c.OpConfig.EnablePodDisruptionBudget + + // if PodDisruptionBudget is disabled or if there are no DB pods, set the budget to 0. + if (pdbEnabled != nil && !(*pdbEnabled)) || c.Spec.NumberOfInstances <= 0 { + minAvailable = intstr.FromInt(0) + } + + labels := c.labelsSet(false) + labels["critical-operation"] = "true" + + return &policyv1.PodDisruptionBudget{ + ObjectMeta: metav1.ObjectMeta{ + Name: c.criticalOpPodDisruptionBudgetName(), + Namespace: c.Namespace, + Labels: c.labelsSet(true), + Annotations: c.annotationsSet(nil), + OwnerReferences: c.ownerReferences(), }, Spec: policyv1.PodDisruptionBudgetSpec{ MinAvailable: &minAvailable, Selector: &metav1.LabelSelector{ - MatchLabels: c.roleLabelsSet(false, Master), + MatchLabels: labels, }, }, } @@ -2166,6 +2294,8 @@ func (c *Cluster) generateLogicalBackupJob() (*batchv1.CronJob, error) { resourceRequirements *v1.ResourceRequirements ) + spec := &c.Spec + // NB: a cron job creates standard batch jobs according to schedule; these batch jobs manage pods and clean-up c.logger.Debug("Generating logical backup pod template") @@ -2180,7 +2310,13 @@ func (c *Cluster) generateLogicalBackupJob() (*batchv1.CronJob, error) { return nil, fmt.Errorf("could not generate resource requirements for logical backup pods: %v", err) } + secretEnvVarsList, err := c.getCronjobEnvironmentSecretVariables() + if err != nil { + return nil, err + } + envVars := c.generateLogicalBackupPodEnvVars() + envVars = append(envVars, secretEnvVarsList...) 
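The appended secretEnvVarsList comes from the new getCronjobEnvironmentSecretVariables helper above: every key of the Secret named by LogicalBackupCronjobEnvironmentSecret is exposed to the backup cron job as an env var backed by a SecretKeyRef. A minimal standalone sketch of that mapping, outside the operator; the package, function and parameter names here are illustrative only, not operator API:

package sketch

import v1 "k8s.io/api/core/v1"

// envVarsFromSecretKeys mirrors the idea behind the new helper: one env var per
// Secret key, each resolved from the same Secret when the cron job pod starts.
func envVarsFromSecretKeys(secretName string, keys []string) []v1.EnvVar {
	vars := make([]v1.EnvVar, 0, len(keys))
	for _, k := range keys {
		vars = append(vars, v1.EnvVar{
			Name: k,
			ValueFrom: &v1.EnvVarSource{
				SecretKeyRef: &v1.SecretKeySelector{
					LocalObjectReference: v1.LocalObjectReference{Name: secretName},
					Key:                  k,
				},
			},
		})
	}
	return vars
}

A call such as envVarsFromSecretKeys("my-backup-secret", []string{"AWS_ACCESS_KEY_ID"}) would yield the same shape of env var the operator builds for each key it finds in the referenced Secret.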
logicalBackupContainer := generateContainer( logicalBackupContainerName, &c.OpConfig.LogicalBackup.LogicalBackupDockerImage, @@ -2192,11 +2328,12 @@ func (c *Cluster) generateLogicalBackupJob() (*batchv1.CronJob, error) { nil, ) - labels := map[string]string{ - c.OpConfig.ClusterNameLabel: c.Name, - "application": "spilo-logical-backup", + logicalBackupJobLabel := map[string]string{ + "application": "spilo-logical-backup", } + labels := labels.Merge(c.labelsSet(true), logicalBackupJobLabel) + nodeAffinity := c.nodeAffinity(c.OpConfig.NodeReadinessLabel, nil) podAffinity := podAffinity( labels, @@ -2208,16 +2345,18 @@ func (c *Cluster) generateLogicalBackupJob() (*batchv1.CronJob, error) { annotations := c.generatePodAnnotations(&c.Spec) + tolerationsSpec := tolerations(&spec.Tolerations, c.OpConfig.PodToleration) + // re-use the method that generates DB pod templates if podTemplate, err = c.generatePodTemplate( c.Namespace, labels, - annotations, + c.annotationsSet(annotations), logicalBackupContainer, []v1.Container{}, []v1.Container{}, util.False(), - &[]v1.Toleration{}, + &tolerationsSpec, nil, nil, nil, @@ -2260,10 +2399,11 @@ func (c *Cluster) generateLogicalBackupJob() (*batchv1.CronJob, error) { cronJob := &batchv1.CronJob{ ObjectMeta: metav1.ObjectMeta{ - Name: c.getLogicalBackupJobName(), - Namespace: c.Namespace, - Labels: c.labelsSet(true), - Annotations: c.annotationsSet(nil), + Name: c.getLogicalBackupJobName(), + Namespace: c.Namespace, + Labels: c.labelsSet(true), + Annotations: c.annotationsSet(nil), + OwnerReferences: c.ownerReferences(), }, Spec: batchv1.CronJobSpec{ Schedule: schedule, @@ -2277,6 +2417,8 @@ func (c *Cluster) generateLogicalBackupJob() (*batchv1.CronJob, error) { func (c *Cluster) generateLogicalBackupPodEnvVars() []v1.EnvVar { + backupProvider := c.OpConfig.LogicalBackup.LogicalBackupProvider + envVars := []v1.EnvVar{ { Name: "SCOPE", @@ -2295,51 +2437,6 @@ func (c *Cluster) generateLogicalBackupPodEnvVars() []v1.EnvVar { }, }, }, - // Bucket env vars - { - Name: "LOGICAL_BACKUP_PROVIDER", - Value: c.OpConfig.LogicalBackup.LogicalBackupProvider, - }, - { - Name: "LOGICAL_BACKUP_S3_BUCKET", - Value: c.OpConfig.LogicalBackup.LogicalBackupS3Bucket, - }, - { - Name: "LOGICAL_BACKUP_S3_REGION", - Value: c.OpConfig.LogicalBackup.LogicalBackupS3Region, - }, - { - Name: "LOGICAL_BACKUP_S3_ENDPOINT", - Value: c.OpConfig.LogicalBackup.LogicalBackupS3Endpoint, - }, - { - Name: "LOGICAL_BACKUP_S3_SSE", - Value: c.OpConfig.LogicalBackup.LogicalBackupS3SSE, - }, - { - Name: "LOGICAL_BACKUP_S3_RETENTION_TIME", - Value: c.OpConfig.LogicalBackup.LogicalBackupS3RetentionTime, - }, - { - Name: "LOGICAL_BACKUP_S3_BUCKET_SCOPE_SUFFIX", - Value: getBucketScopeSuffix(string(c.Postgresql.GetUID())), - }, - { - Name: "LOGICAL_BACKUP_GOOGLE_APPLICATION_CREDENTIALS", - Value: c.OpConfig.LogicalBackup.LogicalBackupGoogleApplicationCredentials, - }, - { - Name: "LOGICAL_BACKUP_AZURE_STORAGE_ACCOUNT_NAME", - Value: c.OpConfig.LogicalBackup.LogicalBackupAzureStorageAccountName, - }, - { - Name: "LOGICAL_BACKUP_AZURE_STORAGE_CONTAINER", - Value: c.OpConfig.LogicalBackup.LogicalBackupAzureStorageContainer, - }, - { - Name: "LOGICAL_BACKUP_AZURE_STORAGE_ACCOUNT_KEY", - Value: c.OpConfig.LogicalBackup.LogicalBackupAzureStorageAccountKey, - }, // Postgres env vars { Name: "PG_VERSION", @@ -2372,19 +2469,85 @@ func (c *Cluster) generateLogicalBackupPodEnvVars() []v1.EnvVar { }, }, }, + // Bucket env vars + { + Name: "LOGICAL_BACKUP_PROVIDER", + Value: backupProvider, + }, + { + Name: 
"LOGICAL_BACKUP_S3_BUCKET", + Value: c.OpConfig.LogicalBackup.LogicalBackupS3Bucket, + }, + { + Name: "LOGICAL_BACKUP_S3_BUCKET_PREFIX", + Value: c.OpConfig.LogicalBackup.LogicalBackupS3BucketPrefix, + }, + { + Name: "LOGICAL_BACKUP_S3_BUCKET_SCOPE_SUFFIX", + Value: getBucketScopeSuffix(string(c.Postgresql.GetUID())), + }, } - if c.OpConfig.LogicalBackup.LogicalBackupS3AccessKeyID != "" { - envVars = append(envVars, v1.EnvVar{Name: "AWS_ACCESS_KEY_ID", Value: c.OpConfig.LogicalBackup.LogicalBackupS3AccessKeyID}) - } + switch backupProvider { + case "s3": + envVars = appendEnvVars(envVars, []v1.EnvVar{ + { + Name: "LOGICAL_BACKUP_S3_REGION", + Value: c.OpConfig.LogicalBackup.LogicalBackupS3Region, + }, + { + Name: "LOGICAL_BACKUP_S3_ENDPOINT", + Value: c.OpConfig.LogicalBackup.LogicalBackupS3Endpoint, + }, + { + Name: "LOGICAL_BACKUP_S3_SSE", + Value: c.OpConfig.LogicalBackup.LogicalBackupS3SSE, + }, + { + Name: "LOGICAL_BACKUP_S3_RETENTION_TIME", + Value: c.getLogicalBackupRetentionTime(), + }}...) + + if c.OpConfig.LogicalBackup.LogicalBackupS3AccessKeyID != "" { + envVars = append(envVars, v1.EnvVar{Name: "AWS_ACCESS_KEY_ID", Value: c.OpConfig.LogicalBackup.LogicalBackupS3AccessKeyID}) + } + + if c.OpConfig.LogicalBackup.LogicalBackupS3SecretAccessKey != "" { + envVars = append(envVars, v1.EnvVar{Name: "AWS_SECRET_ACCESS_KEY", Value: c.OpConfig.LogicalBackup.LogicalBackupS3SecretAccessKey}) + } + + case "gcs": + if c.OpConfig.LogicalBackup.LogicalBackupGoogleApplicationCredentials != "" { + envVars = append(envVars, v1.EnvVar{Name: "LOGICAL_BACKUP_GOOGLE_APPLICATION_CREDENTIALS", Value: c.OpConfig.LogicalBackup.LogicalBackupGoogleApplicationCredentials}) + } + + case "az": + envVars = appendEnvVars(envVars, []v1.EnvVar{ + { + Name: "LOGICAL_BACKUP_AZURE_STORAGE_ACCOUNT_NAME", + Value: c.OpConfig.LogicalBackup.LogicalBackupAzureStorageAccountName, + }, + { + Name: "LOGICAL_BACKUP_AZURE_STORAGE_CONTAINER", + Value: c.OpConfig.LogicalBackup.LogicalBackupAzureStorageContainer, + }}...) - if c.OpConfig.LogicalBackup.LogicalBackupS3SecretAccessKey != "" { - envVars = append(envVars, v1.EnvVar{Name: "AWS_SECRET_ACCESS_KEY", Value: c.OpConfig.LogicalBackup.LogicalBackupS3SecretAccessKey}) + if c.OpConfig.LogicalBackup.LogicalBackupAzureStorageAccountKey != "" { + envVars = append(envVars, v1.EnvVar{Name: "LOGICAL_BACKUP_AZURE_STORAGE_ACCOUNT_KEY", Value: c.OpConfig.LogicalBackup.LogicalBackupAzureStorageAccountKey}) + } } return envVars } +func (c *Cluster) getLogicalBackupRetentionTime() (retentionTime string) { + if c.Spec.LogicalBackupRetention != "" { + return c.Spec.LogicalBackupRetention + } + + return c.OpConfig.LogicalBackup.LogicalBackupS3RetentionTime +} + // getLogicalBackupJobName returns the name; the job itself may not exists func (c *Cluster) getLogicalBackupJobName() (jobName string) { return trimCronjobName(fmt.Sprintf("%s%s", c.OpConfig.LogicalBackupJobPrefix, c.clusterName().Name)) @@ -2397,22 +2560,26 @@ func (c *Cluster) getLogicalBackupJobName() (jobName string) { // survived, we can't delete an object because it will affect the functioning // cluster). 
func (c *Cluster) ownerReferences() []metav1.OwnerReference { - controller := true + currentOwnerReferences := c.ObjectMeta.OwnerReferences + if c.OpConfig.EnableOwnerReferences == nil || !*c.OpConfig.EnableOwnerReferences { + return currentOwnerReferences + } - if c.Statefulset == nil { - c.logger.Warning("Cannot get owner reference, no statefulset") - return []metav1.OwnerReference{} + for _, ownerRef := range currentOwnerReferences { + if ownerRef.UID == c.Postgresql.ObjectMeta.UID { + return currentOwnerReferences + } } - return []metav1.OwnerReference{ - { - UID: c.Statefulset.ObjectMeta.UID, - APIVersion: "apps/v1", - Kind: "StatefulSet", - Name: c.Statefulset.ObjectMeta.Name, - Controller: &controller, - }, + controllerReference := metav1.OwnerReference{ + UID: c.Postgresql.ObjectMeta.UID, + APIVersion: acidv1.SchemeGroupVersion.Identifier(), + Kind: acidv1.PostgresCRDResourceKind, + Name: c.Postgresql.ObjectMeta.Name, + Controller: util.True(), } + + return append(currentOwnerReferences, controllerReference) } func ensurePath(file string, defaultDir string, defaultFile string) string { diff --git a/pkg/cluster/k8sres_test.go b/pkg/cluster/k8sres_test.go index a88320deb..137c24081 100644 --- a/pkg/cluster/k8sres_test.go +++ b/pkg/cluster/k8sres_test.go @@ -5,9 +5,8 @@ import ( "fmt" "reflect" "sort" - "time" - "testing" + "time" "github.com/stretchr/testify/assert" @@ -73,18 +72,18 @@ func TestGenerateSpiloJSONConfiguration(t *testing.T) { }{ { subtest: "Patroni default configuration", - pgParam: &acidv1.PostgresqlParam{PgVersion: "15"}, + pgParam: &acidv1.PostgresqlParam{PgVersion: "17"}, patroni: &acidv1.Patroni{}, opConfig: &config.Config{ Auth: config.Auth{ PamRoleName: "zalandos", }, }, - result: `{"postgresql":{"bin_dir":"/usr/lib/postgresql/15/bin"},"bootstrap":{"initdb":[{"auth-host":"md5"},{"auth-local":"trust"}],"users":{"zalandos":{"password":"","options":["CREATEDB","NOLOGIN"]}},"dcs":{}}}`, + result: `{"postgresql":{"bin_dir":"/usr/lib/postgresql/17/bin"},"bootstrap":{"initdb":[{"auth-host":"md5"},{"auth-local":"trust"}],"dcs":{}}}`, }, { subtest: "Patroni configured", - pgParam: &acidv1.PostgresqlParam{PgVersion: "15"}, + pgParam: &acidv1.PostgresqlParam{PgVersion: "17"}, patroni: &acidv1.Patroni{ InitDB: map[string]string{ "encoding": "UTF8", @@ -102,52 +101,39 @@ func TestGenerateSpiloJSONConfiguration(t *testing.T) { Slots: map[string]map[string]string{"permanent_logical_1": {"type": "logical", "database": "foo", "plugin": "pgoutput"}}, FailsafeMode: util.True(), }, - opConfig: &config.Config{ - Auth: config.Auth{ - PamRoleName: "zalandos", - }, - }, - result: `{"postgresql":{"bin_dir":"/usr/lib/postgresql/15/bin","pg_hba":["hostssl all all 0.0.0.0/0 md5","host all all 0.0.0.0/0 md5"]},"bootstrap":{"initdb":[{"auth-host":"md5"},{"auth-local":"trust"},"data-checksums",{"encoding":"UTF8"},{"locale":"en_US.UTF-8"}],"users":{"zalandos":{"password":"","options":["CREATEDB","NOLOGIN"]}},"dcs":{"ttl":30,"loop_wait":10,"retry_timeout":10,"maximum_lag_on_failover":33554432,"synchronous_mode":true,"synchronous_mode_strict":true,"synchronous_node_count":1,"slots":{"permanent_logical_1":{"database":"foo","plugin":"pgoutput","type":"logical"}},"failsafe_mode":true}}}`, + opConfig: &config.Config{}, + result: `{"postgresql":{"bin_dir":"/usr/lib/postgresql/17/bin","pg_hba":["hostssl all all 0.0.0.0/0 md5","host all all 0.0.0.0/0 
md5"]},"bootstrap":{"initdb":[{"auth-host":"md5"},{"auth-local":"trust"},"data-checksums",{"encoding":"UTF8"},{"locale":"en_US.UTF-8"}],"dcs":{"ttl":30,"loop_wait":10,"retry_timeout":10,"maximum_lag_on_failover":33554432,"synchronous_mode":true,"synchronous_mode_strict":true,"synchronous_node_count":1,"slots":{"permanent_logical_1":{"database":"foo","plugin":"pgoutput","type":"logical"}},"failsafe_mode":true}}}`, }, { subtest: "Patroni failsafe_mode configured globally", - pgParam: &acidv1.PostgresqlParam{PgVersion: "15"}, + pgParam: &acidv1.PostgresqlParam{PgVersion: "17"}, patroni: &acidv1.Patroni{}, opConfig: &config.Config{ - Auth: config.Auth{ - PamRoleName: "zalandos", - }, EnablePatroniFailsafeMode: util.True(), }, - result: `{"postgresql":{"bin_dir":"/usr/lib/postgresql/15/bin"},"bootstrap":{"initdb":[{"auth-host":"md5"},{"auth-local":"trust"}],"users":{"zalandos":{"password":"","options":["CREATEDB","NOLOGIN"]}},"dcs":{"failsafe_mode":true}}}`, + result: `{"postgresql":{"bin_dir":"/usr/lib/postgresql/17/bin"},"bootstrap":{"initdb":[{"auth-host":"md5"},{"auth-local":"trust"}],"dcs":{"failsafe_mode":true}}}`, }, { subtest: "Patroni failsafe_mode configured globally, disabled for cluster", - pgParam: &acidv1.PostgresqlParam{PgVersion: "15"}, + pgParam: &acidv1.PostgresqlParam{PgVersion: "17"}, patroni: &acidv1.Patroni{ FailsafeMode: util.False(), }, opConfig: &config.Config{ - Auth: config.Auth{ - PamRoleName: "zalandos", - }, EnablePatroniFailsafeMode: util.True(), }, - result: `{"postgresql":{"bin_dir":"/usr/lib/postgresql/15/bin"},"bootstrap":{"initdb":[{"auth-host":"md5"},{"auth-local":"trust"}],"users":{"zalandos":{"password":"","options":["CREATEDB","NOLOGIN"]}},"dcs":{"failsafe_mode":false}}}`, + result: `{"postgresql":{"bin_dir":"/usr/lib/postgresql/17/bin"},"bootstrap":{"initdb":[{"auth-host":"md5"},{"auth-local":"trust"}],"dcs":{"failsafe_mode":false}}}`, }, { subtest: "Patroni failsafe_mode disabled globally, configured for cluster", - pgParam: &acidv1.PostgresqlParam{PgVersion: "15"}, + pgParam: &acidv1.PostgresqlParam{PgVersion: "17"}, patroni: &acidv1.Patroni{ FailsafeMode: util.True(), }, opConfig: &config.Config{ - Auth: config.Auth{ - PamRoleName: "zalandos", - }, EnablePatroniFailsafeMode: util.False(), }, - result: `{"postgresql":{"bin_dir":"/usr/lib/postgresql/15/bin"},"bootstrap":{"initdb":[{"auth-host":"md5"},{"auth-local":"trust"}],"users":{"zalandos":{"password":"","options":["CREATEDB","NOLOGIN"]}},"dcs":{"failsafe_mode":true}}}`, + result: `{"postgresql":{"bin_dir":"/usr/lib/postgresql/17/bin"},"bootstrap":{"initdb":[{"auth-host":"md5"},{"auth-local":"trust"}],"dcs":{"failsafe_mode":true}}}`, }, } for _, tt := range tests { @@ -157,8 +143,8 @@ func TestGenerateSpiloJSONConfiguration(t *testing.T) { t.Errorf("Unexpected error: %v", err) } if tt.result != result { - t.Errorf("%s %s: Spilo Config is %v, expected %v for role %#v and param %#v", - t.Name(), tt.subtest, result, tt.result, tt.opConfig.Auth.PamRoleName, tt.pgParam) + t.Errorf("%s %s: Spilo Config is %v, expected %v and param %#v", + t.Name(), tt.subtest, result, tt.result, tt.pgParam) } } } @@ -178,15 +164,15 @@ func TestExtractPgVersionFromBinPath(t *testing.T) { }, { subTest: "test current bin path against hard coded template", - binPath: "/usr/lib/postgresql/15/bin", + binPath: "/usr/lib/postgresql/17/bin", template: pgBinariesLocationTemplate, - expected: "15", + expected: "17", }, { subTest: "test alternative bin path against a matching template", - binPath: "/usr/pgsql-15/bin", + binPath: 
"/usr/pgsql-17/bin", template: "/usr/pgsql-%v/bin", - expected: "15", + expected: "17", }, } @@ -205,6 +191,7 @@ func TestExtractPgVersionFromBinPath(t *testing.T) { const ( testPodEnvironmentConfigMapName = "pod_env_cm" testPodEnvironmentSecretName = "pod_env_sc" + testCronjobEnvironmentSecretName = "pod_env_sc" testPodEnvironmentObjectNotExists = "idonotexist" testPodEnvironmentSecretNameAPIError = "pod_env_sc_apierror" testResourceCheckInterval = 3 @@ -461,6 +448,96 @@ func TestPodEnvironmentSecretVariables(t *testing.T) { } +// Test if the keys of an existing secret are properly referenced +func TestCronjobEnvironmentSecretVariables(t *testing.T) { + testName := "TestCronjobEnvironmentSecretVariables" + tests := []struct { + subTest string + opConfig config.Config + envVars []v1.EnvVar + err error + }{ + { + subTest: "No CronjobEnvironmentSecret configured", + envVars: []v1.EnvVar{}, + }, + { + subTest: "Secret referenced by CronjobEnvironmentSecret does not exist", + opConfig: config.Config{ + LogicalBackup: config.LogicalBackup{ + LogicalBackupCronjobEnvironmentSecret: "idonotexist", + }, + }, + err: fmt.Errorf("could not read Secret CronjobEnvironmentSecretName: secret.core \"idonotexist\" not found"), + }, + { + subTest: "Cronjob environment vars reference all keys from secret configured by CronjobEnvironmentSecret", + opConfig: config.Config{ + LogicalBackup: config.LogicalBackup{ + LogicalBackupCronjobEnvironmentSecret: testCronjobEnvironmentSecretName, + }, + }, + envVars: []v1.EnvVar{ + { + Name: "clone_aws_access_key_id", + ValueFrom: &v1.EnvVarSource{ + SecretKeyRef: &v1.SecretKeySelector{ + LocalObjectReference: v1.LocalObjectReference{ + Name: testPodEnvironmentSecretName, + }, + Key: "clone_aws_access_key_id", + }, + }, + }, + { + Name: "custom_variable", + ValueFrom: &v1.EnvVarSource{ + SecretKeyRef: &v1.SecretKeySelector{ + LocalObjectReference: v1.LocalObjectReference{ + Name: testPodEnvironmentSecretName, + }, + Key: "custom_variable", + }, + }, + }, + { + Name: "standby_google_application_credentials", + ValueFrom: &v1.EnvVarSource{ + SecretKeyRef: &v1.SecretKeySelector{ + LocalObjectReference: v1.LocalObjectReference{ + Name: testPodEnvironmentSecretName, + }, + Key: "standby_google_application_credentials", + }, + }, + }, + }, + }, + } + + for _, tt := range tests { + c := newMockCluster(tt.opConfig) + vars, err := c.getCronjobEnvironmentSecretVariables() + sort.Slice(vars, func(i, j int) bool { return vars[i].Name < vars[j].Name }) + if !reflect.DeepEqual(vars, tt.envVars) { + t.Errorf("%s %s: expected `%v` but got `%v`", + testName, tt.subTest, tt.envVars, vars) + } + if tt.err != nil { + if err.Error() != tt.err.Error() { + t.Errorf("%s %s: expected error `%v` but got `%v`", + testName, tt.subTest, tt.err, err) + } + } else { + if err != nil { + t.Errorf("%s %s: expected no error but got error: `%v`", + testName, tt.subTest, err) + } + } + } + +} + func testEnvs(cluster *Cluster, podSpec *v1.PodTemplateSpec, role PostgresRole) error { required := map[string]bool{ "PGHOST": false, @@ -1344,8 +1421,8 @@ func TestNodeAffinity(t *testing.T) { return acidv1.PostgresSpec{ TeamID: "myapp", NumberOfInstances: 1, Resources: &acidv1.Resources{ - ResourceRequests: acidv1.ResourceDescription{CPU: "1", Memory: "10"}, - ResourceLimits: acidv1.ResourceDescription{CPU: "1", Memory: "10"}, + ResourceRequests: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("1"), Memory: k8sutil.StringToPointer("10")}, + ResourceLimits: acidv1.ResourceDescription{CPU: 
k8sutil.StringToPointer("1"), Memory: k8sutil.StringToPointer("10")}, }, Volume: acidv1.Volume{ Size: "1G", @@ -1374,9 +1451,9 @@ func TestNodeAffinity(t *testing.T) { nodeAff := &v1.NodeAffinity{ RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{ NodeSelectorTerms: []v1.NodeSelectorTerm{ - v1.NodeSelectorTerm{ + { MatchExpressions: []v1.NodeSelectorRequirement{ - v1.NodeSelectorRequirement{ + { Key: "test-label", Operator: v1.NodeSelectorOpIn, Values: []string{ @@ -1437,8 +1514,8 @@ func TestPodAffinity(t *testing.T) { Spec: acidv1.PostgresSpec{ NumberOfInstances: 1, Resources: &acidv1.Resources{ - ResourceRequests: acidv1.ResourceDescription{CPU: "1", Memory: "10"}, - ResourceLimits: acidv1.ResourceDescription{CPU: "1", Memory: "10"}, + ResourceRequests: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("1"), Memory: k8sutil.StringToPointer("10")}, + ResourceLimits: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("1"), Memory: k8sutil.StringToPointer("10")}, }, Volume: acidv1.Volume{ Size: "1G", @@ -1489,22 +1566,28 @@ func TestPodAffinity(t *testing.T) { } func testDeploymentOwnerReference(cluster *Cluster, deployment *appsv1.Deployment) error { + if len(deployment.ObjectMeta.OwnerReferences) == 0 { + return nil + } owner := deployment.ObjectMeta.OwnerReferences[0] - if owner.Name != cluster.Statefulset.ObjectMeta.Name { - return fmt.Errorf("Ownere reference is incorrect, got %s, expected %s", - owner.Name, cluster.Statefulset.ObjectMeta.Name) + if owner.Name != cluster.Postgresql.ObjectMeta.Name { + return fmt.Errorf("Owner reference is incorrect, got %s, expected %s", + owner.Name, cluster.Postgresql.ObjectMeta.Name) } return nil } func testServiceOwnerReference(cluster *Cluster, service *v1.Service, role PostgresRole) error { + if len(service.ObjectMeta.OwnerReferences) == 0 { + return nil + } owner := service.ObjectMeta.OwnerReferences[0] - if owner.Name != cluster.Statefulset.ObjectMeta.Name { - return fmt.Errorf("Ownere reference is incorrect, got %s, expected %s", - owner.Name, cluster.Statefulset.ObjectMeta.Name) + if owner.Name != cluster.Postgresql.ObjectMeta.Name { + return fmt.Errorf("Owner reference is incorrect, got %s, expected %s", + owner.Name, cluster.Postgresql.ObjectMeta.Name) } return nil @@ -1581,8 +1664,8 @@ func TestTLS(t *testing.T) { Spec: acidv1.PostgresSpec{ TeamID: "myapp", NumberOfInstances: 1, Resources: &acidv1.Resources{ - ResourceRequests: acidv1.ResourceDescription{CPU: "1", Memory: "10"}, - ResourceLimits: acidv1.ResourceDescription{CPU: "1", Memory: "10"}, + ResourceRequests: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("1"), Memory: k8sutil.StringToPointer("10")}, + ResourceLimits: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("1"), Memory: k8sutil.StringToPointer("10")}, }, Volume: acidv1.Volume{ Size: "1G", @@ -1590,7 +1673,7 @@ func TestTLS(t *testing.T) { TLS: &acidv1.TLSDescription{ SecretName: tlsSecretName, CAFile: "ca.crt"}, AdditionalVolumes: []acidv1.AdditionalVolume{ - acidv1.AdditionalVolume{ + { Name: tlsSecretName, MountPath: mountPath, VolumeSource: v1.VolumeSource{ @@ -1812,6 +1895,25 @@ func TestAdditionalVolume(t *testing.T) { EmptyDir: &v1.EmptyDirVolumeSource{}, }, }, + { + Name: "test5", + MountPath: "/test5", + SubPath: "subpath", + TargetContainers: nil, // should mount only to postgres + VolumeSource: v1.VolumeSource{ + EmptyDir: &v1.EmptyDirVolumeSource{}, + }, + }, + { + Name: "test6", + MountPath: "/test6", + SubPath: "$(POD_NAME)", + IsSubPathExpr: util.True(), + 
TargetContainers: nil, // should mount only to postgres + VolumeSource: v1.VolumeSource{ + EmptyDir: &v1.EmptyDirVolumeSource{}, + }, + }, } pg := acidv1.Postgresql{ @@ -1822,11 +1924,13 @@ func TestAdditionalVolume(t *testing.T) { Spec: acidv1.PostgresSpec{ TeamID: "myapp", NumberOfInstances: 1, Resources: &acidv1.Resources{ - ResourceRequests: acidv1.ResourceDescription{CPU: "1", Memory: "10"}, - ResourceLimits: acidv1.ResourceDescription{CPU: "1", Memory: "10"}, + ResourceRequests: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("1"), Memory: k8sutil.StringToPointer("10")}, + ResourceLimits: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("1"), Memory: k8sutil.StringToPointer("10")}, }, Volume: acidv1.Volume{ - Size: "1G", + Size: "1G", + SubPath: "$(POD_NAME)", + IsSubPathExpr: util.True(), }, AdditionalVolumes: additionalVolumes, Sidecars: []acidv1.Sidecar{ @@ -1858,19 +1962,25 @@ func TestAdditionalVolume(t *testing.T) { assert.NoError(t, err) tests := []struct { - subTest string - container string - expectedMounts []string + subTest string + container string + expectedMounts []string + expectedSubPaths []string + expectedSubPathExprs []string }{ { - subTest: "checking volume mounts of postgres container", - container: constants.PostgresContainerName, - expectedMounts: []string{"pgdata", "test1", "test3", "test4"}, + subTest: "checking volume mounts of postgres container", + container: constants.PostgresContainerName, + expectedMounts: []string{"pgdata", "test1", "test3", "test4", "test5", "test6"}, + expectedSubPaths: []string{"", "", "", "", "subpath", ""}, + expectedSubPathExprs: []string{"$(POD_NAME)", "", "", "", "", "$(POD_NAME)"}, }, { - subTest: "checking volume mounts of sidecar container", - container: "sidecar", - expectedMounts: []string{"pgdata", "test1", "test2"}, + subTest: "checking volume mounts of sidecar container", + container: "sidecar", + expectedMounts: []string{"pgdata", "test1", "test2"}, + expectedSubPaths: []string{"", "", ""}, + expectedSubPathExprs: []string{"$(POD_NAME)", "", ""}, }, } @@ -1880,14 +1990,29 @@ func TestAdditionalVolume(t *testing.T) { continue } mounts := []string{} + subPaths := []string{} + subPathExprs := []string{} + for _, volumeMounts := range container.VolumeMounts { mounts = append(mounts, volumeMounts.Name) + subPaths = append(subPaths, volumeMounts.SubPath) + subPathExprs = append(subPathExprs, volumeMounts.SubPathExpr) } if !util.IsEqualIgnoreOrder(mounts, tt.expectedMounts) { - t.Errorf("%s %s: different volume mounts: got %v, epxected %v", + t.Errorf("%s %s: different volume mounts: got %v, expected %v", t.Name(), tt.subTest, mounts, tt.expectedMounts) } + + if !util.IsEqualIgnoreOrder(subPaths, tt.expectedSubPaths) { + t.Errorf("%s %s: different volume subPaths: got %v, expected %v", + t.Name(), tt.subTest, subPaths, tt.expectedSubPaths) + } + + if !util.IsEqualIgnoreOrder(subPathExprs, tt.expectedSubPathExprs) { + t.Errorf("%s %s: different volume subPathExprs: got %v, expected %v", + t.Name(), tt.subTest, subPathExprs, tt.expectedSubPathExprs) + } } } } @@ -1898,8 +2023,8 @@ func TestVolumeSelector(t *testing.T) { TeamID: "myapp", NumberOfInstances: 0, Resources: &acidv1.Resources{ - ResourceRequests: acidv1.ResourceDescription{CPU: "1", Memory: "10"}, - ResourceLimits: acidv1.ResourceDescription{CPU: "1", Memory: "10"}, + ResourceRequests: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("1"), Memory: k8sutil.StringToPointer("10")}, + ResourceLimits: acidv1.ResourceDescription{CPU: 
k8sutil.StringToPointer("1"), Memory: k8sutil.StringToPointer("10")}, }, Volume: volume, } @@ -2023,31 +2148,31 @@ func TestSidecars(t *testing.T) { spec = acidv1.PostgresSpec{ PostgresqlParam: acidv1.PostgresqlParam{ - PgVersion: "15", + PgVersion: "17", Parameters: map[string]string{ "max_connections": "100", }, }, TeamID: "myapp", NumberOfInstances: 1, Resources: &acidv1.Resources{ - ResourceRequests: acidv1.ResourceDescription{CPU: "1", Memory: "10"}, - ResourceLimits: acidv1.ResourceDescription{CPU: "1", Memory: "10"}, + ResourceRequests: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("1"), Memory: k8sutil.StringToPointer("10")}, + ResourceLimits: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("1"), Memory: k8sutil.StringToPointer("10")}, }, Volume: acidv1.Volume{ Size: "1G", }, Sidecars: []acidv1.Sidecar{ - acidv1.Sidecar{ + { Name: "cluster-specific-sidecar", }, - acidv1.Sidecar{ + { Name: "cluster-specific-sidecar-with-resources", Resources: &acidv1.Resources{ - ResourceRequests: acidv1.ResourceDescription{CPU: "210m", Memory: "0.8Gi"}, - ResourceLimits: acidv1.ResourceDescription{CPU: "510m", Memory: "1.4Gi"}, + ResourceRequests: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("210m"), Memory: k8sutil.StringToPointer("0.8Gi")}, + ResourceLimits: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("510m"), Memory: k8sutil.StringToPointer("1.4Gi")}, }, }, - acidv1.Sidecar{ + { Name: "replace-sidecar", DockerImage: "override-image", }, @@ -2075,11 +2200,11 @@ func TestSidecars(t *testing.T) { "deprecated-global-sidecar": "image:123", }, SidecarContainers: []v1.Container{ - v1.Container{ + { Name: "global-sidecar", }, // will be replaced by a cluster specific sidecar with the same name - v1.Container{ + { Name: "replace-sidecar", Image: "replaced-image", }, @@ -2134,7 +2259,7 @@ func TestSidecars(t *testing.T) { }, } mounts := []v1.VolumeMount{ - v1.VolumeMount{ + { Name: "pgdata", MountPath: "/home/postgres/pgdata", }, @@ -2201,13 +2326,81 @@ func TestSidecars(t *testing.T) { } func TestGeneratePodDisruptionBudget(t *testing.T) { + testName := "Test PodDisruptionBudget spec generation" + + hasName := func(pdbName string) func(cluster *Cluster, podDisruptionBudget *policyv1.PodDisruptionBudget) error { + return func(cluster *Cluster, podDisruptionBudget *policyv1.PodDisruptionBudget) error { + if pdbName != podDisruptionBudget.ObjectMeta.Name { + return fmt.Errorf("PodDisruptionBudget name is incorrect, got %s, expected %s", + podDisruptionBudget.ObjectMeta.Name, pdbName) + } + return nil + } + } + + hasMinAvailable := func(expectedMinAvailable int) func(cluster *Cluster, podDisruptionBudget *policyv1.PodDisruptionBudget) error { + return func(cluster *Cluster, podDisruptionBudget *policyv1.PodDisruptionBudget) error { + actual := podDisruptionBudget.Spec.MinAvailable.IntVal + if actual != int32(expectedMinAvailable) { + return fmt.Errorf("PodDisruptionBudget MinAvailable is incorrect, got %d, expected %d", + actual, expectedMinAvailable) + } + return nil + } + } + + testLabelsAndSelectors := func(isPrimary bool) func(cluster *Cluster, podDisruptionBudget *policyv1.PodDisruptionBudget) error { + return func(cluster *Cluster, podDisruptionBudget *policyv1.PodDisruptionBudget) error { + masterLabelSelectorDisabled := cluster.OpConfig.PDBMasterLabelSelector != nil && !*cluster.OpConfig.PDBMasterLabelSelector + if podDisruptionBudget.ObjectMeta.Namespace != "myapp" { + return fmt.Errorf("Object Namespace incorrect.") + } + expectedLabels := 
map[string]string{"team": "myapp", "cluster-name": "myapp-database"} + if !reflect.DeepEqual(podDisruptionBudget.Labels, expectedLabels) { + return fmt.Errorf("Labels incorrect, got %#v, expected %#v", podDisruptionBudget.Labels, expectedLabels) + } + if !masterLabelSelectorDisabled { + if isPrimary { + expectedLabels := &metav1.LabelSelector{ + MatchLabels: map[string]string{"spilo-role": "master", "cluster-name": "myapp-database"}} + if !reflect.DeepEqual(podDisruptionBudget.Spec.Selector, expectedLabels) { + return fmt.Errorf("MatchLabels incorrect, got %#v, expected %#v", podDisruptionBudget.Spec.Selector, expectedLabels) + } + } else { + expectedLabels := &metav1.LabelSelector{ + MatchLabels: map[string]string{"cluster-name": "myapp-database", "critical-operation": "true"}} + if !reflect.DeepEqual(podDisruptionBudget.Spec.Selector, expectedLabels) { + return fmt.Errorf("MatchLabels incorrect, got %#v, expected %#v", podDisruptionBudget.Spec.Selector, expectedLabels) + } + } + } + + return nil + } + } + + testPodDisruptionBudgetOwnerReference := func(cluster *Cluster, podDisruptionBudget *policyv1.PodDisruptionBudget) error { + if len(podDisruptionBudget.ObjectMeta.OwnerReferences) == 0 { + return nil + } + owner := podDisruptionBudget.ObjectMeta.OwnerReferences[0] + + if owner.Name != cluster.Postgresql.ObjectMeta.Name { + return fmt.Errorf("Owner reference is incorrect, got %s, expected %s", + owner.Name, cluster.Postgresql.ObjectMeta.Name) + } + + return nil + } + tests := []struct { - c *Cluster - out policyv1.PodDisruptionBudget + scenario string + spec *Cluster + check []func(cluster *Cluster, podDisruptionBudget *policyv1.PodDisruptionBudget) error }{ - // With multiple instances. { - New( + scenario: "With multiple instances", + spec: New( Config{OpConfig: config.Config{Resources: config.Resources{ClusterNameLabel: "cluster-name", PodRoleLabel: "spilo-role"}, PDBNameFormat: "postgres-{cluster}-pdb"}}, k8sutil.KubernetesClient{}, acidv1.Postgresql{ @@ -2215,23 +2408,16 @@ func TestGeneratePodDisruptionBudget(t *testing.T) { Spec: acidv1.PostgresSpec{TeamID: "myapp", NumberOfInstances: 3}}, logger, eventRecorder), - policyv1.PodDisruptionBudget{ - ObjectMeta: metav1.ObjectMeta{ - Name: "postgres-myapp-database-pdb", - Namespace: "myapp", - Labels: map[string]string{"team": "myapp", "cluster-name": "myapp-database"}, - }, - Spec: policyv1.PodDisruptionBudgetSpec{ - MinAvailable: util.ToIntStr(1), - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"spilo-role": "master", "cluster-name": "myapp-database"}, - }, - }, + check: []func(cluster *Cluster, podDisruptionBudget *policyv1.PodDisruptionBudget) error{ + testPodDisruptionBudgetOwnerReference, + hasName("postgres-myapp-database-pdb"), + hasMinAvailable(1), + testLabelsAndSelectors(true), }, }, - // With zero instances. 
{ - New( + scenario: "With zero instances", + spec: New( Config{OpConfig: config.Config{Resources: config.Resources{ClusterNameLabel: "cluster-name", PodRoleLabel: "spilo-role"}, PDBNameFormat: "postgres-{cluster}-pdb"}}, k8sutil.KubernetesClient{}, acidv1.Postgresql{ @@ -2239,23 +2425,16 @@ func TestGeneratePodDisruptionBudget(t *testing.T) { Spec: acidv1.PostgresSpec{TeamID: "myapp", NumberOfInstances: 0}}, logger, eventRecorder), - policyv1.PodDisruptionBudget{ - ObjectMeta: metav1.ObjectMeta{ - Name: "postgres-myapp-database-pdb", - Namespace: "myapp", - Labels: map[string]string{"team": "myapp", "cluster-name": "myapp-database"}, - }, - Spec: policyv1.PodDisruptionBudgetSpec{ - MinAvailable: util.ToIntStr(0), - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"spilo-role": "master", "cluster-name": "myapp-database"}, - }, - }, + check: []func(cluster *Cluster, podDisruptionBudget *policyv1.PodDisruptionBudget) error{ + testPodDisruptionBudgetOwnerReference, + hasName("postgres-myapp-database-pdb"), + hasMinAvailable(0), + testLabelsAndSelectors(true), }, }, - // With PodDisruptionBudget disabled. { - New( + scenario: "With PodDisruptionBudget disabled", + spec: New( Config{OpConfig: config.Config{Resources: config.Resources{ClusterNameLabel: "cluster-name", PodRoleLabel: "spilo-role"}, PDBNameFormat: "postgres-{cluster}-pdb", EnablePodDisruptionBudget: util.False()}}, k8sutil.KubernetesClient{}, acidv1.Postgresql{ @@ -2263,23 +2442,16 @@ func TestGeneratePodDisruptionBudget(t *testing.T) { Spec: acidv1.PostgresSpec{TeamID: "myapp", NumberOfInstances: 3}}, logger, eventRecorder), - policyv1.PodDisruptionBudget{ - ObjectMeta: metav1.ObjectMeta{ - Name: "postgres-myapp-database-pdb", - Namespace: "myapp", - Labels: map[string]string{"team": "myapp", "cluster-name": "myapp-database"}, - }, - Spec: policyv1.PodDisruptionBudgetSpec{ - MinAvailable: util.ToIntStr(0), - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"spilo-role": "master", "cluster-name": "myapp-database"}, - }, - }, + check: []func(cluster *Cluster, podDisruptionBudget *policyv1.PodDisruptionBudget) error{ + testPodDisruptionBudgetOwnerReference, + hasName("postgres-myapp-database-pdb"), + hasMinAvailable(0), + testLabelsAndSelectors(true), }, }, - // With non-default PDBNameFormat and PodDisruptionBudget explicitly enabled. 
{ - New( + scenario: "With non-default PDBNameFormat and PodDisruptionBudget explicitly enabled", + spec: New( Config{OpConfig: config.Config{Resources: config.Resources{ClusterNameLabel: "cluster-name", PodRoleLabel: "spilo-role"}, PDBNameFormat: "postgres-{cluster}-databass-budget", EnablePodDisruptionBudget: util.True()}}, k8sutil.KubernetesClient{}, acidv1.Postgresql{ @@ -2287,26 +2459,143 @@ func TestGeneratePodDisruptionBudget(t *testing.T) { Spec: acidv1.PostgresSpec{TeamID: "myapp", NumberOfInstances: 3}}, logger, eventRecorder), - policyv1.PodDisruptionBudget{ - ObjectMeta: metav1.ObjectMeta{ - Name: "postgres-myapp-database-databass-budget", - Namespace: "myapp", - Labels: map[string]string{"team": "myapp", "cluster-name": "myapp-database"}, - }, - Spec: policyv1.PodDisruptionBudgetSpec{ - MinAvailable: util.ToIntStr(1), - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"spilo-role": "master", "cluster-name": "myapp-database"}, - }, - }, + check: []func(cluster *Cluster, podDisruptionBudget *policyv1.PodDisruptionBudget) error{ + testPodDisruptionBudgetOwnerReference, + hasName("postgres-myapp-database-databass-budget"), + hasMinAvailable(1), + testLabelsAndSelectors(true), + }, + }, + { + scenario: "With PDBMasterLabelSelector disabled", + spec: New( + Config{OpConfig: config.Config{Resources: config.Resources{ClusterNameLabel: "cluster-name", PodRoleLabel: "spilo-role"}, PDBNameFormat: "postgres-{cluster}-pdb", EnablePodDisruptionBudget: util.True(), PDBMasterLabelSelector: util.False()}}, + k8sutil.KubernetesClient{}, + acidv1.Postgresql{ + ObjectMeta: metav1.ObjectMeta{Name: "myapp-database", Namespace: "myapp"}, + Spec: acidv1.PostgresSpec{TeamID: "myapp", NumberOfInstances: 3}}, + logger, + eventRecorder), + check: []func(cluster *Cluster, podDisruptionBudget *policyv1.PodDisruptionBudget) error{ + testPodDisruptionBudgetOwnerReference, + hasName("postgres-myapp-database-pdb"), + hasMinAvailable(1), + testLabelsAndSelectors(true), + }, + }, + { + scenario: "With OwnerReference enabled", + spec: New( + Config{OpConfig: config.Config{Resources: config.Resources{ClusterNameLabel: "cluster-name", PodRoleLabel: "spilo-role", EnableOwnerReferences: util.True()}, PDBNameFormat: "postgres-{cluster}-pdb", EnablePodDisruptionBudget: util.True()}}, + k8sutil.KubernetesClient{}, + acidv1.Postgresql{ + ObjectMeta: metav1.ObjectMeta{Name: "myapp-database", Namespace: "myapp"}, + Spec: acidv1.PostgresSpec{TeamID: "myapp", NumberOfInstances: 3}}, + logger, + eventRecorder), + check: []func(cluster *Cluster, podDisruptionBudget *policyv1.PodDisruptionBudget) error{ + testPodDisruptionBudgetOwnerReference, + hasName("postgres-myapp-database-pdb"), + hasMinAvailable(1), + testLabelsAndSelectors(true), }, }, } for _, tt := range tests { - result := tt.c.generatePodDisruptionBudget() - if !reflect.DeepEqual(*result, tt.out) { - t.Errorf("Expected PodDisruptionBudget: %#v, got %#v", tt.out, *result) + result := tt.spec.generatePrimaryPodDisruptionBudget() + for _, check := range tt.check { + err := check(tt.spec, result) + if err != nil { + t.Errorf("%s [%s]: PodDisruptionBudget spec is incorrect, %+v", + testName, tt.scenario, err) + } + } + } + + testCriticalOp := []struct { + scenario string + spec *Cluster + check []func(cluster *Cluster, podDisruptionBudget *policyv1.PodDisruptionBudget) error + }{ + { + scenario: "With multiple instances", + spec: New( + Config{OpConfig: config.Config{Resources: config.Resources{ClusterNameLabel: "cluster-name", PodRoleLabel: 
"spilo-role"}, PDBNameFormat: "postgres-{cluster}-pdb"}}, + k8sutil.KubernetesClient{}, + acidv1.Postgresql{ + ObjectMeta: metav1.ObjectMeta{Name: "myapp-database", Namespace: "myapp"}, + Spec: acidv1.PostgresSpec{TeamID: "myapp", NumberOfInstances: 3}}, + logger, + eventRecorder), + check: []func(cluster *Cluster, podDisruptionBudget *policyv1.PodDisruptionBudget) error{ + testPodDisruptionBudgetOwnerReference, + hasName("postgres-myapp-database-critical-op-pdb"), + hasMinAvailable(3), + testLabelsAndSelectors(false), + }, + }, + { + scenario: "With zero instances", + spec: New( + Config{OpConfig: config.Config{Resources: config.Resources{ClusterNameLabel: "cluster-name", PodRoleLabel: "spilo-role"}, PDBNameFormat: "postgres-{cluster}-pdb"}}, + k8sutil.KubernetesClient{}, + acidv1.Postgresql{ + ObjectMeta: metav1.ObjectMeta{Name: "myapp-database", Namespace: "myapp"}, + Spec: acidv1.PostgresSpec{TeamID: "myapp", NumberOfInstances: 0}}, + logger, + eventRecorder), + check: []func(cluster *Cluster, podDisruptionBudget *policyv1.PodDisruptionBudget) error{ + testPodDisruptionBudgetOwnerReference, + hasName("postgres-myapp-database-critical-op-pdb"), + hasMinAvailable(0), + testLabelsAndSelectors(false), + }, + }, + { + scenario: "With PodDisruptionBudget disabled", + spec: New( + Config{OpConfig: config.Config{Resources: config.Resources{ClusterNameLabel: "cluster-name", PodRoleLabel: "spilo-role"}, PDBNameFormat: "postgres-{cluster}-pdb", EnablePodDisruptionBudget: util.False()}}, + k8sutil.KubernetesClient{}, + acidv1.Postgresql{ + ObjectMeta: metav1.ObjectMeta{Name: "myapp-database", Namespace: "myapp"}, + Spec: acidv1.PostgresSpec{TeamID: "myapp", NumberOfInstances: 3}}, + logger, + eventRecorder), + check: []func(cluster *Cluster, podDisruptionBudget *policyv1.PodDisruptionBudget) error{ + testPodDisruptionBudgetOwnerReference, + hasName("postgres-myapp-database-critical-op-pdb"), + hasMinAvailable(0), + testLabelsAndSelectors(false), + }, + }, + { + scenario: "With OwnerReference enabled", + spec: New( + Config{OpConfig: config.Config{Resources: config.Resources{ClusterNameLabel: "cluster-name", PodRoleLabel: "spilo-role", EnableOwnerReferences: util.True()}, PDBNameFormat: "postgres-{cluster}-pdb", EnablePodDisruptionBudget: util.True()}}, + k8sutil.KubernetesClient{}, + acidv1.Postgresql{ + ObjectMeta: metav1.ObjectMeta{Name: "myapp-database", Namespace: "myapp"}, + Spec: acidv1.PostgresSpec{TeamID: "myapp", NumberOfInstances: 3}}, + logger, + eventRecorder), + check: []func(cluster *Cluster, podDisruptionBudget *policyv1.PodDisruptionBudget) error{ + testPodDisruptionBudgetOwnerReference, + hasName("postgres-myapp-database-critical-op-pdb"), + hasMinAvailable(3), + testLabelsAndSelectors(false), + }, + }, + } + + for _, tt := range testCriticalOp { + result := tt.spec.generateCriticalOpPodDisruptionBudget() + for _, check := range tt.check { + err := check(tt.spec, result) + if err != nil { + t.Errorf("%s [%s]: PodDisruptionBudget spec is incorrect, %+v", + testName, tt.scenario, err) + } } } } @@ -2318,24 +2607,24 @@ func TestGenerateService(t *testing.T) { spec = acidv1.PostgresSpec{ TeamID: "myapp", NumberOfInstances: 1, Resources: &acidv1.Resources{ - ResourceRequests: acidv1.ResourceDescription{CPU: "1", Memory: "10"}, - ResourceLimits: acidv1.ResourceDescription{CPU: "1", Memory: "10"}, + ResourceRequests: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("1"), Memory: k8sutil.StringToPointer("10")}, + ResourceLimits: acidv1.ResourceDescription{CPU: 
k8sutil.StringToPointer("1"), Memory: k8sutil.StringToPointer("10")}, }, Volume: acidv1.Volume{ Size: "1G", }, Sidecars: []acidv1.Sidecar{ - acidv1.Sidecar{ + { Name: "cluster-specific-sidecar", }, - acidv1.Sidecar{ + { Name: "cluster-specific-sidecar-with-resources", Resources: &acidv1.Resources{ - ResourceRequests: acidv1.ResourceDescription{CPU: "210m", Memory: "0.8Gi"}, - ResourceLimits: acidv1.ResourceDescription{CPU: "510m", Memory: "1.4Gi"}, + ResourceRequests: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("210m"), Memory: k8sutil.StringToPointer("0.8Gi")}, + ResourceLimits: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("510m"), Memory: k8sutil.StringToPointer("1.4Gi")}, }, }, - acidv1.Sidecar{ + { Name: "replace-sidecar", DockerImage: "override-image", }, @@ -2364,11 +2653,11 @@ func TestGenerateService(t *testing.T) { "deprecated-global-sidecar": "image:123", }, SidecarContainers: []v1.Container{ - v1.Container{ + { Name: "global-sidecar", }, // will be replaced by a cluster specific sidecar with the same name - v1.Container{ + { Name: "replace-sidecar", Image: "replaced-image", }, @@ -2463,27 +2752,27 @@ func newLBFakeClient() (k8sutil.KubernetesClient, *fake.Clientset) { func getServices(serviceType v1.ServiceType, sourceRanges []string, extTrafficPolicy, clusterName string) []v1.ServiceSpec { return []v1.ServiceSpec{ - v1.ServiceSpec{ + { ExternalTrafficPolicy: v1.ServiceExternalTrafficPolicyType(extTrafficPolicy), LoadBalancerSourceRanges: sourceRanges, Ports: []v1.ServicePort{{Name: "postgresql", Port: 5432, TargetPort: intstr.IntOrString{IntVal: 5432}}}, Type: serviceType, }, - v1.ServiceSpec{ + { ExternalTrafficPolicy: v1.ServiceExternalTrafficPolicyType(extTrafficPolicy), LoadBalancerSourceRanges: sourceRanges, Ports: []v1.ServicePort{{Name: clusterName + "-pooler", Port: 5432, TargetPort: intstr.IntOrString{IntVal: 5432}}}, Selector: map[string]string{"connection-pooler": clusterName + "-pooler"}, Type: serviceType, }, - v1.ServiceSpec{ + { ExternalTrafficPolicy: v1.ServiceExternalTrafficPolicyType(extTrafficPolicy), LoadBalancerSourceRanges: sourceRanges, Ports: []v1.ServicePort{{Name: "postgresql", Port: 5432, TargetPort: intstr.IntOrString{IntVal: 5432}}}, Selector: map[string]string{"spilo-role": "replica", "application": "spilo", "cluster-name": clusterName}, Type: serviceType, }, - v1.ServiceSpec{ + { ExternalTrafficPolicy: v1.ServiceExternalTrafficPolicyType(extTrafficPolicy), LoadBalancerSourceRanges: sourceRanges, Ports: []v1.ServicePort{{Name: clusterName + "-pooler-repl", Port: 5432, TargetPort: intstr.IntOrString{IntVal: 5432}}}, @@ -2545,8 +2834,8 @@ func TestEnableLoadBalancers(t *testing.T) { EnableReplicaPoolerLoadBalancer: util.False(), NumberOfInstances: 1, Resources: &acidv1.Resources{ - ResourceRequests: acidv1.ResourceDescription{CPU: "1", Memory: "10"}, - ResourceLimits: acidv1.ResourceDescription{CPU: "1", Memory: "10"}, + ResourceRequests: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("1"), Memory: k8sutil.StringToPointer("10")}, + ResourceLimits: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("1"), Memory: k8sutil.StringToPointer("10")}, }, TeamID: "acid", Volume: acidv1.Volume{ @@ -2592,8 +2881,8 @@ func TestEnableLoadBalancers(t *testing.T) { EnableReplicaPoolerLoadBalancer: util.True(), NumberOfInstances: 1, Resources: &acidv1.Resources{ - ResourceRequests: acidv1.ResourceDescription{CPU: "1", Memory: "10"}, - ResourceLimits: acidv1.ResourceDescription{CPU: "1", Memory: "10"}, + ResourceRequests: 
acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("1"), Memory: k8sutil.StringToPointer("10")}, + ResourceLimits: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("1"), Memory: k8sutil.StringToPointer("10")}, }, TeamID: "acid", Volume: acidv1.Volume{ @@ -2640,7 +2929,7 @@ func TestGenerateResourceRequirements(t *testing.T) { clusterNameLabel := "cluster-name" sidecarName := "postgres-exporter" - // enforceMinResourceLimits will be called 2 twice emitting 4 events (2x cpu, 2x memory raise) + // enforceMinResourceLimits will be called 2 times emitting 4 events (2x cpu, 2x memory raise) // enforceMaxResourceRequests will be called 4 times emitting 6 events (2x cpu, 4x memory cap) // hence event bufferSize of 10 is required newEventRecorder := record.NewFakeRecorder(10) @@ -2685,8 +2974,8 @@ func TestGenerateResourceRequirements(t *testing.T) { }, }, expectedResources: acidv1.Resources{ - ResourceRequests: acidv1.ResourceDescription{CPU: "100m", Memory: "100Mi"}, - ResourceLimits: acidv1.ResourceDescription{CPU: "1", Memory: "500Mi"}, + ResourceRequests: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("100m"), Memory: k8sutil.StringToPointer("100Mi")}, + ResourceLimits: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("1"), Memory: k8sutil.StringToPointer("500Mi")}, }, }, { @@ -2703,7 +2992,7 @@ func TestGenerateResourceRequirements(t *testing.T) { }, Spec: acidv1.PostgresSpec{ Sidecars: []acidv1.Sidecar{ - acidv1.Sidecar{ + { Name: sidecarName, }, }, @@ -2714,8 +3003,8 @@ func TestGenerateResourceRequirements(t *testing.T) { }, }, expectedResources: acidv1.Resources{ - ResourceRequests: acidv1.ResourceDescription{CPU: "100m", Memory: "100Mi"}, - ResourceLimits: acidv1.ResourceDescription{CPU: "1", Memory: "500Mi"}, + ResourceRequests: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("100m"), Memory: k8sutil.StringToPointer("100Mi")}, + ResourceLimits: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("1"), Memory: k8sutil.StringToPointer("500Mi")}, }, }, { @@ -2732,7 +3021,7 @@ func TestGenerateResourceRequirements(t *testing.T) { }, Spec: acidv1.PostgresSpec{ Resources: &acidv1.Resources{ - ResourceRequests: acidv1.ResourceDescription{CPU: "50m", Memory: "50Mi"}, + ResourceRequests: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("50m"), Memory: k8sutil.StringToPointer("50Mi")}, }, TeamID: "acid", Volume: acidv1.Volume{ @@ -2741,8 +3030,8 @@ func TestGenerateResourceRequirements(t *testing.T) { }, }, expectedResources: acidv1.Resources{ - ResourceRequests: acidv1.ResourceDescription{CPU: "50m", Memory: "50Mi"}, - ResourceLimits: acidv1.ResourceDescription{CPU: "1", Memory: "500Mi"}, + ResourceRequests: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("50m"), Memory: k8sutil.StringToPointer("50Mi")}, + ResourceLimits: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("1"), Memory: k8sutil.StringToPointer("500Mi")}, }, }, { @@ -2759,8 +3048,8 @@ func TestGenerateResourceRequirements(t *testing.T) { }, Spec: acidv1.PostgresSpec{ Resources: &acidv1.Resources{ - ResourceRequests: acidv1.ResourceDescription{Memory: "100Mi"}, - ResourceLimits: acidv1.ResourceDescription{Memory: "1Gi"}, + ResourceRequests: acidv1.ResourceDescription{Memory: k8sutil.StringToPointer("100Mi")}, + ResourceLimits: acidv1.ResourceDescription{Memory: k8sutil.StringToPointer("1Gi")}, }, TeamID: "acid", Volume: acidv1.Volume{ @@ -2769,16 +3058,22 @@ func TestGenerateResourceRequirements(t *testing.T) { }, }, expectedResources: acidv1.Resources{ - ResourceRequests: 
acidv1.ResourceDescription{CPU: "100m", Memory: "100Mi"}, - ResourceLimits: acidv1.ResourceDescription{CPU: "1", Memory: "1Gi"}, + ResourceRequests: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("100m"), Memory: k8sutil.StringToPointer("100Mi")}, + ResourceLimits: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("1"), Memory: k8sutil.StringToPointer("1Gi")}, }, }, { - subTest: "test SetMemoryRequestToLimit flag", + subTest: "test generation of resources when default is not defined", config: config.Config{ - Resources: configResources, + Resources: config.Resources{ + ClusterLabels: map[string]string{"application": "spilo"}, + ClusterNameLabel: clusterNameLabel, + DefaultCPURequest: "100m", + DefaultMemoryRequest: "100Mi", + PodRoleLabel: "spilo-role", + }, PodManagementPolicy: "ordered_ready", - SetMemoryRequestToLimit: true, + SetMemoryRequestToLimit: false, }, pgSpec: acidv1.Postgresql{ ObjectMeta: metav1.ObjectMeta{ @@ -2786,10 +3081,6 @@ func TestGenerateResourceRequirements(t *testing.T) { Namespace: namespace, }, Spec: acidv1.PostgresSpec{ - Resources: &acidv1.Resources{ - ResourceRequests: acidv1.ResourceDescription{Memory: "200Mi"}, - ResourceLimits: acidv1.ResourceDescription{Memory: "300Mi"}, - }, TeamID: "acid", Volume: acidv1.Volume{ Size: "1G", @@ -2797,16 +3088,27 @@ func TestGenerateResourceRequirements(t *testing.T) { }, }, expectedResources: acidv1.Resources{ - ResourceRequests: acidv1.ResourceDescription{CPU: "100m", Memory: "300Mi"}, - ResourceLimits: acidv1.ResourceDescription{CPU: "1", Memory: "300Mi"}, + ResourceRequests: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("100m"), Memory: k8sutil.StringToPointer("100Mi")}, }, }, { - subTest: "test SetMemoryRequestToLimit flag for sidecar container, too", + subTest: "test generation of resources when min limits are all set to zero", config: config.Config{ - Resources: configResources, + Resources: config.Resources{ + ClusterLabels: map[string]string{"application": "spilo"}, + ClusterNameLabel: clusterNameLabel, + DefaultCPURequest: "0", + DefaultCPULimit: "0", + MaxCPURequest: "0", + MinCPULimit: "0", + DefaultMemoryRequest: "0", + DefaultMemoryLimit: "0", + MaxMemoryRequest: "0", + MinMemoryLimit: "0", + PodRoleLabel: "spilo-role", + }, PodManagementPolicy: "ordered_ready", - SetMemoryRequestToLimit: true, + SetMemoryRequestToLimit: false, }, pgSpec: acidv1.Postgresql{ ObjectMeta: metav1.ObjectMeta{ @@ -2814,12 +3116,126 @@ func TestGenerateResourceRequirements(t *testing.T) { Namespace: namespace, }, Spec: acidv1.PostgresSpec{ - Sidecars: []acidv1.Sidecar{ - acidv1.Sidecar{ + Resources: &acidv1.Resources{ + ResourceLimits: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("5m"), Memory: k8sutil.StringToPointer("5Mi")}, + }, + TeamID: "acid", + Volume: acidv1.Volume{ + Size: "1G", + }, + }, + }, + expectedResources: acidv1.Resources{ + ResourceLimits: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("5m"), Memory: k8sutil.StringToPointer("5Mi")}, + }, + }, + { + subTest: "test matchLimitsWithRequestsIfSmaller", + config: config.Config{ + Resources: configResources, + PodManagementPolicy: "ordered_ready", + SetMemoryRequestToLimit: false, + }, + pgSpec: acidv1.Postgresql{ + ObjectMeta: metav1.ObjectMeta{ + Name: clusterName, + Namespace: namespace, + }, + Spec: acidv1.PostgresSpec{ + Resources: &acidv1.Resources{ + ResourceRequests: acidv1.ResourceDescription{Memory: k8sutil.StringToPointer("750Mi")}, + ResourceLimits: acidv1.ResourceDescription{Memory: 
k8sutil.StringToPointer("300Mi")}, + }, + TeamID: "acid", + Volume: acidv1.Volume{ + Size: "1G", + }, + }, + }, + expectedResources: acidv1.Resources{ + ResourceRequests: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("100m"), Memory: k8sutil.StringToPointer("750Mi")}, + ResourceLimits: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("1"), Memory: k8sutil.StringToPointer("750Mi")}, + }, + }, + { + subTest: "defaults are not defined but minimum limit is", + config: config.Config{ + Resources: config.Resources{ + ClusterLabels: map[string]string{"application": "spilo"}, + ClusterNameLabel: clusterNameLabel, + MinMemoryLimit: "250Mi", + PodRoleLabel: "spilo-role", + }, + PodManagementPolicy: "ordered_ready", + SetMemoryRequestToLimit: false, + }, + pgSpec: acidv1.Postgresql{ + ObjectMeta: metav1.ObjectMeta{ + Name: clusterName, + Namespace: namespace, + }, + Spec: acidv1.PostgresSpec{ + Resources: &acidv1.Resources{ + ResourceRequests: acidv1.ResourceDescription{Memory: k8sutil.StringToPointer("500Mi")}, + }, + TeamID: "acid", + Volume: acidv1.Volume{ + Size: "1G", + }, + }, + }, + expectedResources: acidv1.Resources{ + ResourceRequests: acidv1.ResourceDescription{Memory: k8sutil.StringToPointer("500Mi")}, + ResourceLimits: acidv1.ResourceDescription{Memory: k8sutil.StringToPointer("500Mi")}, + }, + }, + { + subTest: "test SetMemoryRequestToLimit flag", + config: config.Config{ + Resources: configResources, + PodManagementPolicy: "ordered_ready", + SetMemoryRequestToLimit: true, + }, + pgSpec: acidv1.Postgresql{ + ObjectMeta: metav1.ObjectMeta{ + Name: clusterName, + Namespace: namespace, + }, + Spec: acidv1.PostgresSpec{ + Resources: &acidv1.Resources{ + ResourceRequests: acidv1.ResourceDescription{Memory: k8sutil.StringToPointer("200Mi")}, + ResourceLimits: acidv1.ResourceDescription{Memory: k8sutil.StringToPointer("300Mi")}, + }, + TeamID: "acid", + Volume: acidv1.Volume{ + Size: "1G", + }, + }, + }, + expectedResources: acidv1.Resources{ + ResourceRequests: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("100m"), Memory: k8sutil.StringToPointer("300Mi")}, + ResourceLimits: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("1"), Memory: k8sutil.StringToPointer("300Mi")}, + }, + }, + { + subTest: "test SetMemoryRequestToLimit flag for sidecar container, too", + config: config.Config{ + Resources: configResources, + PodManagementPolicy: "ordered_ready", + SetMemoryRequestToLimit: true, + }, + pgSpec: acidv1.Postgresql{ + ObjectMeta: metav1.ObjectMeta{ + Name: clusterName, + Namespace: namespace, + }, + Spec: acidv1.PostgresSpec{ + Sidecars: []acidv1.Sidecar{ + { Name: sidecarName, Resources: &acidv1.Resources{ - ResourceRequests: acidv1.ResourceDescription{CPU: "10m", Memory: "10Mi"}, - ResourceLimits: acidv1.ResourceDescription{CPU: "100m", Memory: "100Mi"}, + ResourceRequests: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("10m"), Memory: k8sutil.StringToPointer("10Mi")}, + ResourceLimits: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("100m"), Memory: k8sutil.StringToPointer("100Mi")}, }, }, }, @@ -2830,8 +3246,8 @@ func TestGenerateResourceRequirements(t *testing.T) { }, }, expectedResources: acidv1.Resources{ - ResourceRequests: acidv1.ResourceDescription{CPU: "10m", Memory: "100Mi"}, - ResourceLimits: acidv1.ResourceDescription{CPU: "100m", Memory: "100Mi"}, + ResourceRequests: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("10m"), Memory: k8sutil.StringToPointer("100Mi")}, + ResourceLimits: acidv1.ResourceDescription{CPU: 
k8sutil.StringToPointer("100m"), Memory: k8sutil.StringToPointer("100Mi")}, }, }, { @@ -2848,8 +3264,8 @@ func TestGenerateResourceRequirements(t *testing.T) { }, Spec: acidv1.PostgresSpec{ Resources: &acidv1.Resources{ - ResourceRequests: acidv1.ResourceDescription{CPU: "10m", Memory: "250Mi"}, - ResourceLimits: acidv1.ResourceDescription{CPU: "400m", Memory: "800Mi"}, + ResourceRequests: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("10m"), Memory: k8sutil.StringToPointer("250Mi")}, + ResourceLimits: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("400m"), Memory: k8sutil.StringToPointer("800Mi")}, }, TeamID: "acid", Volume: acidv1.Volume{ @@ -2858,8 +3274,8 @@ func TestGenerateResourceRequirements(t *testing.T) { }, }, expectedResources: acidv1.Resources{ - ResourceRequests: acidv1.ResourceDescription{CPU: "10m", Memory: "250Mi"}, - ResourceLimits: acidv1.ResourceDescription{CPU: "400m", Memory: "800Mi"}, + ResourceRequests: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("10m"), Memory: k8sutil.StringToPointer("250Mi")}, + ResourceLimits: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("400m"), Memory: k8sutil.StringToPointer("800Mi")}, }, }, { @@ -2876,8 +3292,8 @@ func TestGenerateResourceRequirements(t *testing.T) { }, Spec: acidv1.PostgresSpec{ Resources: &acidv1.Resources{ - ResourceRequests: acidv1.ResourceDescription{CPU: "100m", Memory: "100Mi"}, - ResourceLimits: acidv1.ResourceDescription{CPU: "200m", Memory: "200Mi"}, + ResourceRequests: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("100m"), Memory: k8sutil.StringToPointer("100Mi")}, + ResourceLimits: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("200m"), Memory: k8sutil.StringToPointer("200Mi")}, }, TeamID: "acid", Volume: acidv1.Volume{ @@ -2886,8 +3302,8 @@ func TestGenerateResourceRequirements(t *testing.T) { }, }, expectedResources: acidv1.Resources{ - ResourceRequests: acidv1.ResourceDescription{CPU: "100m", Memory: "100Mi"}, - ResourceLimits: acidv1.ResourceDescription{CPU: "250m", Memory: "250Mi"}, + ResourceRequests: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("100m"), Memory: k8sutil.StringToPointer("100Mi")}, + ResourceLimits: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("250m"), Memory: k8sutil.StringToPointer("250Mi")}, }, }, { @@ -2904,11 +3320,11 @@ func TestGenerateResourceRequirements(t *testing.T) { }, Spec: acidv1.PostgresSpec{ Sidecars: []acidv1.Sidecar{ - acidv1.Sidecar{ + { Name: sidecarName, Resources: &acidv1.Resources{ - ResourceRequests: acidv1.ResourceDescription{CPU: "10m", Memory: "10Mi"}, - ResourceLimits: acidv1.ResourceDescription{CPU: "100m", Memory: "100Mi"}, + ResourceRequests: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("10m"), Memory: k8sutil.StringToPointer("10Mi")}, + ResourceLimits: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("100m"), Memory: k8sutil.StringToPointer("100Mi")}, }, }, }, @@ -2919,8 +3335,8 @@ func TestGenerateResourceRequirements(t *testing.T) { }, }, expectedResources: acidv1.Resources{ - ResourceRequests: acidv1.ResourceDescription{CPU: "10m", Memory: "10Mi"}, - ResourceLimits: acidv1.ResourceDescription{CPU: "100m", Memory: "100Mi"}, + ResourceRequests: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("10m"), Memory: k8sutil.StringToPointer("10Mi")}, + ResourceLimits: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("100m"), Memory: k8sutil.StringToPointer("100Mi")}, }, }, { @@ -2937,8 +3353,8 @@ func TestGenerateResourceRequirements(t *testing.T) { }, 
Spec: acidv1.PostgresSpec{ Resources: &acidv1.Resources{ - ResourceRequests: acidv1.ResourceDescription{CPU: "1", Memory: "2Gi"}, - ResourceLimits: acidv1.ResourceDescription{CPU: "2", Memory: "4Gi"}, + ResourceRequests: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("1"), Memory: k8sutil.StringToPointer("2Gi")}, + ResourceLimits: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("2"), Memory: k8sutil.StringToPointer("4Gi")}, }, TeamID: "acid", Volume: acidv1.Volume{ @@ -2947,8 +3363,8 @@ func TestGenerateResourceRequirements(t *testing.T) { }, }, expectedResources: acidv1.Resources{ - ResourceRequests: acidv1.ResourceDescription{CPU: "500m", Memory: "1Gi"}, - ResourceLimits: acidv1.ResourceDescription{CPU: "2", Memory: "4Gi"}, + ResourceRequests: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("500m"), Memory: k8sutil.StringToPointer("1Gi")}, + ResourceLimits: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("2"), Memory: k8sutil.StringToPointer("4Gi")}, }, }, { @@ -2965,8 +3381,35 @@ func TestGenerateResourceRequirements(t *testing.T) { }, Spec: acidv1.PostgresSpec{ Resources: &acidv1.Resources{ - ResourceRequests: acidv1.ResourceDescription{Memory: "500Mi"}, - ResourceLimits: acidv1.ResourceDescription{Memory: "2Gi"}, + ResourceRequests: acidv1.ResourceDescription{Memory: k8sutil.StringToPointer("500Mi")}, + ResourceLimits: acidv1.ResourceDescription{Memory: k8sutil.StringToPointer("2Gi")}, + }, + TeamID: "acid", + Volume: acidv1.Volume{ + Size: "1G", + }, + }, + }, + expectedResources: acidv1.Resources{ + ResourceRequests: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("100m"), Memory: k8sutil.StringToPointer("1Gi")}, + ResourceLimits: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("1"), Memory: k8sutil.StringToPointer("2Gi")}, + }, + }, + { + subTest: "test HugePages are not set on container when not requested in manifest", + config: config.Config{ + Resources: configResources, + PodManagementPolicy: "ordered_ready", + }, + pgSpec: acidv1.Postgresql{ + ObjectMeta: metav1.ObjectMeta{ + Name: clusterName, + Namespace: namespace, + }, + Spec: acidv1.PostgresSpec{ + Resources: &acidv1.Resources{ + ResourceRequests: acidv1.ResourceDescription{}, + ResourceLimits: acidv1.ResourceDescription{}, }, TeamID: "acid", Volume: acidv1.Volume{ @@ -2975,8 +3418,106 @@ func TestGenerateResourceRequirements(t *testing.T) { }, }, expectedResources: acidv1.Resources{ - ResourceRequests: acidv1.ResourceDescription{CPU: "100m", Memory: "1Gi"}, - ResourceLimits: acidv1.ResourceDescription{CPU: "1", Memory: "2Gi"}, + ResourceRequests: acidv1.ResourceDescription{ + CPU: k8sutil.StringToPointer("100m"), + Memory: k8sutil.StringToPointer("100Mi"), + }, + ResourceLimits: acidv1.ResourceDescription{ + CPU: k8sutil.StringToPointer("1"), + Memory: k8sutil.StringToPointer("500Mi"), + }, + }, + }, + { + subTest: "test HugePages are passed through to the postgres container", + config: config.Config{ + Resources: configResources, + PodManagementPolicy: "ordered_ready", + }, + pgSpec: acidv1.Postgresql{ + ObjectMeta: metav1.ObjectMeta{ + Name: clusterName, + Namespace: namespace, + }, + Spec: acidv1.PostgresSpec{ + Resources: &acidv1.Resources{ + ResourceRequests: acidv1.ResourceDescription{ + HugePages2Mi: k8sutil.StringToPointer("128Mi"), + HugePages1Gi: k8sutil.StringToPointer("1Gi"), + }, + ResourceLimits: acidv1.ResourceDescription{ + HugePages2Mi: k8sutil.StringToPointer("256Mi"), + HugePages1Gi: k8sutil.StringToPointer("2Gi"), + }, + }, + TeamID: "acid", + Volume: 
acidv1.Volume{ + Size: "1G", + }, + }, + }, + expectedResources: acidv1.Resources{ + ResourceRequests: acidv1.ResourceDescription{ + CPU: k8sutil.StringToPointer("100m"), + Memory: k8sutil.StringToPointer("100Mi"), + HugePages2Mi: k8sutil.StringToPointer("128Mi"), + HugePages1Gi: k8sutil.StringToPointer("1Gi"), + }, + ResourceLimits: acidv1.ResourceDescription{ + CPU: k8sutil.StringToPointer("1"), + Memory: k8sutil.StringToPointer("500Mi"), + HugePages2Mi: k8sutil.StringToPointer("256Mi"), + HugePages1Gi: k8sutil.StringToPointer("2Gi"), + }, + }, + }, + { + subTest: "test HugePages are passed through on sidecars", + config: config.Config{ + Resources: configResources, + PodManagementPolicy: "ordered_ready", + }, + pgSpec: acidv1.Postgresql{ + ObjectMeta: metav1.ObjectMeta{ + Name: clusterName, + Namespace: namespace, + }, + Spec: acidv1.PostgresSpec{ + Sidecars: []acidv1.Sidecar{ + { + Name: "test-sidecar", + DockerImage: "test-image", + Resources: &acidv1.Resources{ + ResourceRequests: acidv1.ResourceDescription{ + HugePages2Mi: k8sutil.StringToPointer("128Mi"), + HugePages1Gi: k8sutil.StringToPointer("1Gi"), + }, + ResourceLimits: acidv1.ResourceDescription{ + HugePages2Mi: k8sutil.StringToPointer("256Mi"), + HugePages1Gi: k8sutil.StringToPointer("2Gi"), + }, + }, + }, + }, + TeamID: "acid", + Volume: acidv1.Volume{ + Size: "1G", + }, + }, + }, + expectedResources: acidv1.Resources{ + ResourceRequests: acidv1.ResourceDescription{ + CPU: k8sutil.StringToPointer("100m"), + Memory: k8sutil.StringToPointer("100Mi"), + HugePages2Mi: k8sutil.StringToPointer("128Mi"), + HugePages1Gi: k8sutil.StringToPointer("1Gi"), + }, + ResourceLimits: acidv1.ResourceDescription{ + CPU: k8sutil.StringToPointer("1"), + Memory: k8sutil.StringToPointer("500Mi"), + HugePages2Mi: k8sutil.StringToPointer("256Mi"), + HugePages1Gi: k8sutil.StringToPointer("2Gi"), + }, }, }, } @@ -3009,7 +3550,9 @@ func TestGenerateResourceRequirements(t *testing.T) { func TestGenerateLogicalBackupJob(t *testing.T) { clusterName := "acid-test-cluster" + teamId := "test" configResources := config.Resources{ + ClusterNameLabel: "cluster-name", DefaultCPURequest: "100m", DefaultCPULimit: "1", DefaultMemoryRequest: "100Mi", @@ -3017,12 +3560,14 @@ func TestGenerateLogicalBackupJob(t *testing.T) { } tests := []struct { - subTest string - config config.Config - specSchedule string - expectedSchedule string - expectedJobName string - expectedResources acidv1.Resources + subTest string + config config.Config + specSchedule string + expectedSchedule string + expectedJobName string + expectedResources acidv1.Resources + expectedAnnotation map[string]string + expectedLabel map[string]string }{ { subTest: "test generation of logical backup pod resources when not configured", @@ -3039,9 +3584,11 @@ func TestGenerateLogicalBackupJob(t *testing.T) { expectedSchedule: "30 00 * * *", expectedJobName: "logical-backup-acid-test-cluster", expectedResources: acidv1.Resources{ - ResourceRequests: acidv1.ResourceDescription{CPU: "100m", Memory: "100Mi"}, - ResourceLimits: acidv1.ResourceDescription{CPU: "1", Memory: "500Mi"}, + ResourceRequests: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("100m"), Memory: k8sutil.StringToPointer("100Mi")}, + ResourceLimits: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("1"), Memory: k8sutil.StringToPointer("500Mi")}, }, + expectedLabel: map[string]string{configResources.ClusterNameLabel: clusterName, "team": teamId}, + expectedAnnotation: nil, }, { subTest: "test generation of logical backup pod 
resources when configured", @@ -3062,9 +3609,11 @@ func TestGenerateLogicalBackupJob(t *testing.T) { expectedSchedule: "30 00 * * 7", expectedJobName: "lb-acid-test-cluster", expectedResources: acidv1.Resources{ - ResourceRequests: acidv1.ResourceDescription{CPU: "10m", Memory: "50Mi"}, - ResourceLimits: acidv1.ResourceDescription{CPU: "300m", Memory: "300Mi"}, + ResourceRequests: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("10m"), Memory: k8sutil.StringToPointer("50Mi")}, + ResourceLimits: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("300m"), Memory: k8sutil.StringToPointer("300Mi")}, }, + expectedLabel: map[string]string{configResources.ClusterNameLabel: clusterName, "team": teamId}, + expectedAnnotation: nil, }, { subTest: "test generation of logical backup pod resources when partly configured", @@ -3083,9 +3632,11 @@ func TestGenerateLogicalBackupJob(t *testing.T) { expectedSchedule: "30 00 * * *", expectedJobName: "acid-test-cluster", expectedResources: acidv1.Resources{ - ResourceRequests: acidv1.ResourceDescription{CPU: "50m", Memory: "100Mi"}, - ResourceLimits: acidv1.ResourceDescription{CPU: "250m", Memory: "500Mi"}, + ResourceRequests: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("50m"), Memory: k8sutil.StringToPointer("100Mi")}, + ResourceLimits: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("250m"), Memory: k8sutil.StringToPointer("500Mi")}, }, + expectedLabel: map[string]string{configResources.ClusterNameLabel: clusterName, "team": teamId}, + expectedAnnotation: nil, }, { subTest: "test generation of logical backup pod resources with SetMemoryRequestToLimit enabled", @@ -3104,9 +3655,55 @@ func TestGenerateLogicalBackupJob(t *testing.T) { expectedSchedule: "30 00 * * *", expectedJobName: "test-long-prefix-so-name-must-be-trimmed-acid-test-c", expectedResources: acidv1.Resources{ - ResourceRequests: acidv1.ResourceDescription{CPU: "100m", Memory: "200Mi"}, - ResourceLimits: acidv1.ResourceDescription{CPU: "1", Memory: "200Mi"}, + ResourceRequests: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("100m"), Memory: k8sutil.StringToPointer("200Mi")}, + ResourceLimits: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("1"), Memory: k8sutil.StringToPointer("200Mi")}, + }, + expectedLabel: map[string]string{configResources.ClusterNameLabel: clusterName, "team": teamId}, + expectedAnnotation: nil, + }, + { + subTest: "test generation of pod annotations when cluster InheritedLabel is set", + config: config.Config{ + Resources: config.Resources{ + ClusterNameLabel: "cluster-name", + InheritedLabels: []string{"labelKey"}, + DefaultCPURequest: "100m", + DefaultCPULimit: "1", + DefaultMemoryRequest: "100Mi", + DefaultMemoryLimit: "500Mi", + }, + }, + specSchedule: "", + expectedJobName: "acid-test-cluster", + expectedSchedule: "", + expectedResources: acidv1.Resources{ + ResourceRequests: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("100m"), Memory: k8sutil.StringToPointer("100Mi")}, + ResourceLimits: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("1"), Memory: k8sutil.StringToPointer("500Mi")}, + }, + expectedLabel: map[string]string{"labelKey": "labelValue", "cluster-name": clusterName, "team": teamId}, + expectedAnnotation: nil, + }, + { + subTest: "test generation of pod annotations when cluster InheritedAnnotations is set", + config: config.Config{ + Resources: config.Resources{ + ClusterNameLabel: "cluster-name", + InheritedAnnotations: []string{"annotationKey"}, + DefaultCPURequest: "100m", + 
DefaultCPULimit: "1", + DefaultMemoryRequest: "100Mi", + DefaultMemoryLimit: "500Mi", + }, + }, + specSchedule: "", + expectedJobName: "acid-test-cluster", + expectedSchedule: "", + expectedResources: acidv1.Resources{ + ResourceRequests: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("100m"), Memory: k8sutil.StringToPointer("100Mi")}, + ResourceLimits: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("1"), Memory: k8sutil.StringToPointer("500Mi")}, }, + expectedLabel: map[string]string{configResources.ClusterNameLabel: clusterName, "team": teamId}, + expectedAnnotation: map[string]string{"annotationKey": "annotationValue"}, }, } @@ -3115,12 +3712,24 @@ func TestGenerateLogicalBackupJob(t *testing.T) { Config{ OpConfig: tt.config, }, k8sutil.NewMockKubernetesClient(), acidv1.Postgresql{}, logger, eventRecorder) - cluster.ObjectMeta.Name = clusterName + cluster.Spec.TeamID = teamId + if cluster.ObjectMeta.Labels == nil { + cluster.ObjectMeta.Labels = make(map[string]string) + } + if cluster.ObjectMeta.Annotations == nil { + cluster.ObjectMeta.Annotations = make(map[string]string) + } + cluster.ObjectMeta.Labels["labelKey"] = "labelValue" + cluster.ObjectMeta.Annotations["annotationKey"] = "annotationValue" cluster.Spec.LogicalBackupSchedule = tt.specSchedule cronJob, err := cluster.generateLogicalBackupJob() assert.NoError(t, err) + if !reflect.DeepEqual(cronJob.ObjectMeta.OwnerReferences, cluster.ownerReferences()) { + t.Errorf("%s - %s: expected owner references %#v, got %#v", t.Name(), tt.subTest, cluster.ownerReferences(), cronJob.ObjectMeta.OwnerReferences) + } + if cronJob.Spec.Schedule != tt.expectedSchedule { t.Errorf("%s - %s: expected schedule %s, got %s", t.Name(), tt.subTest, tt.expectedSchedule, cronJob.Spec.Schedule) } @@ -3129,6 +3738,14 @@ func TestGenerateLogicalBackupJob(t *testing.T) { t.Errorf("%s - %s: expected job name %s, got %s", t.Name(), tt.subTest, tt.expectedJobName, cronJob.Name) } + if !reflect.DeepEqual(cronJob.Labels, tt.expectedLabel) { + t.Errorf("%s - %s: expected labels %s, got %s", t.Name(), tt.subTest, tt.expectedLabel, cronJob.Labels) + } + + if !reflect.DeepEqual(cronJob.Annotations, tt.expectedAnnotation) { + t.Errorf("%s - %s: expected annotations %s, got %s", t.Name(), tt.subTest, tt.expectedAnnotation, cronJob.Annotations) + } + containers := cronJob.Spec.JobTemplate.Spec.Template.Spec.Containers clusterResources, err := parseResourceRequirements(containers[0].Resources) assert.NoError(t, err) @@ -3138,6 +3755,191 @@ func TestGenerateLogicalBackupJob(t *testing.T) { } } +func TestGenerateLogicalBackupPodEnvVars(t *testing.T) { + var ( + dummyUUID = "efd12e58-5786-11e8-b5a7-06148230260c" + dummyBucket = "dummy-backup-location" + ) + + expectedLogicalBackupS3Bucket := []ExpectedValue{ + { + envIndex: 9, + envVarConstant: "LOGICAL_BACKUP_PROVIDER", + envVarValue: "s3", + }, + { + envIndex: 10, + envVarConstant: "LOGICAL_BACKUP_S3_BUCKET", + envVarValue: dummyBucket, + }, + { + envIndex: 11, + envVarConstant: "LOGICAL_BACKUP_S3_BUCKET_PREFIX", + envVarValue: "spilo", + }, + { + envIndex: 12, + envVarConstant: "LOGICAL_BACKUP_S3_BUCKET_SCOPE_SUFFIX", + envVarValue: "/" + dummyUUID, + }, + { + envIndex: 13, + envVarConstant: "LOGICAL_BACKUP_S3_REGION", + envVarValue: "eu-central-1", + }, + { + envIndex: 14, + envVarConstant: "LOGICAL_BACKUP_S3_ENDPOINT", + envVarValue: "", + }, + { + envIndex: 15, + envVarConstant: "LOGICAL_BACKUP_S3_SSE", + envVarValue: "", + }, + { + envIndex: 16, + envVarConstant: "LOGICAL_BACKUP_S3_RETENTION_TIME", + 
envVarValue: "1 month", + }, + } + + expectedLogicalBackupGCPCreds := []ExpectedValue{ + { + envIndex: 9, + envVarConstant: "LOGICAL_BACKUP_PROVIDER", + envVarValue: "gcs", + }, + { + envIndex: 13, + envVarConstant: "LOGICAL_BACKUP_GOOGLE_APPLICATION_CREDENTIALS", + envVarValue: "some-path-to-credentials", + }, + } + + expectedLogicalBackupAzureStorage := []ExpectedValue{ + { + envIndex: 9, + envVarConstant: "LOGICAL_BACKUP_PROVIDER", + envVarValue: "az", + }, + { + envIndex: 13, + envVarConstant: "LOGICAL_BACKUP_AZURE_STORAGE_ACCOUNT_NAME", + envVarValue: "some-azure-storage-account-name", + }, + { + envIndex: 14, + envVarConstant: "LOGICAL_BACKUP_AZURE_STORAGE_CONTAINER", + envVarValue: "some-azure-storage-container", + }, + { + envIndex: 15, + envVarConstant: "LOGICAL_BACKUP_AZURE_STORAGE_ACCOUNT_KEY", + envVarValue: "some-azure-storage-account-key", + }, + } + + expectedLogicalBackupRetentionTime := []ExpectedValue{ + { + envIndex: 16, + envVarConstant: "LOGICAL_BACKUP_S3_RETENTION_TIME", + envVarValue: "3 months", + }, + } + + tests := []struct { + subTest string + opConfig config.Config + expectedValues []ExpectedValue + pgsql acidv1.Postgresql + }{ + { + subTest: "logical backup with provider: s3", + opConfig: config.Config{ + LogicalBackup: config.LogicalBackup{ + LogicalBackupProvider: "s3", + LogicalBackupS3Bucket: dummyBucket, + LogicalBackupS3BucketPrefix: "spilo", + LogicalBackupS3Region: "eu-central-1", + LogicalBackupS3RetentionTime: "1 month", + }, + }, + expectedValues: expectedLogicalBackupS3Bucket, + }, + { + subTest: "logical backup with provider: gcs", + opConfig: config.Config{ + LogicalBackup: config.LogicalBackup{ + LogicalBackupProvider: "gcs", + LogicalBackupS3Bucket: dummyBucket, + LogicalBackupGoogleApplicationCredentials: "some-path-to-credentials", + }, + }, + expectedValues: expectedLogicalBackupGCPCreds, + }, + { + subTest: "logical backup with provider: az", + opConfig: config.Config{ + LogicalBackup: config.LogicalBackup{ + LogicalBackupProvider: "az", + LogicalBackupS3Bucket: dummyBucket, + LogicalBackupAzureStorageAccountName: "some-azure-storage-account-name", + LogicalBackupAzureStorageContainer: "some-azure-storage-container", + LogicalBackupAzureStorageAccountKey: "some-azure-storage-account-key", + }, + }, + expectedValues: expectedLogicalBackupAzureStorage, + }, + { + subTest: "will override retention time parameter", + opConfig: config.Config{ + LogicalBackup: config.LogicalBackup{ + LogicalBackupProvider: "s3", + LogicalBackupS3RetentionTime: "1 month", + }, + }, + expectedValues: expectedLogicalBackupRetentionTime, + pgsql: acidv1.Postgresql{ + Spec: acidv1.PostgresSpec{ + LogicalBackupRetention: "3 months", + }, + }, + }, + } + + for _, tt := range tests { + c := newMockCluster(tt.opConfig) + pgsql := tt.pgsql + c.Postgresql = pgsql + c.UID = types.UID(dummyUUID) + + actualEnvs := c.generateLogicalBackupPodEnvVars() + + for _, ev := range tt.expectedValues { + env := actualEnvs[ev.envIndex] + + if env.Name != ev.envVarConstant { + t.Errorf("%s %s: expected env name %s, have %s instead", + t.Name(), tt.subTest, ev.envVarConstant, env.Name) + } + + if ev.envVarValueRef != nil { + if !reflect.DeepEqual(env.ValueFrom, ev.envVarValueRef) { + t.Errorf("%s %s: expected env value reference %#v, have %#v instead", + t.Name(), tt.subTest, ev.envVarValueRef, env.ValueFrom) + } + continue + } + + if env.Value != ev.envVarValue { + t.Errorf("%s %s: expected env value %s, have %s instead", + t.Name(), tt.subTest, ev.envVarValue, env.Value) + } + } + } +} + 
func TestGenerateCapabilities(t *testing.T) { tests := []struct { subTest string diff --git a/pkg/cluster/majorversionupgrade.go b/pkg/cluster/majorversionupgrade.go index f635dc604..d8a1fb917 100644 --- a/pkg/cluster/majorversionupgrade.go +++ b/pkg/cluster/majorversionupgrade.go @@ -1,25 +1,34 @@ package cluster import ( + "context" + "encoding/json" "fmt" "strings" + "github.com/Masterminds/semver" "github.com/zalando/postgres-operator/pkg/spec" "github.com/zalando/postgres-operator/pkg/util" v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" ) // VersionMap Map of version numbers var VersionMap = map[string]int{ - "10": 100000, - "11": 110000, "12": 120000, "13": 130000, "14": 140000, "15": 150000, - + "16": 160000, + "17": 170000, } +const ( + majorVersionUpgradeSuccessAnnotation = "last-major-upgrade-success" + majorVersionUpgradeFailureAnnotation = "last-major-upgrade-failure" +) + // IsBiggerPostgresVersion Compare two Postgres version numbers func IsBiggerPostgresVersion(old string, new string) bool { oldN := VersionMap[old] @@ -36,7 +45,7 @@ func (c *Cluster) GetDesiredMajorVersionAsInt() int { func (c *Cluster) GetDesiredMajorVersion() string { if c.Config.OpConfig.MajorVersionUpgradeMode == "full" { - // e.g. current is 10, minimal is 11 allowing 11 to 15 clusters, everything below is upgraded + // e.g. current is 13, minimal is 13 allowing 13 to 17 clusters, everything below is upgraded if IsBiggerPostgresVersion(c.Spec.PgVersion, c.Config.OpConfig.MinimalMajorVersion) { c.logger.Infof("overwriting configured major version %s to %s", c.Spec.PgVersion, c.Config.OpConfig.TargetMajorVersion) return c.Config.OpConfig.TargetMajorVersion @@ -56,6 +65,63 @@ func (c *Cluster) isUpgradeAllowedForTeam(owningTeam string) bool { return util.SliceContains(allowedTeams, owningTeam) } +func (c *Cluster) annotatePostgresResource(isSuccess bool) error { + annotations := make(map[string]string) + currentTime := metav1.Now().Format("2006-01-02T15:04:05Z") + if isSuccess { + annotations[majorVersionUpgradeSuccessAnnotation] = currentTime + } else { + annotations[majorVersionUpgradeFailureAnnotation] = currentTime + } + patchData, err := metaAnnotationsPatch(annotations) + if err != nil { + c.logger.Errorf("could not form patch for %s postgresql resource: %v", c.Name, err) + return err + } + _, err = c.KubeClient.Postgresqls(c.Namespace).Patch(context.Background(), c.Name, types.MergePatchType, patchData, metav1.PatchOptions{}) + if err != nil { + c.logger.Errorf("failed to patch annotations to postgresql resource: %v", err) + return err + } + return nil +} + +func (c *Cluster) removeFailuresAnnotation() error { + annotationToRemove := []map[string]string{ + { + "op": "remove", + "path": fmt.Sprintf("/metadata/annotations/%s", majorVersionUpgradeFailureAnnotation), + }, + } + removePatch, err := json.Marshal(annotationToRemove) + if err != nil { + c.logger.Errorf("could not form removal patch for %s postgresql resource: %v", c.Name, err) + return err + } + _, err = c.KubeClient.Postgresqls(c.Namespace).Patch(context.Background(), c.Name, types.JSONPatchType, removePatch, metav1.PatchOptions{}) + if err != nil { + c.logger.Errorf("failed to remove annotations from postgresql resource: %v", err) + return err + } + return nil +} + +func (c *Cluster) criticalOperationLabel(pods []v1.Pod, value *string) error { + metadataReq := map[string]map[string]map[string]*string{"metadata": {"labels": {"critical-operation": value}}} + + patchReq, err := 
json.Marshal(metadataReq) + if err != nil { + return fmt.Errorf("could not marshal ObjectMeta: %v", err) + } + for _, pod := range pods { + _, err = c.KubeClient.Pods(c.Namespace).Patch(context.TODO(), pod.Name, types.StrategicMergePatchType, patchReq, metav1.PatchOptions{}) + if err != nil { + return err + } + } + return nil +} + /* Execute upgrade when mode is set to manual or full or when the owning team is allowed for upgrade (and mode is "off"). @@ -71,6 +137,10 @@ func (c *Cluster) majorVersionUpgrade() error { desiredVersion := c.GetDesiredMajorVersionAsInt() if c.currentMajorVersion >= desiredVersion { + if _, exists := c.ObjectMeta.Annotations[majorVersionUpgradeFailureAnnotation]; exists { // if failure annotation exists, remove it + c.removeFailuresAnnotation() + c.logger.Infof("removing failure annotation as the cluster is already up to date") + } c.logger.Infof("cluster version up to date. current: %d, min desired: %d", c.currentMajorVersion, desiredVersion) return nil } @@ -81,54 +151,138 @@ func (c *Cluster) majorVersionUpgrade() error { } allRunning := true + isStandbyCluster := false var masterPod *v1.Pod for i, pod := range pods { ps, _ := c.patroni.GetMemberData(&pod) + if ps.Role == "standby_leader" { + isStandbyCluster = true + c.currentMajorVersion = ps.ServerVersion + break + } + if ps.State != "running" { allRunning = false c.logger.Infof("identified non running pod, potentially skipping major version upgrade") } - if ps.Role == "master" { + if ps.Role == "master" || ps.Role == "primary" { masterPod = &pods[i] c.currentMajorVersion = ps.ServerVersion } } + if masterPod == nil { + c.logger.Infof("no master in the cluster, skipping major version upgrade") + return nil + } + + // Recheck version with newest data from Patroni + if c.currentMajorVersion >= desiredVersion { + if _, exists := c.ObjectMeta.Annotations[majorVersionUpgradeFailureAnnotation]; exists { // if failure annotation exists, remove it + c.removeFailuresAnnotation() + c.logger.Infof("removing failure annotation as the cluster is already up to date") + } + c.logger.Infof("recheck cluster version is already up to date. current: %d, min desired: %d", c.currentMajorVersion, desiredVersion) + return nil + } else if isStandbyCluster { + c.logger.Warnf("skipping major version upgrade for %s/%s standby cluster. 
Re-deploy standby cluster with the required Postgres version specified", c.Namespace, c.Name) + return nil + } + + if _, exists := c.ObjectMeta.Annotations[majorVersionUpgradeFailureAnnotation]; exists { + c.logger.Infof("last major upgrade failed, skipping upgrade") + return nil + } + + if !isInMaintenanceWindow(c.Spec.MaintenanceWindows) { + c.logger.Infof("skipping major version upgrade, not in maintenance window") + return nil + } + + members, err := c.patroni.GetClusterMembers(masterPod) + if err != nil { + c.logger.Error("could not get cluster members data from Patroni API, skipping major version upgrade") + return err + } + patroniData, err := c.patroni.GetMemberData(masterPod) + if err != nil { + c.logger.Error("could not get members data from Patroni API, skipping major version upgrade") + return err + } + patroniVer, err := semver.NewVersion(patroniData.Patroni.Version) + if err != nil { + c.logger.Error("error parsing Patroni version") + patroniVer, _ = semver.NewVersion("3.0.4") + } + verConstraint, _ := semver.NewConstraint(">= 3.0.4") + checkStreaming, _ := verConstraint.Validate(patroniVer) + + for _, member := range members { + if PostgresRole(member.Role) == Leader { + continue + } + if checkStreaming && member.State != "streaming" { + c.logger.Infof("skipping major version upgrade, replica %s is not streaming from primary", member.Name) + return nil + } + if member.Lag > 16*1024*1024 { + c.logger.Infof("skipping major version upgrade, replication lag on member %s is too high", member.Name) + return nil + } + } + + isUpgradeSuccess := true numberOfPods := len(pods) if allRunning && masterPod != nil { c.logger.Infof("healthy cluster ready to upgrade, current: %d desired: %d", c.currentMajorVersion, desiredVersion) if c.currentMajorVersion < desiredVersion { + defer func() error { + if err = c.criticalOperationLabel(pods, nil); err != nil { + return fmt.Errorf("failed to remove critical-operation label: %s", err) + } + return nil + }() + val := "true" + if err = c.criticalOperationLabel(pods, &val); err != nil { + return fmt.Errorf("failed to assign critical-operation label: %s", err) + } + podName := &spec.NamespacedName{Namespace: masterPod.Namespace, Name: masterPod.Name} c.logger.Infof("triggering major version upgrade on pod %s of %d pods", masterPod.Name, numberOfPods) - c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeNormal, "Major Version Upgrade", "Starting major version upgrade on pod %s of %d pods", masterPod.Name, numberOfPods) + c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeNormal, "Major Version Upgrade", "starting major version upgrade on pod %s of %d pods", masterPod.Name, numberOfPods) upgradeCommand := fmt.Sprintf("set -o pipefail && /usr/bin/python3 /scripts/inplace_upgrade.py %d 2>&1 | tee last_upgrade.log", numberOfPods) - c.logger.Debugf("checking if the spilo image runs with root or non-root (check for user id=0)") + c.logger.Debug("checking if the spilo image runs with root or non-root (check for user id=0)") resultIdCheck, errIdCheck := c.ExecCommand(podName, "/bin/bash", "-c", "/usr/bin/id -u") if errIdCheck != nil { - c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeWarning, "Major Version Upgrade", "Checking user id to run upgrade from %d to %d FAILED: %v", c.currentMajorVersion, desiredVersion, errIdCheck) + c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeWarning, "Major Version Upgrade", "checking user id to run upgrade from %d to %d FAILED: %v", c.currentMajorVersion, desiredVersion, errIdCheck) } resultIdCheck = 
strings.TrimSuffix(resultIdCheck, "\n") - var result string + var result, scriptErrMsg string if resultIdCheck != "0" { - c.logger.Infof("User id was identified as: %s, hence default user is non-root already", resultIdCheck) + c.logger.Infof("user id was identified as: %s, hence default user is non-root already", resultIdCheck) result, err = c.ExecCommand(podName, "/bin/bash", "-c", upgradeCommand) + scriptErrMsg, _ = c.ExecCommand(podName, "/bin/bash", "-c", "tail -n 1 last_upgrade.log") } else { - c.logger.Infof("User id was identified as: %s, using su to reach the postgres user", resultIdCheck) + c.logger.Infof("user id was identified as: %s, using su to reach the postgres user", resultIdCheck) result, err = c.ExecCommand(podName, "/bin/su", "postgres", "-c", upgradeCommand) + scriptErrMsg, _ = c.ExecCommand(podName, "/bin/bash", "-c", "tail -n 1 last_upgrade.log") } if err != nil { - c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeWarning, "Major Version Upgrade", "Upgrade from %d to %d FAILED: %v", c.currentMajorVersion, desiredVersion, err) - return err + isUpgradeSuccess = false + c.annotatePostgresResource(isUpgradeSuccess) + c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeWarning, "Major Version Upgrade", "upgrade from %d to %d FAILED: %v", c.currentMajorVersion, desiredVersion, scriptErrMsg) + return fmt.Errorf(scriptErrMsg) } - c.logger.Infof("upgrade action triggered and command completed: %s", result[:100]) - c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeNormal, "Major Version Upgrade", "Upgrade from %d to %d finished", c.currentMajorVersion, desiredVersion) + c.annotatePostgresResource(isUpgradeSuccess) + c.logger.Infof("upgrade action triggered and command completed: %s", result[:100]) + c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeNormal, "Major Version Upgrade", "upgrade from %d to %d finished", c.currentMajorVersion, desiredVersion) } } diff --git a/pkg/cluster/pod.go b/pkg/cluster/pod.go index 582e3cb47..7fc95090e 100644 --- a/pkg/cluster/pod.go +++ b/pkg/cluster/pod.go @@ -7,6 +7,8 @@ import ( "strconv" "time" + "golang.org/x/exp/slices" + appsv1 "k8s.io/api/apps/v1" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -57,7 +59,7 @@ func (c *Cluster) markRollingUpdateFlagForPod(pod *v1.Pod, msg string) error { return nil } - c.logger.Debugf("mark rolling update annotation for %s: reason %s", pod.Name, msg) + c.logger.Infof("mark rolling update annotation for %s: reason %s", pod.Name, msg) flag := make(map[string]string) flag[rollingUpdatePodAnnotationKey] = strconv.FormatBool(true) @@ -108,7 +110,7 @@ func (c *Cluster) getRollingUpdateFlagFromPod(pod *v1.Pod) (flag bool) { } func (c *Cluster) deletePods() error { - c.logger.Debugln("deleting pods") + c.logger.Debug("deleting pods") pods, err := c.listPods() if err != nil { return err @@ -125,9 +127,9 @@ func (c *Cluster) deletePods() error { } } if len(pods) > 0 { - c.logger.Debugln("pods have been deleted") + c.logger.Debug("pods have been deleted") } else { - c.logger.Debugln("no pods to delete") + c.logger.Debug("no pods to delete") } return nil @@ -228,7 +230,7 @@ func (c *Cluster) MigrateMasterPod(podName spec.NamespacedName) error { return fmt.Errorf("could not get node %q: %v", oldMaster.Spec.NodeName, err) } if !eol { - c.logger.Debugf("no action needed: master pod is already on a live node") + c.logger.Debug("no action needed: master pod is already on a live node") return nil } @@ -278,11 +280,16 @@ func (c *Cluster) MigrateMasterPod(podName spec.NamespacedName) 
error { return fmt.Errorf("could not move pod: %v", err) } + scheduleSwitchover := false + if !isInMaintenanceWindow(c.Spec.MaintenanceWindows) { + c.logger.Infof("postponing switchover, not in maintenance window") + scheduleSwitchover = true + } err = retryutil.Retry(1*time.Minute, 5*time.Minute, func() (bool, error) { - err := c.Switchover(oldMaster, masterCandidateName) + err := c.Switchover(oldMaster, masterCandidateName, scheduleSwitchover) if err != nil { - c.logger.Errorf("could not failover to pod %q: %v", masterCandidateName, err) + c.logger.Errorf("could not switchover to pod %q: %v", masterCandidateName, err) return false, nil } return true, nil @@ -443,7 +450,7 @@ func (c *Cluster) recreatePods(pods []v1.Pod, switchoverCandidates []spec.Namesp // do not recreate master now so it will keep the update flag and switchover will be retried on next sync return fmt.Errorf("skipping switchover: %v", err) } - if err := c.Switchover(masterPod, masterCandidate); err != nil { + if err := c.Switchover(masterPod, masterCandidate, false); err != nil { return fmt.Errorf("could not perform switch over: %v", err) } } else if newMasterPod == nil && len(replicas) == 0 { @@ -478,6 +485,9 @@ func (c *Cluster) getSwitchoverCandidate(master *v1.Pod) (spec.NamespacedName, e if PostgresRole(member.Role) == SyncStandby { syncCandidates = append(syncCandidates, member) } + if PostgresRole(member.Role) != Leader && PostgresRole(member.Role) != StandbyLeader && slices.Contains([]string{"running", "streaming", "in archive recovery"}, member.State) { + candidates = append(candidates, member) + } } // if synchronous mode is enabled and no SyncStandy was found @@ -487,6 +497,12 @@ func (c *Cluster) getSwitchoverCandidate(master *v1.Pod) (spec.NamespacedName, e return false, nil } + // retry also in asynchronous mode when no replica candidate was found + if !c.Spec.Patroni.SynchronousMode && len(candidates) == 0 { + c.logger.Warnf("no replica candidate found - retrying fetching cluster members") + return false, nil + } + return true, nil }, ) @@ -500,20 +516,12 @@ func (c *Cluster) getSwitchoverCandidate(master *v1.Pod) (spec.NamespacedName, e return syncCandidates[i].Lag < syncCandidates[j].Lag }) return spec.NamespacedName{Namespace: master.Namespace, Name: syncCandidates[0].Name}, nil - } else { - // in asynchronous mode find running replicas - for _, member := range members { - if PostgresRole(member.Role) != Leader && PostgresRole(member.Role) != StandbyLeader && member.State == "running" { - candidates = append(candidates, member) - } - } - - if len(candidates) > 0 { - sort.Slice(candidates, func(i, j int) bool { - return candidates[i].Lag < candidates[j].Lag - }) - return spec.NamespacedName{Namespace: master.Namespace, Name: candidates[0].Name}, nil - } + } + if len(candidates) > 0 { + sort.Slice(candidates, func(i, j int) bool { + return candidates[i].Lag < candidates[j].Lag + }) + return spec.NamespacedName{Namespace: master.Namespace, Name: candidates[0].Name}, nil } return spec.NamespacedName{}, fmt.Errorf("no switchover candidate found") diff --git a/pkg/cluster/pod_test.go b/pkg/cluster/pod_test.go index 6a642387e..6816b4d7a 100644 --- a/pkg/cluster/pod_test.go +++ b/pkg/cluster/pod_test.go @@ -3,7 +3,7 @@ package cluster import ( "bytes" "fmt" - "io/ioutil" + "io" "net/http" "testing" "time" @@ -42,28 +42,28 @@ func TestGetSwitchoverCandidate(t *testing.T) { }{ { subtest: "choose sync_standby over replica", - clusterJson: `{"members": [{"name": "acid-test-cluster-0", "role": "leader", "state": 
"running", "api_url": "http://192.168.100.1:8008/patroni", "host": "192.168.100.1", "port": 5432, "timeline": 1}, {"name": "acid-test-cluster-1", "role": "sync_standby", "state": "running", "api_url": "http://192.168.100.2:8008/patroni", "host": "192.168.100.2", "port": 5432, "timeline": 1, "lag": 0}, {"name": "acid-test-cluster-2", "role": "replica", "state": "running", "api_url": "http://192.168.100.3:8008/patroni", "host": "192.168.100.3", "port": 5432, "timeline": 1, "lag": 0}]}`, + clusterJson: `{"members": [{"name": "acid-test-cluster-0", "role": "leader", "state": "running", "api_url": "http://192.168.100.1:8008/patroni", "host": "192.168.100.1", "port": 5432, "timeline": 1}, {"name": "acid-test-cluster-1", "role": "sync_standby", "state": "streaming", "api_url": "http://192.168.100.2:8008/patroni", "host": "192.168.100.2", "port": 5432, "timeline": 1, "lag": 0}, {"name": "acid-test-cluster-2", "role": "replica", "state": "streaming", "api_url": "http://192.168.100.3:8008/patroni", "host": "192.168.100.3", "port": 5432, "timeline": 1, "lag": 0}]}`, syncModeEnabled: true, expectedCandidate: spec.NamespacedName{Namespace: namespace, Name: "acid-test-cluster-1"}, expectedError: nil, }, { subtest: "no running sync_standby available", - clusterJson: `{"members": [{"name": "acid-test-cluster-0", "role": "leader", "state": "running", "api_url": "http://192.168.100.1:8008/patroni", "host": "192.168.100.1", "port": 5432, "timeline": 1}, {"name": "acid-test-cluster-1", "role": "replica", "state": "running", "api_url": "http://192.168.100.2:8008/patroni", "host": "192.168.100.2", "port": 5432, "timeline": 1, "lag": 0}]}`, + clusterJson: `{"members": [{"name": "acid-test-cluster-0", "role": "leader", "state": "running", "api_url": "http://192.168.100.1:8008/patroni", "host": "192.168.100.1", "port": 5432, "timeline": 1}, {"name": "acid-test-cluster-1", "role": "replica", "state": "streaming", "api_url": "http://192.168.100.2:8008/patroni", "host": "192.168.100.2", "port": 5432, "timeline": 1, "lag": 0}]}`, syncModeEnabled: true, expectedCandidate: spec.NamespacedName{}, expectedError: fmt.Errorf("failed to get Patroni cluster members: unexpected end of JSON input"), }, { subtest: "choose replica with lowest lag", - clusterJson: `{"members": [{"name": "acid-test-cluster-0", "role": "leader", "state": "running", "api_url": "http://192.168.100.1:8008/patroni", "host": "192.168.100.1", "port": 5432, "timeline": 1}, {"name": "acid-test-cluster-1", "role": "replica", "state": "running", "api_url": "http://192.168.100.2:8008/patroni", "host": "192.168.100.2", "port": 5432, "timeline": 1, "lag": 5}, {"name": "acid-test-cluster-2", "role": "replica", "state": "running", "api_url": "http://192.168.100.3:8008/patroni", "host": "192.168.100.3", "port": 5432, "timeline": 1, "lag": 2}]}`, + clusterJson: `{"members": [{"name": "acid-test-cluster-0", "role": "leader", "state": "running", "api_url": "http://192.168.100.1:8008/patroni", "host": "192.168.100.1", "port": 5432, "timeline": 1}, {"name": "acid-test-cluster-1", "role": "replica", "state": "streaming", "api_url": "http://192.168.100.2:8008/patroni", "host": "192.168.100.2", "port": 5432, "timeline": 1, "lag": 5}, {"name": "acid-test-cluster-2", "role": "replica", "state": "streaming", "api_url": "http://192.168.100.3:8008/patroni", "host": "192.168.100.3", "port": 5432, "timeline": 1, "lag": 2}]}`, syncModeEnabled: false, expectedCandidate: spec.NamespacedName{Namespace: namespace, Name: "acid-test-cluster-2"}, expectedError: nil, }, { - subtest: 
"choose first replica when lag is equal evrywhere", - clusterJson: `{"members": [{"name": "acid-test-cluster-0", "role": "leader", "state": "running", "api_url": "http://192.168.100.1:8008/patroni", "host": "192.168.100.1", "port": 5432, "timeline": 1}, {"name": "acid-test-cluster-1", "role": "replica", "state": "running", "api_url": "http://192.168.100.2:8008/patroni", "host": "192.168.100.2", "port": 5432, "timeline": 1, "lag": 5}, {"name": "acid-test-cluster-2", "role": "replica", "state": "running", "api_url": "http://192.168.100.3:8008/patroni", "host": "192.168.100.3", "port": 5432, "timeline": 1, "lag": 5}]}`, + subtest: "choose first replica when lag is equal everywhere", + clusterJson: `{"members": [{"name": "acid-test-cluster-0", "role": "leader", "state": "running", "api_url": "http://192.168.100.1:8008/patroni", "host": "192.168.100.1", "port": 5432, "timeline": 1}, {"name": "acid-test-cluster-1", "role": "replica", "state": "streaming", "api_url": "http://192.168.100.2:8008/patroni", "host": "192.168.100.2", "port": 5432, "timeline": 1, "lag": 5}, {"name": "acid-test-cluster-2", "role": "replica", "state": "running", "api_url": "http://192.168.100.3:8008/patroni", "host": "192.168.100.3", "port": 5432, "timeline": 1, "lag": 5}]}`, syncModeEnabled: false, expectedCandidate: spec.NamespacedName{Namespace: namespace, Name: "acid-test-cluster-1"}, expectedError: nil, @@ -73,13 +73,20 @@ func TestGetSwitchoverCandidate(t *testing.T) { clusterJson: `{"members": [{"name": "acid-test-cluster-0", "role": "leader", "state": "running", "api_url": "http://192.168.100.1:8008/patroni", "host": "192.168.100.1", "port": 5432, "timeline": 2}, {"name": "acid-test-cluster-1", "role": "replica", "state": "starting", "api_url": "http://192.168.100.2:8008/patroni", "host": "192.168.100.2", "port": 5432, "timeline": 2}]}`, syncModeEnabled: false, expectedCandidate: spec.NamespacedName{}, - expectedError: fmt.Errorf("no switchover candidate found"), + expectedError: fmt.Errorf("failed to get Patroni cluster members: unexpected end of JSON input"), + }, + { + subtest: "replicas with different status", + clusterJson: `{"members": [{"name": "acid-test-cluster-0", "role": "leader", "state": "running", "api_url": "http://192.168.100.1:8008/patroni", "host": "192.168.100.1", "port": 5432, "timeline": 1}, {"name": "acid-test-cluster-1", "role": "replica", "state": "streaming", "api_url": "http://192.168.100.2:8008/patroni", "host": "192.168.100.2", "port": 5432, "timeline": 1, "lag": 5}, {"name": "acid-test-cluster-2", "role": "replica", "state": "in archive recovery", "api_url": "http://192.168.100.3:8008/patroni", "host": "192.168.100.3", "port": 5432, "timeline": 1, "lag": 2}]}`, + syncModeEnabled: false, + expectedCandidate: spec.NamespacedName{Namespace: namespace, Name: "acid-test-cluster-2"}, + expectedError: nil, }, } for _, tt := range tests { // mocking cluster members - r := ioutil.NopCloser(bytes.NewReader([]byte(tt.clusterJson))) + r := io.NopCloser(bytes.NewReader([]byte(tt.clusterJson))) response := http.Response{ StatusCode: 200, diff --git a/pkg/cluster/resources.go b/pkg/cluster/resources.go index eb68e9fb2..2c87efe47 100644 --- a/pkg/cluster/resources.go +++ b/pkg/cluster/resources.go @@ -23,28 +23,49 @@ const ( ) func (c *Cluster) listResources() error { - if c.PodDisruptionBudget != nil { - c.logger.Infof("found pod disruption budget: %q (uid: %q)", util.NameFromMeta(c.PodDisruptionBudget.ObjectMeta), c.PodDisruptionBudget.UID) + if c.PrimaryPodDisruptionBudget != nil { + 
c.logger.Infof("found primary pod disruption budget: %q (uid: %q)", util.NameFromMeta(c.PrimaryPodDisruptionBudget.ObjectMeta), c.PrimaryPodDisruptionBudget.UID) + } + + if c.CriticalOpPodDisruptionBudget != nil { + c.logger.Infof("found pod disruption budget for critical operations: %q (uid: %q)", util.NameFromMeta(c.CriticalOpPodDisruptionBudget.ObjectMeta), c.CriticalOpPodDisruptionBudget.UID) + } if c.Statefulset != nil { c.logger.Infof("found statefulset: %q (uid: %q)", util.NameFromMeta(c.Statefulset.ObjectMeta), c.Statefulset.UID) } - for _, obj := range c.Secrets { - c.logger.Infof("found secret: %q (uid: %q) namesapce: %s", util.NameFromMeta(obj.ObjectMeta), obj.UID, obj.ObjectMeta.Namespace) + for appId, stream := range c.Streams { + c.logger.Infof("found stream: %q with application id %q (uid: %q)", util.NameFromMeta(stream.ObjectMeta), appId, stream.UID) } - if !c.patroniKubernetesUseConfigMaps() { - for role, endpoint := range c.Endpoints { - c.logger.Infof("found %s endpoint: %q (uid: %q)", role, util.NameFromMeta(endpoint.ObjectMeta), endpoint.UID) - } + if c.LogicalBackupJob != nil { + c.logger.Infof("found logical backup job: %q (uid: %q)", util.NameFromMeta(c.LogicalBackupJob.ObjectMeta), c.LogicalBackupJob.UID) + } + + for uid, secret := range c.Secrets { + c.logger.Infof("found secret: %q (uid: %q) namespace: %s", util.NameFromMeta(secret.ObjectMeta), uid, secret.ObjectMeta.Namespace) } for role, service := range c.Services { c.logger.Infof("found %s service: %q (uid: %q)", role, util.NameFromMeta(service.ObjectMeta), service.UID) } + for role, endpoint := range c.Endpoints { + c.logger.Infof("found %s endpoint: %q (uid: %q)", role, util.NameFromMeta(endpoint.ObjectMeta), endpoint.UID) + } + + if c.patroniKubernetesUseConfigMaps() { + for suffix, configmap := range c.PatroniConfigMaps { + c.logger.Infof("found %s Patroni config map: %q (uid: %q)", suffix, util.NameFromMeta(configmap.ObjectMeta), configmap.UID) + } + } else { + for suffix, endpoint := range c.PatroniEndpoints { + c.logger.Infof("found %s Patroni endpoint: %q (uid: %q)", suffix, util.NameFromMeta(endpoint.ObjectMeta), endpoint.UID) + } + } + pods, err := c.listPods() if err != nil { return fmt.Errorf("could not get the list of pods: %v", err) @@ -54,13 +75,17 @@ func (c *Cluster) listResources() error { c.logger.Infof("found pod: %q (uid: %q)", util.NameFromMeta(obj.ObjectMeta), obj.UID) } - pvcs, err := c.listPersistentVolumeClaims() - if err != nil { - return fmt.Errorf("could not get the list of PVCs: %v", err) + for uid, pvc := range c.VolumeClaims { + c.logger.Infof("found persistent volume claim: %q (uid: %q)", util.NameFromMeta(pvc.ObjectMeta), uid) } - for _, obj := range pvcs { - c.logger.Infof("found PVC: %q (uid: %q)", util.NameFromMeta(obj.ObjectMeta), obj.UID) + for role, poolerObjs := range c.ConnectionPooler { + if poolerObjs.Deployment != nil { + c.logger.Infof("found %s pooler deployment: %q (uid: %q) ", role, util.NameFromMeta(poolerObjs.Deployment.ObjectMeta), poolerObjs.Deployment.UID) + } + if poolerObjs.Service != nil { + c.logger.Infof("found %s pooler service: %q (uid: %q) ", role, util.NameFromMeta(poolerObjs.Service.ObjectMeta), poolerObjs.Service.UID) + } } return nil @@ -142,8 +167,8 @@ func (c *Cluster) preScaleDown(newStatefulSet *appsv1.StatefulSet) error { return fmt.Errorf("pod %q does not belong to cluster", podName) } - if err := c.patroni.Switchover(&masterPod[0], masterCandidatePod.Name); err != nil { - return fmt.Errorf("could not failover: %v", err) + if err := 
c.patroni.Switchover(&masterPod[0], masterCandidatePod.Name, ""); err != nil { + return fmt.Errorf("could not switchover: %v", err) } return nil @@ -162,7 +187,7 @@ func (c *Cluster) updateStatefulSet(newStatefulSet *appsv1.StatefulSet) error { c.logger.Warningf("could not scale down: %v", err) } } - c.logger.Debugf("updating statefulset") + c.logger.Debug("updating statefulset") patchData, err := specPatch(newStatefulSet.Spec) if err != nil { @@ -193,7 +218,7 @@ func (c *Cluster) replaceStatefulSet(newStatefulSet *appsv1.StatefulSet) error { } statefulSetName := util.NameFromMeta(c.Statefulset.ObjectMeta) - c.logger.Debugf("replacing statefulset") + c.logger.Debug("replacing statefulset") // Delete the current statefulset without deleting the pods deletePropagationPolicy := metav1.DeletePropagationOrphan @@ -207,7 +232,7 @@ func (c *Cluster) replaceStatefulSet(newStatefulSet *appsv1.StatefulSet) error { // make sure we clear the stored statefulset status if the subsequent create fails. c.Statefulset = nil // wait until the statefulset is truly deleted - c.logger.Debugf("waiting for the statefulset to be deleted") + c.logger.Debug("waiting for the statefulset to be deleted") err = retryutil.Retry(c.OpConfig.ResourceCheckInterval, c.OpConfig.ResourceCheckTimeout, func() (bool, error) { @@ -241,15 +266,19 @@ func (c *Cluster) replaceStatefulSet(newStatefulSet *appsv1.StatefulSet) error { func (c *Cluster) deleteStatefulSet() error { c.setProcessName("deleting statefulset") - c.logger.Debugln("deleting statefulset") + c.logger.Debug("deleting statefulset") if c.Statefulset == nil { - return fmt.Errorf("there is no statefulset in the cluster") + c.logger.Debug("there is no statefulset in the cluster") + return nil } err := c.KubeClient.StatefulSets(c.Statefulset.Namespace).Delete(context.TODO(), c.Statefulset.Name, c.deleteOptions) - if err != nil { + if k8sutil.ResourceNotFound(err) { + c.logger.Debugf("statefulset %q has already been deleted", util.NameFromMeta(c.Statefulset.ObjectMeta)) + } else if err != nil { return err } + c.logger.Infof("statefulset %q has been deleted", util.NameFromMeta(c.Statefulset.ObjectMeta)) c.Statefulset = nil @@ -257,8 +286,12 @@ func (c *Cluster) deleteStatefulSet() error { return fmt.Errorf("could not delete pods: %v", err) } - if err := c.deletePersistentVolumeClaims(); err != nil { - return fmt.Errorf("could not delete PersistentVolumeClaims: %v", err) + if c.OpConfig.EnablePersistentVolumeClaimDeletion != nil && *c.OpConfig.EnablePersistentVolumeClaimDeletion { + if err := c.deletePersistentVolumeClaims(); err != nil { + return fmt.Errorf("could not delete persistent volume claims: %v", err) + } + } else { + c.logger.Info("not deleting persistent volume claims because disabled in configuration") } return nil @@ -278,55 +311,37 @@ func (c *Cluster) createService(role PostgresRole) (*v1.Service, error) { } func (c *Cluster) updateService(role PostgresRole, oldService *v1.Service, newService *v1.Service) (*v1.Service, error) { - var ( - svc *v1.Service - err error - ) - - c.setProcessName("updating %v service", role) + var err error + svc := oldService serviceName := util.NameFromMeta(oldService.ObjectMeta) - - // update the service annotation in order to propagate ELB notation. 
- if len(newService.ObjectMeta.Annotations) > 0 { - if annotationsPatchData, err := metaAnnotationsPatch(newService.ObjectMeta.Annotations); err == nil { - _, err = c.KubeClient.Services(serviceName.Namespace).Patch( - context.TODO(), - serviceName.Name, - types.MergePatchType, - []byte(annotationsPatchData), - metav1.PatchOptions{}, - "") - - if err != nil { - return nil, fmt.Errorf("could not replace annotations for the service %q: %v", serviceName, err) - } - } else { - return nil, fmt.Errorf("could not form patch for the service metadata: %v", err) + match, reason := c.compareServices(oldService, newService) + if !match { + c.logServiceChanges(role, oldService, newService, false, reason) + c.setProcessName("updating %v service", role) + + // now, patch the service spec, but when disabling LoadBalancers do update instead + // patch does not work because of LoadBalancerSourceRanges field (even if set to nil) + oldServiceType := oldService.Spec.Type + newServiceType := newService.Spec.Type + if newServiceType == "ClusterIP" && newServiceType != oldServiceType { + newService.ResourceVersion = oldService.ResourceVersion + newService.Spec.ClusterIP = oldService.Spec.ClusterIP } - } - - // now, patch the service spec, but when disabling LoadBalancers do update instead - // patch does not work because of LoadBalancerSourceRanges field (even if set to nil) - oldServiceType := oldService.Spec.Type - newServiceType := newService.Spec.Type - if newServiceType == "ClusterIP" && newServiceType != oldServiceType { - newService.ResourceVersion = oldService.ResourceVersion - newService.Spec.ClusterIP = oldService.Spec.ClusterIP svc, err = c.KubeClient.Services(serviceName.Namespace).Update(context.TODO(), newService, metav1.UpdateOptions{}) if err != nil { return nil, fmt.Errorf("could not update service %q: %v", serviceName, err) } - } else { - patchData, err := specPatch(newService.Spec) + } + + if changed, _ := c.compareAnnotations(oldService.Annotations, newService.Annotations, nil); changed { + patchData, err := metaAnnotationsPatch(newService.Annotations) if err != nil { - return nil, fmt.Errorf("could not form patch for the service %q: %v", serviceName, err) + return nil, fmt.Errorf("could not form patch for service %q annotations: %v", oldService.Name, err) } - - svc, err = c.KubeClient.Services(serviceName.Namespace).Patch( - context.TODO(), serviceName.Name, types.MergePatchType, patchData, metav1.PatchOptions{}, "") + svc, err = c.KubeClient.Services(serviceName.Namespace).Patch(context.TODO(), newService.Name, types.MergePatchType, []byte(patchData), metav1.PatchOptions{}) if err != nil { - return nil, fmt.Errorf("could not patch service %q: %v", serviceName, err) + return nil, fmt.Errorf("could not patch annotations for service %q: %v", oldService.Name, err) } } @@ -334,20 +349,23 @@ func (c *Cluster) updateService(role PostgresRole, oldService *v1.Service, newSe } func (c *Cluster) deleteService(role PostgresRole) error { - c.logger.Debugf("deleting service %s", role) + c.setProcessName("deleting service") + c.logger.Debugf("deleting %s service", role) - service, ok := c.Services[role] - if !ok { + if c.Services[role] == nil { c.logger.Debugf("No service for %s role was found, nothing to delete", role) return nil } - if err := c.KubeClient.Services(service.Namespace).Delete(context.TODO(), service.Name, c.deleteOptions); err != nil { - return err + if err := c.KubeClient.Services(c.Services[role].Namespace).Delete(context.TODO(), c.Services[role].Name, c.deleteOptions); err != nil { + if 
!k8sutil.ResourceNotFound(err) { + return fmt.Errorf("could not delete %s service: %v", role, err) + } + c.logger.Debugf("%s service has already been deleted", role) } - c.logger.Infof("%s service %q has been deleted", role, util.NameFromMeta(service.ObjectMeta)) - c.Services[role] = nil + c.logger.Infof("%s service %q has been deleted", role, util.NameFromMeta(c.Services[role].ObjectMeta)) + delete(c.Services, role) return nil } @@ -404,55 +422,166 @@ func (c *Cluster) generateEndpointSubsets(role PostgresRole) []v1.EndpointSubset return result } -func (c *Cluster) createPodDisruptionBudget() (*policyv1.PodDisruptionBudget, error) { - podDisruptionBudgetSpec := c.generatePodDisruptionBudget() +func (c *Cluster) createPrimaryPodDisruptionBudget() error { + c.logger.Debug("creating primary pod disruption budget") + if c.PrimaryPodDisruptionBudget != nil { + c.logger.Warning("primary pod disruption budget already exists in the cluster") + return nil + } + + podDisruptionBudgetSpec := c.generatePrimaryPodDisruptionBudget() podDisruptionBudget, err := c.KubeClient. PodDisruptionBudgets(podDisruptionBudgetSpec.Namespace). Create(context.TODO(), podDisruptionBudgetSpec, metav1.CreateOptions{}) if err != nil { - return nil, err + return err } - c.PodDisruptionBudget = podDisruptionBudget + c.logger.Infof("primary pod disruption budget %q has been successfully created", util.NameFromMeta(podDisruptionBudget.ObjectMeta)) + c.PrimaryPodDisruptionBudget = podDisruptionBudget - return podDisruptionBudget, nil + return nil +} + +func (c *Cluster) createCriticalOpPodDisruptionBudget() error { + c.logger.Debug("creating pod disruption budget for critical operations") + if c.CriticalOpPodDisruptionBudget != nil { + c.logger.Warning("pod disruption budget for critical operations already exists in the cluster") + return nil + } + + podDisruptionBudgetSpec := c.generateCriticalOpPodDisruptionBudget() + podDisruptionBudget, err := c.KubeClient. + PodDisruptionBudgets(podDisruptionBudgetSpec.Namespace). 
+ Create(context.TODO(), podDisruptionBudgetSpec, metav1.CreateOptions{}) + + if err != nil { + return err + } + c.logger.Infof("pod disruption budget for critical operations %q has been successfully created", util.NameFromMeta(podDisruptionBudget.ObjectMeta)) + c.CriticalOpPodDisruptionBudget = podDisruptionBudget + + return nil +} + +func (c *Cluster) createPodDisruptionBudgets() error { + errors := make([]string, 0) + + err := c.createPrimaryPodDisruptionBudget() + if err != nil { + errors = append(errors, fmt.Sprintf("could not create primary pod disruption budget: %v", err)) + } + + err = c.createCriticalOpPodDisruptionBudget() + if err != nil { + errors = append(errors, fmt.Sprintf("could not create pod disruption budget for critical operations: %v", err)) + } + + if len(errors) > 0 { + return fmt.Errorf("%v", strings.Join(errors, `', '`)) + } + return nil } -func (c *Cluster) updatePodDisruptionBudget(pdb *policyv1.PodDisruptionBudget) error { - if c.PodDisruptionBudget == nil { - return fmt.Errorf("there is no pod disruption budget in the cluster") +func (c *Cluster) updatePrimaryPodDisruptionBudget(pdb *policyv1.PodDisruptionBudget) error { + c.logger.Debug("updating primary pod disruption budget") + if c.PrimaryPodDisruptionBudget == nil { + return fmt.Errorf("there is no primary pod disruption budget in the cluster") } - if err := c.deletePodDisruptionBudget(); err != nil { - return fmt.Errorf("could not delete pod disruption budget: %v", err) + if err := c.deletePrimaryPodDisruptionBudget(); err != nil { + return fmt.Errorf("could not delete primary pod disruption budget: %v", err) } newPdb, err := c.KubeClient. PodDisruptionBudgets(pdb.Namespace). Create(context.TODO(), pdb, metav1.CreateOptions{}) if err != nil { - return fmt.Errorf("could not create pod disruption budget: %v", err) + return fmt.Errorf("could not create primary pod disruption budget: %v", err) } - c.PodDisruptionBudget = newPdb + c.PrimaryPodDisruptionBudget = newPdb return nil } -func (c *Cluster) deletePodDisruptionBudget() error { - c.logger.Debug("deleting pod disruption budget") - if c.PodDisruptionBudget == nil { - return fmt.Errorf("there is no pod disruption budget in the cluster") +func (c *Cluster) updateCriticalOpPodDisruptionBudget(pdb *policyv1.PodDisruptionBudget) error { + c.logger.Debug("updating pod disruption budget for critical operations") + if c.CriticalOpPodDisruptionBudget == nil { + return fmt.Errorf("there is no pod disruption budget for critical operations in the cluster") + } + + if err := c.deleteCriticalOpPodDisruptionBudget(); err != nil { + return fmt.Errorf("could not delete pod disruption budget for critical operations: %v", err) + } + + newPdb, err := c.KubeClient. + PodDisruptionBudgets(pdb.Namespace). + Create(context.TODO(), pdb, metav1.CreateOptions{}) + if err != nil { + return fmt.Errorf("could not create pod disruption budget for critical operations: %v", err) + } + c.CriticalOpPodDisruptionBudget = newPdb + + return nil +} + +func (c *Cluster) deletePrimaryPodDisruptionBudget() error { + c.logger.Debug("deleting primary pod disruption budget") + if c.PrimaryPodDisruptionBudget == nil { + c.logger.Debug("there is no primary pod disruption budget in the cluster") + return nil } - pdbName := util.NameFromMeta(c.PodDisruptionBudget.ObjectMeta) + pdbName := util.NameFromMeta(c.PrimaryPodDisruptionBudget.ObjectMeta) err := c.KubeClient. - PodDisruptionBudgets(c.PodDisruptionBudget.Namespace). 
- Delete(context.TODO(), c.PodDisruptionBudget.Name, c.deleteOptions) + PodDisruptionBudgets(c.PrimaryPodDisruptionBudget.Namespace). + Delete(context.TODO(), c.PrimaryPodDisruptionBudget.Name, c.deleteOptions) + if k8sutil.ResourceNotFound(err) { + c.logger.Debugf("PodDisruptionBudget %q has already been deleted", util.NameFromMeta(c.PrimaryPodDisruptionBudget.ObjectMeta)) + } else if err != nil { + return fmt.Errorf("could not delete primary pod disruption budget: %v", err) + } + + c.logger.Infof("pod disruption budget %q has been deleted", util.NameFromMeta(c.PrimaryPodDisruptionBudget.ObjectMeta)) + c.PrimaryPodDisruptionBudget = nil + + err = retryutil.Retry(c.OpConfig.ResourceCheckInterval, c.OpConfig.ResourceCheckTimeout, + func() (bool, error) { + _, err2 := c.KubeClient.PodDisruptionBudgets(pdbName.Namespace).Get(context.TODO(), pdbName.Name, metav1.GetOptions{}) + if err2 == nil { + return false, nil + } + if k8sutil.ResourceNotFound(err2) { + return true, nil + } + return false, err2 + }) if err != nil { - return fmt.Errorf("could not delete pod disruption budget: %v", err) + return fmt.Errorf("could not delete primary pod disruption budget: %v", err) + } + + return nil +} + +func (c *Cluster) deleteCriticalOpPodDisruptionBudget() error { + c.logger.Debug("deleting pod disruption budget for critical operations") + if c.CriticalOpPodDisruptionBudget == nil { + c.logger.Debug("there is no pod disruption budget for critical operations in the cluster") + return nil + } + + pdbName := util.NameFromMeta(c.CriticalOpPodDisruptionBudget.ObjectMeta) + err := c.KubeClient. + PodDisruptionBudgets(c.CriticalOpPodDisruptionBudget.Namespace). + Delete(context.TODO(), c.CriticalOpPodDisruptionBudget.Name, c.deleteOptions) + if k8sutil.ResourceNotFound(err) { + c.logger.Debugf("PodDisruptionBudget %q has already been deleted", util.NameFromMeta(c.CriticalOpPodDisruptionBudget.ObjectMeta)) + } else if err != nil { + return fmt.Errorf("could not delete pod disruption budget for critical operations: %v", err) } - c.logger.Infof("pod disruption budget %q has been deleted", util.NameFromMeta(c.PodDisruptionBudget.ObjectMeta)) - c.PodDisruptionBudget = nil + + c.logger.Infof("pod disruption budget %q has been deleted", util.NameFromMeta(c.CriticalOpPodDisruptionBudget.ObjectMeta)) + c.CriticalOpPodDisruptionBudget = nil err = retryutil.Retry(c.OpConfig.ResourceCheckInterval, c.OpConfig.ResourceCheckTimeout, func() (bool, error) { @@ -466,27 +595,117 @@ func (c *Cluster) deletePodDisruptionBudget() error { return false, err2 }) if err != nil { - return fmt.Errorf("could not delete pod disruption budget: %v", err) + return fmt.Errorf("could not delete pod disruption budget for critical operations: %v", err) } return nil } +func (c *Cluster) deletePodDisruptionBudgets() error { + errors := make([]string, 0) + + if err := c.deletePrimaryPodDisruptionBudget(); err != nil { + errors = append(errors, fmt.Sprintf("%v", err)) + } + + if err := c.deleteCriticalOpPodDisruptionBudget(); err != nil { + errors = append(errors, fmt.Sprintf("%v", err)) + } + + if len(errors) > 0 { + return fmt.Errorf("%v", strings.Join(errors, `', '`)) + } + return nil +} + func (c *Cluster) deleteEndpoint(role PostgresRole) error { c.setProcessName("deleting endpoint") - c.logger.Debugln("deleting endpoint") + c.logger.Debugf("deleting %s endpoint", role) if c.Endpoints[role] == nil { - return fmt.Errorf("there is no %s endpoint in the cluster", role) + c.logger.Debugf("there is no %s endpoint in the cluster", role) + return nil } 
- if err := c.KubeClient.Endpoints(c.Endpoints[role].Namespace).Delete( - context.TODO(), c.Endpoints[role].Name, c.deleteOptions); err != nil { - return fmt.Errorf("could not delete endpoint: %v", err) + if err := c.KubeClient.Endpoints(c.Endpoints[role].Namespace).Delete(context.TODO(), c.Endpoints[role].Name, c.deleteOptions); err != nil { + if !k8sutil.ResourceNotFound(err) { + return fmt.Errorf("could not delete %s endpoint: %v", role, err) + } + c.logger.Debugf("%s endpoint has already been deleted", role) } - c.logger.Infof("endpoint %q has been deleted", util.NameFromMeta(c.Endpoints[role].ObjectMeta)) + c.logger.Infof("%s endpoint %q has been deleted", role, util.NameFromMeta(c.Endpoints[role].ObjectMeta)) + delete(c.Endpoints, role) - c.Endpoints[role] = nil + return nil +} + +func (c *Cluster) deletePatroniResources() error { + c.setProcessName("deleting Patroni resources") + errors := make([]string, 0) + + if err := c.deleteService(Patroni); err != nil { + errors = append(errors, fmt.Sprintf("%v", err)) + } + + for _, suffix := range patroniObjectSuffixes { + if c.patroniKubernetesUseConfigMaps() { + if err := c.deletePatroniConfigMap(suffix); err != nil { + errors = append(errors, fmt.Sprintf("%v", err)) + } + } else { + if err := c.deletePatroniEndpoint(suffix); err != nil { + errors = append(errors, fmt.Sprintf("%v", err)) + } + } + } + + if len(errors) > 0 { + return fmt.Errorf("%v", strings.Join(errors, `', '`)) + } + + return nil +} + +func (c *Cluster) deletePatroniConfigMap(suffix string) error { + c.setProcessName("deleting Patroni config map") + c.logger.Debugf("deleting %s Patroni config map", suffix) + cm := c.PatroniConfigMaps[suffix] + if cm == nil { + c.logger.Debugf("there is no %s Patroni config map in the cluster", suffix) + return nil + } + + if err := c.KubeClient.ConfigMaps(cm.Namespace).Delete(context.TODO(), cm.Name, c.deleteOptions); err != nil { + if !k8sutil.ResourceNotFound(err) { + return fmt.Errorf("could not delete %s Patroni config map %q: %v", suffix, cm.Name, err) + } + c.logger.Debugf("%s Patroni config map has already been deleted", suffix) + } + + c.logger.Infof("%s Patroni config map %q has been deleted", suffix, util.NameFromMeta(cm.ObjectMeta)) + delete(c.PatroniConfigMaps, suffix) + + return nil +} + +func (c *Cluster) deletePatroniEndpoint(suffix string) error { + c.setProcessName("deleting Patroni endpoint") + c.logger.Debugf("deleting %s Patroni endpoint", suffix) + ep := c.PatroniEndpoints[suffix] + if ep == nil { + c.logger.Debugf("there is no %s Patroni endpoint in the cluster", suffix) + return nil + } + + if err := c.KubeClient.Endpoints(ep.Namespace).Delete(context.TODO(), ep.Name, c.deleteOptions); err != nil { + if !k8sutil.ResourceNotFound(err) { + return fmt.Errorf("could not delete %s Patroni endpoint %q: %v", suffix, ep.Name, err) + } + c.logger.Debugf("%s Patroni endpoint has already been deleted", suffix) + } + + c.logger.Infof("%s Patroni endpoint %q has been deleted", suffix, util.NameFromMeta(ep.ObjectMeta)) + delete(c.PatroniEndpoints, suffix) return nil } @@ -495,8 +714,8 @@ func (c *Cluster) deleteSecrets() error { c.setProcessName("deleting secrets") errors := make([]string, 0) - for uid, secret := range c.Secrets { - err := c.deleteSecret(uid, *secret) + for uid := range c.Secrets { + err := c.deleteSecret(uid) if err != nil { errors = append(errors, fmt.Sprintf("%v", err)) } @@ -509,12 +728,15 @@ func (c *Cluster) deleteSecrets() error { return nil } -func (c *Cluster) deleteSecret(uid types.UID, secret 
v1.Secret) error { +func (c *Cluster) deleteSecret(uid types.UID) error { c.setProcessName("deleting secret") + secret := c.Secrets[uid] secretName := util.NameFromMeta(secret.ObjectMeta) c.logger.Debugf("deleting secret %q", secretName) err := c.KubeClient.Secrets(secret.Namespace).Delete(context.TODO(), secret.Name, c.deleteOptions) - if err != nil { + if k8sutil.ResourceNotFound(err) { + c.logger.Debugf("secret %q has already been deleted", secretName) + } else if err != nil { return fmt.Errorf("could not delete secret %q: %v", secretName, err) } c.logger.Infof("secret %q has been deleted", secretName) @@ -536,12 +758,12 @@ func (c *Cluster) createLogicalBackupJob() (err error) { if err != nil { return fmt.Errorf("could not generate k8s cron job spec: %v", err) } - c.logger.Debugf("Generated cronJobSpec: %v", logicalBackupJobSpec) - _, err = c.KubeClient.CronJobsGetter.CronJobs(c.Namespace).Create(context.TODO(), logicalBackupJobSpec, metav1.CreateOptions{}) + cronJob, err := c.KubeClient.CronJobsGetter.CronJobs(c.Namespace).Create(context.TODO(), logicalBackupJobSpec, metav1.CreateOptions{}) if err != nil { return fmt.Errorf("could not create k8s cron job: %v", err) } + c.LogicalBackupJob = cronJob return nil } @@ -555,7 +777,7 @@ func (c *Cluster) patchLogicalBackupJob(newJob *batchv1.CronJob) error { } // update the backup job spec - _, err = c.KubeClient.CronJobsGetter.CronJobs(c.Namespace).Patch( + cronJob, err := c.KubeClient.CronJobsGetter.CronJobs(c.Namespace).Patch( context.TODO(), c.getLogicalBackupJobName(), types.MergePatchType, @@ -565,15 +787,26 @@ func (c *Cluster) patchLogicalBackupJob(newJob *batchv1.CronJob) error { if err != nil { return fmt.Errorf("could not patch logical backup job: %v", err) } + c.LogicalBackupJob = cronJob return nil } func (c *Cluster) deleteLogicalBackupJob() error { - + if c.LogicalBackupJob == nil { + return nil + } c.logger.Info("removing the logical backup job") - return c.KubeClient.CronJobsGetter.CronJobs(c.Namespace).Delete(context.TODO(), c.getLogicalBackupJobName(), c.deleteOptions) + err := c.KubeClient.CronJobsGetter.CronJobs(c.LogicalBackupJob.Namespace).Delete(context.TODO(), c.getLogicalBackupJobName(), c.deleteOptions) + if k8sutil.ResourceNotFound(err) { + c.logger.Debugf("logical backup cron job %q has already been deleted", c.getLogicalBackupJobName()) + } else if err != nil { + return err + } + c.LogicalBackupJob = nil + + return nil } // GetServiceMaster returns cluster's kubernetes master Service @@ -601,7 +834,12 @@ func (c *Cluster) GetStatefulSet() *appsv1.StatefulSet { return c.Statefulset } -// GetPodDisruptionBudget returns cluster's kubernetes PodDisruptionBudget -func (c *Cluster) GetPodDisruptionBudget() *policyv1.PodDisruptionBudget { - return c.PodDisruptionBudget +// GetPrimaryPodDisruptionBudget returns cluster's primary kubernetes PodDisruptionBudget +func (c *Cluster) GetPrimaryPodDisruptionBudget() *policyv1.PodDisruptionBudget { + return c.PrimaryPodDisruptionBudget +} + +// GetCriticalOpPodDisruptionBudget returns cluster's kubernetes PodDisruptionBudget for critical operations +func (c *Cluster) GetCriticalOpPodDisruptionBudget() *policyv1.PodDisruptionBudget { + return c.CriticalOpPodDisruptionBudget } diff --git a/pkg/cluster/streams.go b/pkg/cluster/streams.go index 10df7974c..bf9be3fb4 100644 --- a/pkg/cluster/streams.go +++ b/pkg/cluster/streams.go @@ -2,6 +2,7 @@ package cluster import ( "context" + "encoding/json" "fmt" "reflect" "sort" @@ -13,6 +14,7 @@ import ( 
"github.com/zalando/postgres-operator/pkg/util/constants" "github.com/zalando/postgres-operator/pkg/util/k8sutil" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" ) func (c *Cluster) createStreams(appId string) (*zalandov1.FabricEventStream, error) { @@ -27,37 +29,48 @@ func (c *Cluster) createStreams(appId string) (*zalandov1.FabricEventStream, err return streamCRD, nil } -func (c *Cluster) updateStreams(newEventStreams *zalandov1.FabricEventStream) error { +func (c *Cluster) updateStreams(newEventStreams *zalandov1.FabricEventStream) (patchedStream *zalandov1.FabricEventStream, err error) { c.setProcessName("updating event streams") - if _, err := c.KubeClient.FabricEventStreams(newEventStreams.Namespace).Update(context.TODO(), newEventStreams, metav1.UpdateOptions{}); err != nil { - return err + patch, err := json.Marshal(newEventStreams) + if err != nil { + return nil, fmt.Errorf("could not marshal new event stream CRD %q: %v", newEventStreams.Name, err) + } + if patchedStream, err = c.KubeClient.FabricEventStreams(newEventStreams.Namespace).Patch( + context.TODO(), newEventStreams.Name, types.MergePatchType, patch, metav1.PatchOptions{}); err != nil { + return nil, err + } + + return patchedStream, nil +} + +func (c *Cluster) deleteStream(appId string) error { + c.setProcessName("deleting event stream") + c.logger.Debugf("deleting event stream with applicationId %s", appId) + + err := c.KubeClient.FabricEventStreams(c.Streams[appId].Namespace).Delete(context.TODO(), c.Streams[appId].Name, metav1.DeleteOptions{}) + if err != nil { + return fmt.Errorf("could not delete event stream %q with applicationId %s: %v", c.Streams[appId].Name, appId, err) } + c.logger.Infof("event stream %q with applicationId %s has been successfully deleted", c.Streams[appId].Name, appId) + delete(c.Streams, appId) return nil } func (c *Cluster) deleteStreams() error { - c.setProcessName("deleting event streams") - // check if stream CRD is installed before trying a delete _, err := c.KubeClient.CustomResourceDefinitions().Get(context.TODO(), constants.EventStreamCRDName, metav1.GetOptions{}) if k8sutil.ResourceNotFound(err) { return nil } - + c.setProcessName("deleting event streams") errors := make([]string, 0) - listOptions := metav1.ListOptions{ - LabelSelector: c.labelsSet(true).String(), - } - streams, err := c.KubeClient.FabricEventStreams(c.Namespace).List(context.TODO(), listOptions) - if err != nil { - return fmt.Errorf("could not list of FabricEventStreams: %v", err) - } - for _, stream := range streams.Items { - err = c.KubeClient.FabricEventStreams(stream.Namespace).Delete(context.TODO(), stream.Name, metav1.DeleteOptions{}) + + for appId := range c.Streams { + err := c.deleteStream(appId) if err != nil { - errors = append(errors, fmt.Sprintf("could not delete event stream %q: %v", stream.Name, err)) + errors = append(errors, fmt.Sprintf("%v", err)) } } @@ -68,7 +81,7 @@ func (c *Cluster) deleteStreams() error { return nil } -func gatherApplicationIds(streams []acidv1.Stream) []string { +func getDistinctApplicationIds(streams []acidv1.Stream) []string { appIds := make([]string, 0) for _, stream := range streams { if !util.SliceContains(appIds, stream.ApplicationId) { @@ -79,9 +92,10 @@ func gatherApplicationIds(streams []acidv1.Stream) []string { return appIds } -func (c *Cluster) syncPublication(publication, dbName string, tables map[string]acidv1.StreamTable) error { +func (c *Cluster) syncPublication(dbName string, databaseSlotsList map[string]zalandov1.Slot, 
slotsToSync *map[string]map[string]string) error { createPublications := make(map[string]string) alterPublications := make(map[string]string) + deletePublications := []string{} defer func() { if err := c.closeDbConn(); err != nil { @@ -91,7 +105,7 @@ func (c *Cluster) syncPublication(publication, dbName string, tables map[string] // check for existing publications if err := c.initDbConnWithName(dbName); err != nil { - return fmt.Errorf("could not init database connection") + return fmt.Errorf("could not init database connection: %v", err) } currentPublications, err := c.getPublications() @@ -99,36 +113,70 @@ func (c *Cluster) syncPublication(publication, dbName string, tables map[string] return fmt.Errorf("could not get current publications: %v", err) } - tableNames := make([]string, len(tables)) - i := 0 - for t := range tables { - tableName, schemaName := getTableSchema(t) - tableNames[i] = fmt.Sprintf("%s.%s", schemaName, tableName) - i++ + for slotName, slotAndPublication := range databaseSlotsList { + newTables := slotAndPublication.Publication + tableNames := make([]string, len(newTables)) + i := 0 + for t := range newTables { + tableName, schemaName := getTableSchema(t) + tableNames[i] = fmt.Sprintf("%s.%s", schemaName, tableName) + i++ + } + sort.Strings(tableNames) + tableList := strings.Join(tableNames, ", ") + + currentTables, exists := currentPublications[slotName] + // if newTables is empty it means that its definition was removed from the streams section + // but when the slot is defined in the manifest we should sync publications, too + // by reusing the current tables we make sure the publication is not emptied + if len(newTables) == 0 { + tableList = currentTables + } + if !exists { + createPublications[slotName] = tableList + } else if currentTables != tableList { + alterPublications[slotName] = tableList + } else { + (*slotsToSync)[slotName] = slotAndPublication.Slot + } } - sort.Strings(tableNames) - tableList := strings.Join(tableNames, ", ") - currentTables, exists := currentPublications[publication] - if !exists { - createPublications[publication] = tableList - } else if currentTables != tableList { - alterPublications[publication] = tableList + // check if there is any deletion + for slotName := range currentPublications { + if _, exists := databaseSlotsList[slotName]; !exists { + deletePublications = append(deletePublications, slotName) + } } - if len(createPublications)+len(alterPublications) == 0 { + if len(createPublications)+len(alterPublications)+len(deletePublications) == 0 { return nil } + errors := make([]string, 0) for publicationName, tables := range createPublications { if err = c.executeCreatePublication(publicationName, tables); err != nil { - return fmt.Errorf("creation of publication %q failed: %v", publicationName, err) + errors = append(errors, fmt.Sprintf("creation of publication %q failed: %v", publicationName, err)) + continue } + (*slotsToSync)[publicationName] = databaseSlotsList[publicationName].Slot } for publicationName, tables := range alterPublications { if err = c.executeAlterPublication(publicationName, tables); err != nil { - return fmt.Errorf("update of publication %q failed: %v", publicationName, err) + errors = append(errors, fmt.Sprintf("update of publication %q failed: %v", publicationName, err)) + continue } + (*slotsToSync)[publicationName] = databaseSlotsList[publicationName].Slot + } + for _, publicationName := range deletePublications { + if err = c.executeDropPublication(publicationName); err != nil { + errors = append(errors, fmt.Sprintf("deletion of 
publication %q failed: %v", publicationName, err)) + continue + } + (*slotsToSync)[publicationName] = nil + } + + if len(errors) > 0 { + return fmt.Errorf("%v", strings.Join(errors, `', '`)) } return nil @@ -136,16 +184,25 @@ func (c *Cluster) syncPublication(publication, dbName string, tables map[string] func (c *Cluster) generateFabricEventStream(appId string) *zalandov1.FabricEventStream { eventStreams := make([]zalandov1.EventStream, 0) + resourceAnnotations := map[string]string{} + var err, err2 error for _, stream := range c.Spec.Streams { if stream.ApplicationId != appId { continue } + + err = setResourceAnnotation(&resourceAnnotations, stream.CPU, constants.EventStreamCpuAnnotationKey) + err2 = setResourceAnnotation(&resourceAnnotations, stream.Memory, constants.EventStreamMemoryAnnotationKey) + if err != nil || err2 != nil { + c.logger.Warningf("could not set resource annotation for event stream: %v", err) + } + for tableName, table := range stream.Tables { streamSource := c.getEventStreamSource(stream, tableName, table.IdColumn) - streamFlow := getEventStreamFlow(stream, table.PayloadColumn) + streamFlow := getEventStreamFlow(table.PayloadColumn) streamSink := getEventStreamSink(stream, table.EventType) - streamRecovery := getEventStreamRecovery(stream, table.RecoveryEventType, table.EventType) + streamRecovery := getEventStreamRecovery(stream, table.RecoveryEventType, table.EventType, table.IgnoreRecovery) eventStreams = append(eventStreams, zalandov1.EventStream{ EventStreamFlow: streamFlow, @@ -162,11 +219,10 @@ func (c *Cluster) generateFabricEventStream(appId string) *zalandov1.FabricEvent }, ObjectMeta: metav1.ObjectMeta{ // max length for cluster name is 58 so we can only add 5 more characters / numbers - Name: fmt.Sprintf("%s-%s", c.Name, strings.ToLower(util.RandomPassword(5))), - Namespace: c.Namespace, - Labels: c.labelsSet(true), - Annotations: c.AnnotationsToPropagate(c.annotationsSet(nil)), - // make cluster StatefulSet the owner (like with connection pooler objects) + Name: fmt.Sprintf("%s-%s", c.Name, strings.ToLower(util.RandomPassword(5))), + Namespace: c.Namespace, + Labels: c.labelsSet(true), + Annotations: c.AnnotationsToPropagate(c.annotationsSet(resourceAnnotations)), OwnerReferences: c.ownerReferences(), }, Spec: zalandov1.FabricEventStreamSpec{ @@ -176,6 +232,27 @@ func (c *Cluster) generateFabricEventStream(appId string) *zalandov1.FabricEvent } } +func setResourceAnnotation(annotations *map[string]string, resource *string, key string) error { + var ( + isSmaller bool + err error + ) + if resource != nil { + currentValue, exists := (*annotations)[key] + if exists { + isSmaller, err = util.IsSmallerQuantity(currentValue, *resource) + if err != nil { + return fmt.Errorf("could not compare resource in %q annotation: %v", key, err) + } + } + if isSmaller || !exists { + (*annotations)[key] = *resource + } + } + + return nil +} + func (c *Cluster) getEventStreamSource(stream acidv1.Stream, tableName string, idColumn *string) zalandov1.EventStreamSource { table, schema := getTableSchema(tableName) streamFilter := stream.Filter[tableName] @@ -191,7 +268,7 @@ func (c *Cluster) getEventStreamSource(stream acidv1.Stream, tableName string, i } } -func getEventStreamFlow(stream acidv1.Stream, payloadColumn *string) zalandov1.EventStreamFlow { +func getEventStreamFlow(payloadColumn *string) zalandov1.EventStreamFlow { return zalandov1.EventStreamFlow{ Type: constants.EventStreamFlowPgGenericType, PayloadColumn: payloadColumn, @@ -206,7 +283,7 @@ func 
getEventStreamSink(stream acidv1.Stream, eventType string) zalandov1.EventS } } -func getEventStreamRecovery(stream acidv1.Stream, recoveryEventType, eventType string) zalandov1.EventStreamRecovery { +func getEventStreamRecovery(stream acidv1.Stream, recoveryEventType, eventType string, ignoreRecovery *bool) zalandov1.EventStreamRecovery { if (stream.EnableRecovery != nil && !*stream.EnableRecovery) || (stream.EnableRecovery == nil && recoveryEventType == "") { return zalandov1.EventStreamRecovery{ @@ -214,6 +291,12 @@ func getEventStreamRecovery(stream acidv1.Stream, recoveryEventType, eventType s } } + if ignoreRecovery != nil && *ignoreRecovery { + return zalandov1.EventStreamRecovery{ + Type: constants.EventStreamRecoveryIgnoreType, + } + } + if stream.EnableRecovery != nil && *stream.EnableRecovery && recoveryEventType == "" { recoveryEventType = fmt.Sprintf("%s-%s", eventType, constants.EventStreamRecoverySuffix) } @@ -269,59 +352,84 @@ func (c *Cluster) syncStreams() error { _, err := c.KubeClient.CustomResourceDefinitions().Get(context.TODO(), constants.EventStreamCRDName, metav1.GetOptions{}) if k8sutil.ResourceNotFound(err) { - c.logger.Debugf("event stream CRD not installed, skipping") + c.logger.Debug("event stream CRD not installed, skipping") return nil } - slots := make(map[string]map[string]string) + // create map with every database and empty slot definition + // we need it to detect removal of streams from databases + if err := c.initDbConn(); err != nil { + return fmt.Errorf("could not init database connection") + } + defer func() { + if err := c.closeDbConn(); err != nil { + c.logger.Errorf("could not close database connection: %v", err) + } + }() + listDatabases, err := c.getDatabases() + if err != nil { + return fmt.Errorf("could not get list of databases: %v", err) + } + databaseSlots := make(map[string]map[string]zalandov1.Slot) + for dbName := range listDatabases { + if dbName != "template0" && dbName != "template1" { + databaseSlots[dbName] = map[string]zalandov1.Slot{} + } + } + + // need to take explicitly defined slots into account when syncing Patroni config slotsToSync := make(map[string]map[string]string) - publications := make(map[string]map[string]acidv1.StreamTable) requiredPatroniConfig := c.Spec.Patroni - if len(requiredPatroniConfig.Slots) > 0 { - slots = requiredPatroniConfig.Slots + for slotName, slotConfig := range requiredPatroniConfig.Slots { + slotsToSync[slotName] = slotConfig + if _, exists := databaseSlots[slotConfig["database"]]; exists { + databaseSlots[slotConfig["database"]][slotName] = zalandov1.Slot{ + Slot: slotConfig, + Publication: make(map[string]acidv1.StreamTable), + } + } + } } - // gather list of required slots and publications + // get list of required slots and publications, group by database for _, stream := range c.Spec.Streams { + if _, exists := databaseSlots[stream.Database]; !exists { + c.logger.Warningf("database %q does not exist in the cluster", stream.Database) + continue + } slot := map[string]string{ "database": stream.Database, "plugin": constants.EventStreamSourcePluginType, "type": "logical", } slotName := getSlotName(stream.Database, stream.ApplicationId) - if _, exists := slots[slotName]; !exists { - slots[slotName] = slot - publications[slotName] = stream.Tables + slotAndPublication, exists := databaseSlots[stream.Database][slotName] + if !exists { + databaseSlots[stream.Database][slotName] = zalandov1.Slot{ + Slot: slot, + Publication: stream.Tables, + } } else { - streamTables := publications[slotName] + 
streamTables := slotAndPublication.Publication for tableName, table := range stream.Tables { if _, exists := streamTables[tableName]; !exists { streamTables[tableName] = table } } - publications[slotName] = streamTables + slotAndPublication.Publication = streamTables + databaseSlots[stream.Database][slotName] = slotAndPublication } } - // create publications to each created slot + // sync publication in a database c.logger.Debug("syncing database publications") - for publication, tables := range publications { - // but first check for existing publications - dbName := slots[publication]["database"] - err = c.syncPublication(publication, dbName, tables) + for dbName, databaseSlotsList := range databaseSlots { + err := c.syncPublication(dbName, databaseSlotsList, &slotsToSync) if err != nil { - c.logger.Warningf("could not sync publication %q in database %q: %v", publication, dbName, err) + c.logger.Warningf("could not sync all publications in database %q: %v", dbName, err) continue } - slotsToSync[publication] = slots[publication] - } - - // no slots to sync = no streams defined or publications created - if len(slotsToSync) > 0 { - requiredPatroniConfig.Slots = slotsToSync - } else { - return nil } c.logger.Debug("syncing logical replication slots") @@ -331,70 +439,145 @@ func (c *Cluster) syncStreams() error { } // sync logical replication slots in Patroni config + requiredPatroniConfig.Slots = slotsToSync configPatched, _, _, err := c.syncPatroniConfig(pods, requiredPatroniConfig, nil) if err != nil { c.logger.Warningf("Patroni config updated? %v - errors during config sync: %v", configPatched, err) } // finally sync stream CRDs - err = c.createOrUpdateStreams() - if err != nil { - return err + // get distinct application IDs from streams section + // there will be a separate event stream resource for each ID + appIds := getDistinctApplicationIds(c.Spec.Streams) + for _, appId := range appIds { + if hasSlotsInSync(appId, databaseSlots, slotsToSync) { + if err = c.syncStream(appId); err != nil { + c.logger.Warningf("could not sync event streams with applicationId %s: %v", appId, err) + } + } else { + c.logger.Warningf("database replication slots %#v for streams with applicationId %s not in sync, skipping event stream sync", slotsToSync, appId) + } + } + + // check if there is any deletion + if err = c.cleanupRemovedStreams(appIds); err != nil { + return fmt.Errorf("%v", err) } return nil } -func (c *Cluster) createOrUpdateStreams() error { +func hasSlotsInSync(appId string, databaseSlots map[string]map[string]zalandov1.Slot, slotsToSync map[string]map[string]string) bool { + allSlotsInSync := true + for dbName, slots := range databaseSlots { + for slotName := range slots { + if slotName == getSlotName(dbName, appId) { + if slot, exists := slotsToSync[slotName]; !exists || slot == nil { + allSlotsInSync = false + continue + } + } + } + } + + return allSlotsInSync +} - // fetch different application IDs from streams section - // there will be a separate event stream resource for each ID - appIds := gatherApplicationIds(c.Spec.Streams) +func (c *Cluster) syncStream(appId string) error { + var ( + streams *zalandov1.FabricEventStreamList + err error + ) + c.setProcessName("syncing stream with applicationId %s", appId) + c.logger.Debugf("syncing stream with applicationId %s", appId) - // list all existing stream CRDs listOptions := metav1.ListOptions{ - LabelSelector: c.labelsSet(true).String(), + LabelSelector: c.labelsSet(false).String(), } - streams, err := 
c.KubeClient.FabricEventStreams(c.Namespace).List(context.TODO(), listOptions) + streams, err = c.KubeClient.FabricEventStreams(c.Namespace).List(context.TODO(), listOptions) if err != nil { - return fmt.Errorf("could not list of FabricEventStreams: %v", err) + return fmt.Errorf("could not list of FabricEventStreams for applicationId %s: %v", appId, err) } - for _, appId := range appIds { - streamExists := false - - // update stream when it exists and EventStreams array differs - for _, stream := range streams.Items { - if appId == stream.Spec.ApplicationId { - streamExists = true - desiredStreams := c.generateFabricEventStream(appId) - if match, reason := sameStreams(stream.Spec.EventStreams, desiredStreams.Spec.EventStreams); !match { - c.logger.Debugf("updating event streams: %s", reason) - desiredStreams.ObjectMeta = stream.ObjectMeta - err = c.updateStreams(desiredStreams) - if err != nil { - return fmt.Errorf("failed updating event stream %s: %v", stream.Name, err) - } - c.logger.Infof("event stream %q has been successfully updated", stream.Name) - } - continue + streamExists := false + for _, stream := range streams.Items { + if stream.Spec.ApplicationId != appId { + continue + } + streamExists = true + c.Streams[appId] = &stream + desiredStreams := c.generateFabricEventStream(appId) + if !reflect.DeepEqual(stream.ObjectMeta.OwnerReferences, desiredStreams.ObjectMeta.OwnerReferences) { + c.logger.Infof("owner references of event streams with applicationId %s do not match the current ones", appId) + stream.ObjectMeta.OwnerReferences = desiredStreams.ObjectMeta.OwnerReferences + c.setProcessName("updating event streams with applicationId %s", appId) + updatedStream, err := c.KubeClient.FabricEventStreams(stream.Namespace).Update(context.TODO(), &stream, metav1.UpdateOptions{}) + if err != nil { + return fmt.Errorf("could not update event streams with applicationId %s: %v", appId, err) } + c.Streams[appId] = updatedStream } - - if !streamExists { - c.logger.Infof("event streams with applicationId %s do not exist, create it", appId) - streamCRD, err := c.createStreams(appId) + if match, reason := c.compareStreams(&stream, desiredStreams); !match { + c.logger.Infof("updating event streams with applicationId %s: %s", appId, reason) + // make sure to keep the old name with randomly generated suffix + desiredStreams.ObjectMeta.Name = stream.ObjectMeta.Name + updatedStream, err := c.updateStreams(desiredStreams) if err != nil { - return fmt.Errorf("failed creating event streams with applicationId %s: %v", appId, err) + return fmt.Errorf("failed updating event streams %s with applicationId %s: %v", stream.Name, appId, err) } - c.logger.Infof("event streams %q have been successfully created", streamCRD.Name) + c.Streams[appId] = updatedStream + c.logger.Infof("event streams %q with applicationId %s have been successfully updated", updatedStream.Name, appId) } + break + } + + if !streamExists { + c.logger.Infof("event streams with applicationId %s do not exist, create it", appId) + createdStream, err := c.createStreams(appId) + if err != nil { + return fmt.Errorf("failed creating event streams with applicationId %s: %v", appId, err) + } + c.logger.Infof("event streams %q have been successfully created", createdStream.Name) + c.Streams[appId] = createdStream } return nil } -func sameStreams(curEventStreams, newEventStreams []zalandov1.EventStream) (match bool, reason string) { +func (c *Cluster) compareStreams(curEventStreams, newEventStreams *zalandov1.FabricEventStream) (match bool, reason 
string) { + reasons := make([]string, 0) + desiredAnnotations := make(map[string]string) + match = true + + // stream operator can add extra annotations so incl. current annotations in desired annotations + for curKey, curValue := range curEventStreams.Annotations { + if _, exists := desiredAnnotations[curKey]; !exists { + desiredAnnotations[curKey] = curValue + } + } + // add/or override annotations if cpu and memory values were changed + for newKey, newValue := range newEventStreams.Annotations { + desiredAnnotations[newKey] = newValue + } + if changed, reason := c.compareAnnotations(curEventStreams.ObjectMeta.Annotations, desiredAnnotations, nil); changed { + match = false + reasons = append(reasons, fmt.Sprintf("new streams annotations do not match: %s", reason)) + } + + if !reflect.DeepEqual(curEventStreams.ObjectMeta.Labels, newEventStreams.ObjectMeta.Labels) { + match = false + reasons = append(reasons, "new streams labels do not match the current ones") + } + + if changed, reason := sameEventStreams(curEventStreams.Spec.EventStreams, newEventStreams.Spec.EventStreams); !changed { + match = false + reasons = append(reasons, fmt.Sprintf("new streams EventStreams array does not match : %s", reason)) + } + + return match, strings.Join(reasons, ", ") +} + +func sameEventStreams(curEventStreams, newEventStreams []zalandov1.EventStream) (match bool, reason string) { if len(newEventStreams) != len(curEventStreams) { return false, "number of defined streams is different" } @@ -418,3 +601,22 @@ func sameStreams(curEventStreams, newEventStreams []zalandov1.EventStream) (matc return true, "" } + +func (c *Cluster) cleanupRemovedStreams(appIds []string) error { + errors := make([]string, 0) + for appId := range c.Streams { + if !util.SliceContains(appIds, appId) { + c.logger.Infof("event streams with applicationId %s do not exist in the manifest, delete it", appId) + err := c.deleteStream(appId) + if err != nil { + errors = append(errors, fmt.Sprintf("failed deleting event streams with applicationId %s: %v", appId, err)) + } + } + } + + if len(errors) > 0 { + return fmt.Errorf("could not delete all removed event streams: %v", strings.Join(errors, `', '`)) + } + + return nil +} diff --git a/pkg/cluster/streams_test.go b/pkg/cluster/streams_test.go index 7030c914e..934f2bfd4 100644 --- a/pkg/cluster/streams_test.go +++ b/pkg/cluster/streams_test.go @@ -2,6 +2,7 @@ package cluster import ( "fmt" + "reflect" "strings" "context" @@ -18,29 +19,25 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" - "k8s.io/client-go/kubernetes/fake" ) -func newFakeK8sStreamClient() (k8sutil.KubernetesClient, *fake.Clientset) { - zalandoClientSet := fakezalandov1.NewSimpleClientset() - clientSet := fake.NewSimpleClientset() - - return k8sutil.KubernetesClient{ - FabricEventStreamsGetter: zalandoClientSet.ZalandoV1(), - PostgresqlsGetter: zalandoClientSet.AcidV1(), - PodsGetter: clientSet.CoreV1(), - StatefulSetsGetter: clientSet.AppsV1(), - }, clientSet -} - var ( - clusterName string = "acid-test-cluster" + clusterName string = "acid-stream-cluster" namespace string = "default" appId string = "test-app" dbName string = "foo" fesUser string = fmt.Sprintf("%s%s", constants.EventStreamSourceSlotPrefix, constants.UserRoleNameSuffix) slotName string = fmt.Sprintf("%s_%s_%s", constants.EventStreamSourceSlotPrefix, dbName, strings.Replace(appId, "-", "_", -1)) + zalandoClientSet = fakezalandov1.NewSimpleClientset() + + client = k8sutil.KubernetesClient{ + FabricEventStreamsGetter: 
zalandoClientSet.ZalandoV1(), + PostgresqlsGetter: zalandoClientSet.AcidV1(), + PodsGetter: clientSet.CoreV1(), + StatefulSetsGetter: clientSet.AppsV1(), + } + pg = acidv1.Postgresql{ TypeMeta: metav1.TypeMeta{ Kind: "Postgresql", @@ -59,21 +56,26 @@ var ( ApplicationId: appId, Database: "foo", Tables: map[string]acidv1.StreamTable{ - "data.bar": acidv1.StreamTable{ + "data.bar": { EventType: "stream-type-a", IdColumn: k8sutil.StringToPointer("b_id"), PayloadColumn: k8sutil.StringToPointer("b_payload"), }, - "data.foobar": acidv1.StreamTable{ + "data.foobar": { EventType: "stream-type-b", RecoveryEventType: "stream-type-b-dlq", }, + "data.foofoobar": { + EventType: "stream-type-c", + IgnoreRecovery: util.True(), + }, }, EnableRecovery: util.True(), Filter: map[string]*string{ "data.bar": k8sutil.StringToPointer("[?(@.source.txId > 500 && @.source.lsn > 123456)]"), }, BatchSize: k8sutil.UInt32ToPointer(uint32(100)), + CPU: k8sutil.StringToPointer("250m"), }, }, TeamID: "acid", @@ -91,8 +93,16 @@ var ( ObjectMeta: metav1.ObjectMeta{ Name: fmt.Sprintf("%s-12345", clusterName), Namespace: namespace, + Annotations: map[string]string{ + constants.EventStreamCpuAnnotationKey: "250m", + }, + Labels: map[string]string{ + "application": "spilo", + "cluster-name": clusterName, + "team": "acid", + }, OwnerReferences: []metav1.OwnerReference{ - metav1.OwnerReference{ + { APIVersion: "apps/v1", Kind: "StatefulSet", Name: "acid-test-cluster", @@ -103,7 +113,7 @@ var ( Spec: zalandov1.FabricEventStreamSpec{ ApplicationId: appId, EventStreams: []zalandov1.EventStream{ - zalandov1.EventStream{ + { EventStreamFlow: zalandov1.EventStreamFlow{ PayloadColumn: k8sutil.StringToPointer("b_payload"), Type: constants.EventStreamFlowPgGenericType, @@ -142,7 +152,7 @@ var ( Type: constants.EventStreamSourcePGType, }, }, - zalandov1.EventStream{ + { EventStreamFlow: zalandov1.EventStreamFlow{ Type: constants.EventStreamFlowPgGenericType, }, @@ -178,24 +188,42 @@ var ( Type: constants.EventStreamSourcePGType, }, }, + { + EventStreamFlow: zalandov1.EventStreamFlow{ + Type: constants.EventStreamFlowPgGenericType, + }, + EventStreamRecovery: zalandov1.EventStreamRecovery{ + Type: constants.EventStreamRecoveryIgnoreType, + }, + EventStreamSink: zalandov1.EventStreamSink{ + EventType: "stream-type-c", + MaxBatchSize: k8sutil.UInt32ToPointer(uint32(100)), + Type: constants.EventStreamSinkNakadiType, + }, + EventStreamSource: zalandov1.EventStreamSource{ + Connection: zalandov1.Connection{ + DBAuth: zalandov1.DBAuth{ + Name: fmt.Sprintf("fes-user.%s.credentials.postgresql.acid.zalan.do", clusterName), + PasswordKey: "password", + Type: constants.EventStreamSourceAuthType, + UserKey: "username", + }, + Url: fmt.Sprintf("jdbc:postgresql://%s.%s/foo?user=%s&ssl=true&sslmode=require", clusterName, namespace, fesUser), + SlotName: slotName, + PluginType: constants.EventStreamSourcePluginType, + }, + Schema: "data", + EventStreamTable: zalandov1.EventStreamTable{ + Name: "foofoobar", + }, + Type: constants.EventStreamSourcePGType, + }, + }, }, }, } -) -func TestGatherApplicationIds(t *testing.T) { - testAppIds := []string{appId} - appIds := gatherApplicationIds(pg.Spec.Streams) - - if !util.IsEqualIgnoreOrder(testAppIds, appIds) { - t.Errorf("gathered applicationIds do not match, expected %#v, got %#v", testAppIds, appIds) - } -} - -func TestGenerateFabricEventStream(t *testing.T) { - client, _ := newFakeK8sStreamClient() - - var cluster = New( + cluster = New( Config{ OpConfig: config.Config{ Auth: config.Auth{ @@ -213,60 +241,335 
@@ func TestGenerateFabricEventStream(t *testing.T) { }, }, }, client, pg, logger, eventRecorder) +) + +func TestGatherApplicationIds(t *testing.T) { + testAppIds := []string{appId} + appIds := getDistinctApplicationIds(pg.Spec.Streams) + + if !util.IsEqualIgnoreOrder(testAppIds, appIds) { + t.Errorf("list of applicationIds does not match, expected %#v, got %#v", testAppIds, appIds) + } +} +func TestHasSlotsInSync(t *testing.T) { cluster.Name = clusterName cluster.Namespace = namespace - // create statefulset to have ownerReference for streams - _, err := cluster.createStatefulSet() - assert.NoError(t, err) + appId2 := fmt.Sprintf("%s-2", appId) + dbNotExists := "dbnotexists" + slotNotExists := fmt.Sprintf("%s_%s_%s", constants.EventStreamSourceSlotPrefix, dbNotExists, strings.Replace(appId, "-", "_", -1)) + slotNotExistsAppId2 := fmt.Sprintf("%s_%s_%s", constants.EventStreamSourceSlotPrefix, dbNotExists, strings.Replace(appId2, "-", "_", -1)) + + tests := []struct { + subTest string + applicationId string + expectedSlots map[string]map[string]zalandov1.Slot + actualSlots map[string]map[string]string + slotsInSync bool + }{ + { + subTest: fmt.Sprintf("slots in sync for applicationId %s", appId), + applicationId: appId, + expectedSlots: map[string]map[string]zalandov1.Slot{ + dbName: { + slotName: zalandov1.Slot{ + Slot: map[string]string{ + "databases": dbName, + "plugin": constants.EventStreamSourcePluginType, + "type": "logical", + }, + Publication: map[string]acidv1.StreamTable{ + "test1": { + EventType: "stream-type-a", + }, + }, + }, + }, + }, + actualSlots: map[string]map[string]string{ + slotName: { + "databases": dbName, + "plugin": constants.EventStreamSourcePluginType, + "type": "logical", + }, + }, + slotsInSync: true, + }, { + subTest: fmt.Sprintf("slots empty for applicationId %s after create or update of publication failed", appId), + applicationId: appId, + expectedSlots: map[string]map[string]zalandov1.Slot{ + dbNotExists: { + slotNotExists: zalandov1.Slot{ + Slot: map[string]string{ + "databases": dbName, + "plugin": constants.EventStreamSourcePluginType, + "type": "logical", + }, + Publication: map[string]acidv1.StreamTable{ + "test1": { + EventType: "stream-type-a", + }, + }, + }, + }, + }, + actualSlots: map[string]map[string]string{}, + slotsInSync: false, + }, { + subTest: fmt.Sprintf("slot with empty definition for applicationId %s after publication git deleted", appId), + applicationId: appId, + expectedSlots: map[string]map[string]zalandov1.Slot{ + dbNotExists: { + slotNotExists: zalandov1.Slot{ + Slot: map[string]string{ + "databases": dbName, + "plugin": constants.EventStreamSourcePluginType, + "type": "logical", + }, + Publication: map[string]acidv1.StreamTable{ + "test1": { + EventType: "stream-type-a", + }, + }, + }, + }, + }, + actualSlots: map[string]map[string]string{ + slotName: nil, + }, + slotsInSync: false, + }, { + subTest: fmt.Sprintf("one slot not in sync for applicationId %s because database does not exist", appId), + applicationId: appId, + expectedSlots: map[string]map[string]zalandov1.Slot{ + dbName: { + slotName: zalandov1.Slot{ + Slot: map[string]string{ + "databases": dbName, + "plugin": constants.EventStreamSourcePluginType, + "type": "logical", + }, + Publication: map[string]acidv1.StreamTable{ + "test1": { + EventType: "stream-type-a", + }, + }, + }, + }, + dbNotExists: { + slotNotExists: zalandov1.Slot{ + Slot: map[string]string{ + "databases": "dbnotexists", + "plugin": constants.EventStreamSourcePluginType, + "type": "logical", + }, + 
Publication: map[string]acidv1.StreamTable{ + "test2": { + EventType: "stream-type-b", + }, + }, + }, + }, + }, + actualSlots: map[string]map[string]string{ + slotName: { + "databases": dbName, + "plugin": constants.EventStreamSourcePluginType, + "type": "logical", + }, + }, + slotsInSync: false, + }, { + subTest: fmt.Sprintf("slots in sync for applicationId %s, but not for %s - checking %s should return true", appId, appId2, appId), + applicationId: appId, + expectedSlots: map[string]map[string]zalandov1.Slot{ + dbName: { + slotName: zalandov1.Slot{ + Slot: map[string]string{ + "databases": dbName, + "plugin": constants.EventStreamSourcePluginType, + "type": "logical", + }, + Publication: map[string]acidv1.StreamTable{ + "test1": { + EventType: "stream-type-a", + }, + }, + }, + }, + dbNotExists: { + slotNotExistsAppId2: zalandov1.Slot{ + Slot: map[string]string{ + "databases": "dbnotexists", + "plugin": constants.EventStreamSourcePluginType, + "type": "logical", + }, + Publication: map[string]acidv1.StreamTable{ + "test2": { + EventType: "stream-type-b", + }, + }, + }, + }, + }, + actualSlots: map[string]map[string]string{ + slotName: { + "databases": dbName, + "plugin": constants.EventStreamSourcePluginType, + "type": "logical", + }, + }, + slotsInSync: true, + }, { + subTest: fmt.Sprintf("slots in sync for applicationId %s, but not for %s - checking %s should return false", appId, appId2, appId2), + applicationId: appId2, + expectedSlots: map[string]map[string]zalandov1.Slot{ + dbName: { + slotName: zalandov1.Slot{ + Slot: map[string]string{ + "databases": dbName, + "plugin": constants.EventStreamSourcePluginType, + "type": "logical", + }, + Publication: map[string]acidv1.StreamTable{ + "test1": { + EventType: "stream-type-a", + }, + }, + }, + }, + dbNotExists: { + slotNotExistsAppId2: zalandov1.Slot{ + Slot: map[string]string{ + "databases": "dbnotexists", + "plugin": constants.EventStreamSourcePluginType, + "type": "logical", + }, + Publication: map[string]acidv1.StreamTable{ + "test2": { + EventType: "stream-type-b", + }, + }, + }, + }, + }, + actualSlots: map[string]map[string]string{ + slotName: { + "databases": dbName, + "plugin": constants.EventStreamSourcePluginType, + "type": "logical", + }, + }, + slotsInSync: false, + }, + } + + for _, tt := range tests { + result := hasSlotsInSync(tt.applicationId, tt.expectedSlots, tt.actualSlots) + if result != tt.slotsInSync { + t.Errorf("%s: unexpected result for slot test of applicationId: %v, expected slots %#v, actual slots %#v", tt.subTest, tt.applicationId, tt.expectedSlots, tt.actualSlots) + } + } +} + +func TestGenerateFabricEventStream(t *testing.T) { + cluster.Name = clusterName + cluster.Namespace = namespace // create the streams - err = cluster.createOrUpdateStreams() + err := cluster.syncStream(appId) assert.NoError(t, err) // compare generated stream with expected stream result := cluster.generateFabricEventStream(appId) - if match, _ := sameStreams(result.Spec.EventStreams, fes.Spec.EventStreams); !match { + if match, _ := cluster.compareStreams(result, fes); !match { t.Errorf("malformed FabricEventStream, expected %#v, got %#v", fes, result) } listOptions := metav1.ListOptions{ - LabelSelector: cluster.labelsSet(true).String(), + LabelSelector: cluster.labelsSet(false).String(), } streams, err := cluster.KubeClient.FabricEventStreams(namespace).List(context.TODO(), listOptions) assert.NoError(t, err) - - // check if there is only one stream - if len(streams.Items) > 1 { - t.Errorf("too many stream CRDs found: got %d, but 
expected only one", len(streams.Items)) - } + assert.Equalf(t, 1, len(streams.Items), "unexpected number of streams found: got %d, but expected only one", len(streams.Items)) // compare stream returned from API with expected stream - if match, _ := sameStreams(streams.Items[0].Spec.EventStreams, fes.Spec.EventStreams); !match { + if match, _ := cluster.compareStreams(&streams.Items[0], fes); !match { t.Errorf("malformed FabricEventStream returned from API, expected %#v, got %#v", fes, streams.Items[0]) } // sync streams once again - err = cluster.createOrUpdateStreams() + err = cluster.syncStream(appId) assert.NoError(t, err) streams, err = cluster.KubeClient.FabricEventStreams(namespace).List(context.TODO(), listOptions) assert.NoError(t, err) - - // check if there is still only one stream - if len(streams.Items) > 1 { - t.Errorf("too many stream CRDs found after sync: got %d, but expected only one", len(streams.Items)) - } + assert.Equalf(t, 1, len(streams.Items), "unexpected number of streams found: got %d, but expected only one", len(streams.Items)) // compare stream resturned from API with generated stream - if match, _ := sameStreams(streams.Items[0].Spec.EventStreams, result.Spec.EventStreams); !match { + if match, _ := cluster.compareStreams(&streams.Items[0], result); !match { t.Errorf("returned FabricEventStream differs from generated one, expected %#v, got %#v", result, streams.Items[0]) } } +func newFabricEventStream(streams []zalandov1.EventStream, annotations map[string]string) *zalandov1.FabricEventStream { + return &zalandov1.FabricEventStream{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-12345", clusterName), + Annotations: annotations, + }, + Spec: zalandov1.FabricEventStreamSpec{ + ApplicationId: appId, + EventStreams: streams, + }, + } +} + +func TestSyncStreams(t *testing.T) { + newClusterName := fmt.Sprintf("%s-2", pg.Name) + pg.Name = newClusterName + var cluster = New( + Config{ + OpConfig: config.Config{ + PodManagementPolicy: "ordered_ready", + Resources: config.Resources{ + ClusterLabels: map[string]string{"application": "spilo"}, + ClusterNameLabel: "cluster-name", + DefaultCPURequest: "300m", + DefaultCPULimit: "300m", + DefaultMemoryRequest: "300Mi", + DefaultMemoryLimit: "300Mi", + PodRoleLabel: "spilo-role", + }, + }, + }, client, pg, logger, eventRecorder) + + _, err := cluster.KubeClient.Postgresqls(namespace).Create( + context.TODO(), &pg, metav1.CreateOptions{}) + assert.NoError(t, err) + + // create the stream + err = cluster.syncStream(appId) + assert.NoError(t, err) + + // sync the stream again + err = cluster.syncStream(appId) + assert.NoError(t, err) + + // check that only one stream remains after sync + listOptions := metav1.ListOptions{ + LabelSelector: cluster.labelsSet(false).String(), + } + streams, err := cluster.KubeClient.FabricEventStreams(namespace).List(context.TODO(), listOptions) + assert.NoError(t, err) + assert.Equalf(t, 1, len(streams.Items), "unexpected number of streams found: got %d, but expected only 1", len(streams.Items)) +} + func TestSameStreams(t *testing.T) { testName := "TestSameStreams" + annotationsA := map[string]string{constants.EventStreamMemoryAnnotationKey: "500Mi"} + annotationsB := map[string]string{constants.EventStreamMemoryAnnotationKey: "1Gi"} stream1 := zalandov1.EventStream{ EventStreamFlow: zalandov1.EventStreamFlow{}, @@ -294,74 +597,110 @@ func TestSameStreams(t *testing.T) { }, } + stream3 := zalandov1.EventStream{ + EventStreamFlow: zalandov1.EventStreamFlow{}, + EventStreamRecovery: 
zalandov1.EventStreamRecovery{ + Type: constants.EventStreamRecoveryNoneType, + }, + EventStreamSink: zalandov1.EventStreamSink{ + EventType: "stream-type-b", + }, + EventStreamSource: zalandov1.EventStreamSource{ + EventStreamTable: zalandov1.EventStreamTable{ + Name: "bar", + }, + }, + } + tests := []struct { subTest string - streamsA []zalandov1.EventStream - streamsB []zalandov1.EventStream + streamsA *zalandov1.FabricEventStream + streamsB *zalandov1.FabricEventStream match bool reason string }{ { subTest: "identical streams", - streamsA: []zalandov1.EventStream{stream1, stream2}, - streamsB: []zalandov1.EventStream{stream1, stream2}, + streamsA: newFabricEventStream([]zalandov1.EventStream{stream1, stream2}, annotationsA), + streamsB: newFabricEventStream([]zalandov1.EventStream{stream1, stream2}, annotationsA), match: true, reason: "", }, { subTest: "same streams different order", - streamsA: []zalandov1.EventStream{stream1, stream2}, - streamsB: []zalandov1.EventStream{stream2, stream1}, + streamsA: newFabricEventStream([]zalandov1.EventStream{stream1, stream2}, nil), + streamsB: newFabricEventStream([]zalandov1.EventStream{stream2, stream1}, nil), match: true, reason: "", }, { subTest: "same streams different order", - streamsA: []zalandov1.EventStream{stream1}, - streamsB: []zalandov1.EventStream{stream1, stream2}, + streamsA: newFabricEventStream([]zalandov1.EventStream{stream1}, nil), + streamsB: newFabricEventStream([]zalandov1.EventStream{stream1, stream2}, nil), match: false, - reason: "number of defined streams is different", + reason: "new streams EventStreams array does not match : number of defined streams is different", }, { subTest: "different number of streams", - streamsA: []zalandov1.EventStream{stream1}, - streamsB: []zalandov1.EventStream{stream1, stream2}, + streamsA: newFabricEventStream([]zalandov1.EventStream{stream1}, nil), + streamsB: newFabricEventStream([]zalandov1.EventStream{stream1, stream2}, nil), match: false, - reason: "number of defined streams is different", + reason: "new streams EventStreams array does not match : number of defined streams is different", }, { subTest: "event stream specs differ", - streamsA: []zalandov1.EventStream{stream1, stream2}, - streamsB: fes.Spec.EventStreams, + streamsA: newFabricEventStream([]zalandov1.EventStream{stream1, stream2}, nil), + streamsB: fes, match: false, - reason: "number of defined streams is different", + reason: "new streams annotations do not match: Added \"fes.zalando.org/FES_CPU\" with value \"250m\"., new streams labels do not match the current ones, new streams EventStreams array does not match : number of defined streams is different", + }, + { + subTest: "event stream recovery specs differ", + streamsA: newFabricEventStream([]zalandov1.EventStream{stream2}, nil), + streamsB: newFabricEventStream([]zalandov1.EventStream{stream3}, nil), + match: false, + reason: "new streams EventStreams array does not match : event stream specs differ", + }, + { + subTest: "event stream with new annotations", + streamsA: newFabricEventStream([]zalandov1.EventStream{stream2}, nil), + streamsB: newFabricEventStream([]zalandov1.EventStream{stream2}, annotationsA), + match: false, + reason: "new streams annotations do not match: Added \"fes.zalando.org/FES_MEMORY\" with value \"500Mi\".", + }, + { + subTest: "event stream annotations differ", + streamsA: newFabricEventStream([]zalandov1.EventStream{stream3}, annotationsA), + streamsB: newFabricEventStream([]zalandov1.EventStream{stream3}, annotationsB), + match: 
false, + reason: "new streams annotations do not match: \"fes.zalando.org/FES_MEMORY\" changed from \"500Mi\" to \"1Gi\".", }, } for _, tt := range tests { - streamsMatch, matchReason := sameStreams(tt.streamsA, tt.streamsB) - if streamsMatch != tt.match { - t.Errorf("%s %s: unexpected match result when comparing streams: got %s, epxected %s", + streamsMatch, matchReason := cluster.compareStreams(tt.streamsA, tt.streamsB) + if streamsMatch != tt.match || matchReason != tt.reason { + t.Errorf("%s %s: unexpected match result when comparing streams: got %s, expected %s", testName, tt.subTest, matchReason, tt.reason) } } } -func TestUpdateFabricEventStream(t *testing.T) { - client, _ := newFakeK8sStreamClient() - +func TestUpdateStreams(t *testing.T) { + pg.Name = fmt.Sprintf("%s-3", pg.Name) var cluster = New( Config{ OpConfig: config.Config{ PodManagementPolicy: "ordered_ready", Resources: config.Resources{ - ClusterLabels: map[string]string{"application": "spilo"}, - ClusterNameLabel: "cluster-name", - DefaultCPURequest: "300m", - DefaultCPULimit: "300m", - DefaultMemoryRequest: "300Mi", - DefaultMemoryLimit: "300Mi", - PodRoleLabel: "spilo-role", + ClusterLabels: map[string]string{"application": "spilo"}, + ClusterNameLabel: "cluster-name", + DefaultCPURequest: "300m", + DefaultCPULimit: "300m", + DefaultMemoryRequest: "300Mi", + DefaultMemoryLimit: "300Mi", + EnableOwnerReferences: util.True(), + PodRoleLabel: "spilo-role", }, }, }, client, pg, logger, eventRecorder) @@ -370,14 +709,31 @@ func TestUpdateFabricEventStream(t *testing.T) { context.TODO(), &pg, metav1.CreateOptions{}) assert.NoError(t, err) - // create statefulset to have ownerReference for streams - _, err = cluster.createStatefulSet() + // create stream with different owner reference + fes.ObjectMeta.Name = fmt.Sprintf("%s-12345", pg.Name) + fes.ObjectMeta.Labels["cluster-name"] = pg.Name + createdStream, err := cluster.KubeClient.FabricEventStreams(namespace).Create( + context.TODO(), fes, metav1.CreateOptions{}) assert.NoError(t, err) + assert.Equal(t, createdStream.Spec.ApplicationId, appId) - // now create the stream - err = cluster.createOrUpdateStreams() + // sync the stream which should update the owner reference + err = cluster.syncStream(appId) assert.NoError(t, err) + // check that only one stream exists after sync + listOptions := metav1.ListOptions{ + LabelSelector: cluster.labelsSet(true).String(), + } + streams, err := cluster.KubeClient.FabricEventStreams(namespace).List(context.TODO(), listOptions) + assert.NoError(t, err) + assert.Equalf(t, 1, len(streams.Items), "unexpected number of streams found: got %d, but expected only 1", len(streams.Items)) + + // compare owner references + if !reflect.DeepEqual(streams.Items[0].OwnerReferences, cluster.ownerReferences()) { + t.Errorf("unexpected owner references, expected %#v, got %#v", cluster.ownerReferences(), streams.Items[0].OwnerReferences) + } + // change specs of streams and patch CRD for i, stream := range pg.Spec.Streams { if stream.ApplicationId == appId { @@ -389,7 +745,30 @@ func TestUpdateFabricEventStream(t *testing.T) { } } - patchData, err := specPatch(pg.Spec) + // compare stream returned from API with expected stream + streams = patchPostgresqlStreams(t, cluster, &pg.Spec, listOptions) + result := cluster.generateFabricEventStream(appId) + if match, _ := cluster.compareStreams(&streams.Items[0], result); !match { + t.Errorf("Malformed FabricEventStream after updating manifest, expected %#v, got %#v", streams.Items[0], result) + } + + // disable 
recovery + for idx, stream := range pg.Spec.Streams { + if stream.ApplicationId == appId { + stream.EnableRecovery = util.False() + pg.Spec.Streams[idx] = stream + } + } + + streams = patchPostgresqlStreams(t, cluster, &pg.Spec, listOptions) + result = cluster.generateFabricEventStream(appId) + if match, _ := cluster.compareStreams(&streams.Items[0], result); !match { + t.Errorf("Malformed FabricEventStream after disabling event recovery, expected %#v, got %#v", streams.Items[0], result) + } +} + +func patchPostgresqlStreams(t *testing.T, cluster *Cluster, pgSpec *acidv1.PostgresSpec, listOptions metav1.ListOptions) (streams *zalandov1.FabricEventStreamList) { + patchData, err := specPatch(pgSpec) assert.NoError(t, err) pgPatched, err := cluster.KubeClient.Postgresqls(namespace).Patch( @@ -397,18 +776,116 @@ func TestUpdateFabricEventStream(t *testing.T) { assert.NoError(t, err) cluster.Postgresql.Spec = pgPatched.Spec - err = cluster.createOrUpdateStreams() + err = cluster.syncStream(appId) + assert.NoError(t, err) + + streams, err = cluster.KubeClient.FabricEventStreams(namespace).List(context.TODO(), listOptions) + assert.NoError(t, err) + + return streams +} + +func TestDeleteStreams(t *testing.T) { + pg.Name = fmt.Sprintf("%s-4", pg.Name) + var cluster = New( + Config{ + OpConfig: config.Config{ + PodManagementPolicy: "ordered_ready", + Resources: config.Resources{ + ClusterLabels: map[string]string{"application": "spilo"}, + ClusterNameLabel: "cluster-name", + DefaultCPURequest: "300m", + DefaultCPULimit: "300m", + DefaultMemoryRequest: "300Mi", + DefaultMemoryLimit: "300Mi", + PodRoleLabel: "spilo-role", + }, + }, + }, client, pg, logger, eventRecorder) + + _, err := cluster.KubeClient.Postgresqls(namespace).Create( + context.TODO(), &pg, metav1.CreateOptions{}) + assert.NoError(t, err) + + // create the stream + err = cluster.syncStream(appId) assert.NoError(t, err) + // change specs of streams and patch CRD + for i, stream := range pg.Spec.Streams { + if stream.ApplicationId == appId { + streamTable := stream.Tables["data.bar"] + streamTable.EventType = "stream-type-c" + stream.Tables["data.bar"] = streamTable + stream.BatchSize = k8sutil.UInt32ToPointer(uint32(250)) + pg.Spec.Streams[i] = stream + } + } + // compare stream returned from API with expected stream listOptions := metav1.ListOptions{ - LabelSelector: cluster.labelsSet(true).String(), + LabelSelector: cluster.labelsSet(false).String(), } - streams, err := cluster.KubeClient.FabricEventStreams(namespace).List(context.TODO(), listOptions) - assert.NoError(t, err) - + streams := patchPostgresqlStreams(t, cluster, &pg.Spec, listOptions) result := cluster.generateFabricEventStream(appId) - if match, _ := sameStreams(streams.Items[0].Spec.EventStreams, result.Spec.EventStreams); !match { - t.Errorf("Malformed FabricEventStream, expected %#v, got %#v", streams.Items[0], result) + if match, _ := cluster.compareStreams(&streams.Items[0], result); !match { + t.Errorf("Malformed FabricEventStream after updating manifest, expected %#v, got %#v", streams.Items[0], result) } + + // change teamId and check that stream is updated + pg.Spec.TeamID = "new-team" + streams = patchPostgresqlStreams(t, cluster, &pg.Spec, listOptions) + result = cluster.generateFabricEventStream(appId) + if match, _ := cluster.compareStreams(&streams.Items[0], result); !match { + t.Errorf("Malformed FabricEventStream after updating teamId, expected %#v, got %#v", streams.Items[0].ObjectMeta.Labels, result.ObjectMeta.Labels) + } + + // disable recovery + for 
idx, stream := range pg.Spec.Streams { + if stream.ApplicationId == appId { + stream.EnableRecovery = util.False() + pg.Spec.Streams[idx] = stream + } + } + + streams = patchPostgresqlStreams(t, cluster, &pg.Spec, listOptions) + result = cluster.generateFabricEventStream(appId) + if match, _ := cluster.compareStreams(&streams.Items[0], result); !match { + t.Errorf("Malformed FabricEventStream after disabling event recovery, expected %#v, got %#v", streams.Items[0], result) + } + + // remove streams from manifest + pg.Spec.Streams = nil + pgUpdated, err := cluster.KubeClient.Postgresqls(namespace).Update( + context.TODO(), &pg, metav1.UpdateOptions{}) + assert.NoError(t, err) + + appIds := getDistinctApplicationIds(pgUpdated.Spec.Streams) + cluster.cleanupRemovedStreams(appIds) + + // check that streams have been deleted + streams, err = cluster.KubeClient.FabricEventStreams(namespace).List(context.TODO(), listOptions) + assert.NoError(t, err) + assert.Equalf(t, 0, len(streams.Items), "unexpected number of streams found: got %d, but expected none", len(streams.Items)) + + // create stream to test deleteStreams code + fes.ObjectMeta.Name = fmt.Sprintf("%s-12345", pg.Name) + fes.ObjectMeta.Labels["cluster-name"] = pg.Name + _, err = cluster.KubeClient.FabricEventStreams(namespace).Create( + context.TODO(), fes, metav1.CreateOptions{}) + assert.NoError(t, err) + + // sync it once to cluster struct + err = cluster.syncStream(appId) + assert.NoError(t, err) + + // we need a mock client because deleteStreams checks for CRD existance + mockClient := k8sutil.NewMockKubernetesClient() + cluster.KubeClient.CustomResourceDefinitionsGetter = mockClient.CustomResourceDefinitionsGetter + cluster.deleteStreams() + + // check that streams have been deleted + streams, err = cluster.KubeClient.FabricEventStreams(namespace).List(context.TODO(), listOptions) + assert.NoError(t, err) + assert.Equalf(t, 0, len(streams.Items), "unexpected number of streams found: got %d, but expected none", len(streams.Items)) } diff --git a/pkg/cluster/sync.go b/pkg/cluster/sync.go index bd31271f4..797e7a5aa 100644 --- a/pkg/cluster/sync.go +++ b/pkg/cluster/sync.go @@ -15,10 +15,13 @@ import ( "github.com/zalando/postgres-operator/pkg/util" "github.com/zalando/postgres-operator/pkg/util/constants" "github.com/zalando/postgres-operator/pkg/util/k8sutil" + "golang.org/x/exp/maps" + "golang.org/x/exp/slices" batchv1 "k8s.io/api/batch/v1" v1 "k8s.io/api/core/v1" policyv1 "k8s.io/api/policy/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" ) var requirePrimaryRestartWhenDecreased = []string{ @@ -40,14 +43,28 @@ func (c *Cluster) Sync(newSpec *acidv1.Postgresql) error { c.setSpec(newSpec) defer func() { + var ( + pgUpdatedStatus *acidv1.Postgresql + errStatus error + ) if err != nil { c.logger.Warningf("error while syncing cluster state: %v", err) - c.KubeClient.SetPostgresCRDStatus(c.clusterName(), acidv1.ClusterStatusSyncFailed) + pgUpdatedStatus, errStatus = c.KubeClient.SetPostgresCRDStatus(c.clusterName(), acidv1.ClusterStatusSyncFailed) } else if !c.Status.Running() { - c.KubeClient.SetPostgresCRDStatus(c.clusterName(), acidv1.ClusterStatusRunning) + pgUpdatedStatus, errStatus = c.KubeClient.SetPostgresCRDStatus(c.clusterName(), acidv1.ClusterStatusRunning) + } + if errStatus != nil { + c.logger.Warningf("could not set cluster status: %v", errStatus) + } + if pgUpdatedStatus != nil { + c.setSpec(pgUpdatedStatus) } }() + if err = c.syncFinalizer(); err != nil { + c.logger.Debugf("could not sync 
finalizers: %v", err) + } + if err = c.initUsers(); err != nil { err = fmt.Errorf("could not init users: %v", err) return err @@ -64,6 +81,10 @@ func (c *Cluster) Sync(newSpec *acidv1.Postgresql) error { return err } + if err = c.syncPatroniResources(); err != nil { + c.logger.Errorf("could not sync Patroni resources: %v", err) + } + // sync volume may already transition volumes to gp3, if iops/throughput or type is specified if err = c.syncVolumes(); err != nil { return err @@ -76,7 +97,11 @@ func (c *Cluster) Sync(newSpec *acidv1.Postgresql) error { } } - c.logger.Debug("syncing statefulsets") + if !isInMaintenanceWindow(newSpec.Spec.MaintenanceWindows) { + // do not apply any major version related changes yet + newSpec.Spec.PostgresqlParam.PgVersion = oldSpec.Spec.PostgresqlParam.PgVersion + } + if err = c.syncStatefulSet(); err != nil { if !k8sutil.ResourceAlreadyExists(err) { err = fmt.Errorf("could not sync statefulsets: %v", err) @@ -84,9 +109,16 @@ func (c *Cluster) Sync(newSpec *acidv1.Postgresql) error { } } + // add or remove standby_cluster section from Patroni config depending on changes in standby section + if !reflect.DeepEqual(oldSpec.Spec.StandbyCluster, newSpec.Spec.StandbyCluster) { + if err := c.syncStandbyClusterConfiguration(); err != nil { + return fmt.Errorf("could not sync StandbyCluster configuration: %v", err) + } + } + c.logger.Debug("syncing pod disruption budgets") - if err = c.syncPodDisruptionBudget(false); err != nil { - err = fmt.Errorf("could not sync pod disruption budget: %v", err) + if err = c.syncPodDisruptionBudgets(false); err != nil { + err = fmt.Errorf("could not sync pod disruption budgets: %v", err) return err } @@ -121,7 +153,10 @@ func (c *Cluster) Sync(newSpec *acidv1.Postgresql) error { return fmt.Errorf("could not sync connection pooler: %v", err) } - if len(c.Spec.Streams) > 0 { + // sync if manifest stream count is different from stream CR count + // it can be that they are always different due to grouping of manifest streams + // but we would catch missed removals on update + if len(c.Spec.Streams) != len(c.Streams) { c.logger.Debug("syncing streams") if err = c.syncStreams(); err != nil { err = fmt.Errorf("could not sync streams: %v", err) @@ -137,6 +172,181 @@ func (c *Cluster) Sync(newSpec *acidv1.Postgresql) error { return err } +func (c *Cluster) syncFinalizer() error { + var err error + if c.OpConfig.EnableFinalizers != nil && *c.OpConfig.EnableFinalizers { + err = c.addFinalizer() + } else { + err = c.removeFinalizer() + } + if err != nil { + return fmt.Errorf("could not sync finalizer: %v", err) + } + + return nil +} + +func (c *Cluster) syncPatroniResources() error { + errors := make([]string, 0) + + if err := c.syncPatroniService(); err != nil { + errors = append(errors, fmt.Sprintf("could not sync %s service: %v", Patroni, err)) + } + + for _, suffix := range patroniObjectSuffixes { + if c.patroniKubernetesUseConfigMaps() { + if err := c.syncPatroniConfigMap(suffix); err != nil { + errors = append(errors, fmt.Sprintf("could not sync %s Patroni config map: %v", suffix, err)) + } + } else { + if err := c.syncPatroniEndpoint(suffix); err != nil { + errors = append(errors, fmt.Sprintf("could not sync %s Patroni endpoint: %v", suffix, err)) + } + } + } + + if len(errors) > 0 { + return fmt.Errorf("%v", strings.Join(errors, `', '`)) + } + + return nil +} + +func (c *Cluster) syncPatroniConfigMap(suffix string) error { + var ( + cm *v1.ConfigMap + err error + ) + configMapName := fmt.Sprintf("%s-%s", c.Name, suffix) + 
c.logger.Debugf("syncing %s config map", configMapName) + c.setProcessName("syncing %s config map", configMapName) + + if cm, err = c.KubeClient.ConfigMaps(c.Namespace).Get(context.TODO(), configMapName, metav1.GetOptions{}); err == nil { + c.PatroniConfigMaps[suffix] = cm + desiredOwnerRefs := c.ownerReferences() + if !reflect.DeepEqual(cm.ObjectMeta.OwnerReferences, desiredOwnerRefs) { + c.logger.Infof("new %s config map's owner references do not match the current ones", configMapName) + cm.ObjectMeta.OwnerReferences = desiredOwnerRefs + c.setProcessName("updating %s config map", configMapName) + cm, err = c.KubeClient.ConfigMaps(c.Namespace).Update(context.TODO(), cm, metav1.UpdateOptions{}) + if err != nil { + return fmt.Errorf("could not update %s config map: %v", configMapName, err) + } + c.PatroniConfigMaps[suffix] = cm + } + annotations := make(map[string]string) + maps.Copy(annotations, cm.Annotations) + // Patroni can add extra annotations so incl. current annotations in desired annotations + desiredAnnotations := c.annotationsSet(cm.Annotations) + if changed, _ := c.compareAnnotations(annotations, desiredAnnotations, nil); changed { + patchData, err := metaAnnotationsPatch(desiredAnnotations) + if err != nil { + return fmt.Errorf("could not form patch for %s config map: %v", configMapName, err) + } + cm, err = c.KubeClient.ConfigMaps(c.Namespace).Patch(context.TODO(), configMapName, types.MergePatchType, []byte(patchData), metav1.PatchOptions{}) + if err != nil { + return fmt.Errorf("could not patch annotations of %s config map: %v", configMapName, err) + } + c.PatroniConfigMaps[suffix] = cm + } + } else if !k8sutil.ResourceNotFound(err) { + // if config map does not exist yet, Patroni should create it + return fmt.Errorf("could not get %s config map: %v", configMapName, err) + } + + return nil +} + +func (c *Cluster) syncPatroniEndpoint(suffix string) error { + var ( + ep *v1.Endpoints + err error + ) + endpointName := fmt.Sprintf("%s-%s", c.Name, suffix) + c.logger.Debugf("syncing %s endpoint", endpointName) + c.setProcessName("syncing %s endpoint", endpointName) + + if ep, err = c.KubeClient.Endpoints(c.Namespace).Get(context.TODO(), endpointName, metav1.GetOptions{}); err == nil { + c.PatroniEndpoints[suffix] = ep + desiredOwnerRefs := c.ownerReferences() + if !reflect.DeepEqual(ep.ObjectMeta.OwnerReferences, desiredOwnerRefs) { + c.logger.Infof("new %s endpoints's owner references do not match the current ones", endpointName) + ep.ObjectMeta.OwnerReferences = desiredOwnerRefs + c.setProcessName("updating %s endpoint", endpointName) + ep, err = c.KubeClient.Endpoints(c.Namespace).Update(context.TODO(), ep, metav1.UpdateOptions{}) + if err != nil { + return fmt.Errorf("could not update %s endpoint: %v", endpointName, err) + } + c.PatroniEndpoints[suffix] = ep + } + annotations := make(map[string]string) + maps.Copy(annotations, ep.Annotations) + // Patroni can add extra annotations so incl. 
current annotations in desired annotations + desiredAnnotations := c.annotationsSet(ep.Annotations) + if changed, _ := c.compareAnnotations(annotations, desiredAnnotations, nil); changed { + patchData, err := metaAnnotationsPatch(desiredAnnotations) + if err != nil { + return fmt.Errorf("could not form patch for %s endpoint: %v", endpointName, err) + } + ep, err = c.KubeClient.Endpoints(c.Namespace).Patch(context.TODO(), endpointName, types.MergePatchType, []byte(patchData), metav1.PatchOptions{}) + if err != nil { + return fmt.Errorf("could not patch annotations of %s endpoint: %v", endpointName, err) + } + c.PatroniEndpoints[suffix] = ep + } + } else if !k8sutil.ResourceNotFound(err) { + // if endpoint does not exist yet, Patroni should create it + return fmt.Errorf("could not get %s endpoint: %v", endpointName, err) + } + + return nil +} + +func (c *Cluster) syncPatroniService() error { + var ( + svc *v1.Service + err error + ) + serviceName := fmt.Sprintf("%s-%s", c.Name, Patroni) + c.logger.Debugf("syncing %s service", serviceName) + c.setProcessName("syncing %s service", serviceName) + + if svc, err = c.KubeClient.Services(c.Namespace).Get(context.TODO(), serviceName, metav1.GetOptions{}); err == nil { + c.Services[Patroni] = svc + desiredOwnerRefs := c.ownerReferences() + if !reflect.DeepEqual(svc.ObjectMeta.OwnerReferences, desiredOwnerRefs) { + c.logger.Infof("new %s service's owner references do not match the current ones", serviceName) + svc.ObjectMeta.OwnerReferences = desiredOwnerRefs + c.setProcessName("updating %v service", serviceName) + svc, err = c.KubeClient.Services(c.Namespace).Update(context.TODO(), svc, metav1.UpdateOptions{}) + if err != nil { + return fmt.Errorf("could not update %s service: %v", serviceName, err) + } + c.Services[Patroni] = svc + } + annotations := make(map[string]string) + maps.Copy(annotations, svc.Annotations) + // Patroni can add extra annotations so incl. 
current annotations in desired annotations + desiredAnnotations := c.annotationsSet(svc.Annotations) + if changed, _ := c.compareAnnotations(annotations, desiredAnnotations, nil); changed { + patchData, err := metaAnnotationsPatch(desiredAnnotations) + if err != nil { + return fmt.Errorf("could not form patch for %s service: %v", serviceName, err) + } + svc, err = c.KubeClient.Services(c.Namespace).Patch(context.TODO(), serviceName, types.MergePatchType, []byte(patchData), metav1.PatchOptions{}) + if err != nil { + return fmt.Errorf("could not patch annotations of %s service: %v", serviceName, err) + } + c.Services[Patroni] = svc + } + } else if !k8sutil.ResourceNotFound(err) { + // if config service does not exist yet, Patroni should create it + return fmt.Errorf("could not get %s service: %v", serviceName, err) + } + + return nil +} + func (c *Cluster) syncServices() error { for _, role := range []PostgresRole{Master, Replica} { c.logger.Debugf("syncing %s service", role) @@ -164,22 +374,17 @@ func (c *Cluster) syncService(role PostgresRole) error { if svc, err = c.KubeClient.Services(c.Namespace).Get(context.TODO(), c.serviceName(role), metav1.GetOptions{}); err == nil { c.Services[role] = svc desiredSvc := c.generateService(role, &c.Spec) - if match, reason := c.compareServices(svc, desiredSvc); !match { - c.logServiceChanges(role, svc, desiredSvc, false, reason) - updatedSvc, err := c.updateService(role, svc, desiredSvc) - if err != nil { - return fmt.Errorf("could not update %s service to match desired state: %v", role, err) - } - c.Services[role] = updatedSvc - c.logger.Infof("%s service %q is in the desired state now", role, util.NameFromMeta(desiredSvc.ObjectMeta)) + updatedSvc, err := c.updateService(role, svc, desiredSvc) + if err != nil { + return fmt.Errorf("could not update %s service to match desired state: %v", role, err) } + c.Services[role] = updatedSvc return nil } if !k8sutil.ResourceNotFound(err) { return fmt.Errorf("could not get %s service: %v", role, err) } // no existing service, create new one - c.Services[role] = nil c.logger.Infof("could not find the cluster's %s service", role) if svc, err = c.createService(role); err == nil { @@ -204,8 +409,28 @@ func (c *Cluster) syncEndpoint(role PostgresRole) error { ) c.setProcessName("syncing %s endpoint", role) - if ep, err = c.KubeClient.Endpoints(c.Namespace).Get(context.TODO(), c.endpointName(role), metav1.GetOptions{}); err == nil { - // TODO: No syncing of endpoints here, is this covered completely by updateService? 
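// ---------------------------------------------------------------------------
// [Editor's note: illustrative sketch, not part of the patch.]
// The Patroni config map, endpoint and service sync functions above, as well
// as the endpoint sync below, all follow the same pattern: copy the current
// annotations, build the desired set with c.annotationsSet(), and, if
// c.compareAnnotations() reports a difference, apply a merge patch that only
// touches metadata.annotations. A minimal standalone version of that patch
// construction could look like the sketch below; the function name and the
// direct use of encoding/json are assumptions for illustration, not
// necessarily how the operator's metaAnnotationsPatch helper is written.
package sketch

import "encoding/json"

// annotationsMergePatch builds a JSON merge patch body that is applied with
// types.MergePatchType: keys present in desired are set, while annotations
// not mentioned in the patch are left untouched (removing a key would require
// an explicit null value, as in the pod-annotation patch further down).
func annotationsMergePatch(desired map[string]string) ([]byte, error) {
	patch := map[string]interface{}{
		"metadata": map[string]interface{}{
			"annotations": desired,
		},
	}
	return json.Marshal(patch)
}
// [End of editor's note.]
// ---------------------------------------------------------------------------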
+ if ep, err = c.KubeClient.Endpoints(c.Namespace).Get(context.TODO(), c.serviceName(role), metav1.GetOptions{}); err == nil { + desiredEp := c.generateEndpoint(role, ep.Subsets) + // if owner references differ we update which would also change annotations + if !reflect.DeepEqual(ep.ObjectMeta.OwnerReferences, desiredEp.ObjectMeta.OwnerReferences) { + c.logger.Infof("new %s endpoints's owner references do not match the current ones", role) + c.setProcessName("updating %v endpoint", role) + ep, err = c.KubeClient.Endpoints(c.Namespace).Update(context.TODO(), desiredEp, metav1.UpdateOptions{}) + if err != nil { + return fmt.Errorf("could not update %s endpoint: %v", role, err) + } + } else { + if changed, _ := c.compareAnnotations(ep.Annotations, desiredEp.Annotations, nil); changed { + patchData, err := metaAnnotationsPatch(desiredEp.Annotations) + if err != nil { + return fmt.Errorf("could not form patch for %s endpoint: %v", role, err) + } + ep, err = c.KubeClient.Endpoints(c.Namespace).Patch(context.TODO(), c.serviceName(role), types.MergePatchType, []byte(patchData), metav1.PatchOptions{}) + if err != nil { + return fmt.Errorf("could not patch annotations of %s endpoint: %v", role, err) + } + } + } c.Endpoints[role] = ep return nil } @@ -213,7 +438,6 @@ func (c *Cluster) syncEndpoint(role PostgresRole) error { return fmt.Errorf("could not get %s endpoint: %v", role, err) } // no existing endpoint, create new one - c.Endpoints[role] = nil c.logger.Infof("could not find the cluster's %s endpoint", role) if ep, err = c.createEndpoint(role); err == nil { @@ -223,7 +447,7 @@ func (c *Cluster) syncEndpoint(role PostgresRole) error { return fmt.Errorf("could not create missing %s endpoint: %v", role, err) } c.logger.Infof("%s endpoint %q already exists", role, util.NameFromMeta(ep.ObjectMeta)) - if ep, err = c.KubeClient.Endpoints(c.Namespace).Get(context.TODO(), c.endpointName(role), metav1.GetOptions{}); err != nil { + if ep, err = c.KubeClient.Endpoints(c.Namespace).Get(context.TODO(), c.serviceName(role), metav1.GetOptions{}); err != nil { return fmt.Errorf("could not fetch existing %s endpoint: %v", role, err) } } @@ -231,21 +455,61 @@ func (c *Cluster) syncEndpoint(role PostgresRole) error { return nil } -func (c *Cluster) syncPodDisruptionBudget(isUpdate bool) error { +func (c *Cluster) syncPrimaryPodDisruptionBudget(isUpdate bool) error { + var ( + pdb *policyv1.PodDisruptionBudget + err error + ) + if pdb, err = c.KubeClient.PodDisruptionBudgets(c.Namespace).Get(context.TODO(), c.PrimaryPodDisruptionBudgetName(), metav1.GetOptions{}); err == nil { + c.PrimaryPodDisruptionBudget = pdb + newPDB := c.generatePrimaryPodDisruptionBudget() + match, reason := c.comparePodDisruptionBudget(pdb, newPDB) + if !match { + c.logPDBChanges(pdb, newPDB, isUpdate, reason) + if err = c.updatePrimaryPodDisruptionBudget(newPDB); err != nil { + return err + } + } else { + c.PrimaryPodDisruptionBudget = pdb + } + return nil + + } + if !k8sutil.ResourceNotFound(err) { + return fmt.Errorf("could not get pod disruption budget: %v", err) + } + // no existing pod disruption budget, create new one + c.logger.Infof("could not find the primary pod disruption budget") + + if err = c.createPrimaryPodDisruptionBudget(); err != nil { + if !k8sutil.ResourceAlreadyExists(err) { + return fmt.Errorf("could not create primary pod disruption budget: %v", err) + } + c.logger.Infof("pod disruption budget %q already exists", util.NameFromMeta(pdb.ObjectMeta)) + if pdb, err = 
c.KubeClient.PodDisruptionBudgets(c.Namespace).Get(context.TODO(), c.PrimaryPodDisruptionBudgetName(), metav1.GetOptions{}); err != nil { + return fmt.Errorf("could not fetch existing %q pod disruption budget", util.NameFromMeta(pdb.ObjectMeta)) + } + } + + return nil +} + +func (c *Cluster) syncCriticalOpPodDisruptionBudget(isUpdate bool) error { var ( pdb *policyv1.PodDisruptionBudget err error ) - if pdb, err = c.KubeClient.PodDisruptionBudgets(c.Namespace).Get(context.TODO(), c.podDisruptionBudgetName(), metav1.GetOptions{}); err == nil { - c.PodDisruptionBudget = pdb - newPDB := c.generatePodDisruptionBudget() - if match, reason := k8sutil.SamePDB(pdb, newPDB); !match { + if pdb, err = c.KubeClient.PodDisruptionBudgets(c.Namespace).Get(context.TODO(), c.criticalOpPodDisruptionBudgetName(), metav1.GetOptions{}); err == nil { + c.CriticalOpPodDisruptionBudget = pdb + newPDB := c.generateCriticalOpPodDisruptionBudget() + match, reason := c.comparePodDisruptionBudget(pdb, newPDB) + if !match { c.logPDBChanges(pdb, newPDB, isUpdate, reason) - if err = c.updatePodDisruptionBudget(newPDB); err != nil { + if err = c.updateCriticalOpPodDisruptionBudget(newPDB); err != nil { return err } } else { - c.PodDisruptionBudget = pdb + c.CriticalOpPodDisruptionBudget = pdb } return nil @@ -254,22 +518,35 @@ func (c *Cluster) syncPodDisruptionBudget(isUpdate bool) error { return fmt.Errorf("could not get pod disruption budget: %v", err) } // no existing pod disruption budget, create new one - c.PodDisruptionBudget = nil - c.logger.Infof("could not find the cluster's pod disruption budget") + c.logger.Infof("could not find pod disruption budget for critical operations") - if pdb, err = c.createPodDisruptionBudget(); err != nil { + if err = c.createCriticalOpPodDisruptionBudget(); err != nil { if !k8sutil.ResourceAlreadyExists(err) { - return fmt.Errorf("could not create pod disruption budget: %v", err) + return fmt.Errorf("could not create pod disruption budget for critical operations: %v", err) } c.logger.Infof("pod disruption budget %q already exists", util.NameFromMeta(pdb.ObjectMeta)) - if pdb, err = c.KubeClient.PodDisruptionBudgets(c.Namespace).Get(context.TODO(), c.podDisruptionBudgetName(), metav1.GetOptions{}); err != nil { + if pdb, err = c.KubeClient.PodDisruptionBudgets(c.Namespace).Get(context.TODO(), c.criticalOpPodDisruptionBudgetName(), metav1.GetOptions{}); err != nil { return fmt.Errorf("could not fetch existing %q pod disruption budget", util.NameFromMeta(pdb.ObjectMeta)) } } - c.logger.Infof("created missing pod disruption budget %q", util.NameFromMeta(pdb.ObjectMeta)) - c.PodDisruptionBudget = pdb + return nil +} + +func (c *Cluster) syncPodDisruptionBudgets(isUpdate bool) error { + errors := make([]string, 0) + + if err := c.syncPrimaryPodDisruptionBudget(isUpdate); err != nil { + errors = append(errors, fmt.Sprintf("%v", err)) + } + + if err := c.syncCriticalOpPodDisruptionBudget(isUpdate); err != nil { + errors = append(errors, fmt.Sprintf("%v", err)) + } + if len(errors) > 0 { + return fmt.Errorf("%v", strings.Join(errors, `', '`)) + } return nil } @@ -281,6 +558,7 @@ func (c *Cluster) syncStatefulSet() error { ) podsToRecreate := make([]v1.Pod, 0) isSafeToRecreatePods := true + postponeReasons := make([]string, 0) switchoverCandidates := make([]spec.NamespacedName, 0) pods, err := c.listPods() @@ -290,12 +568,12 @@ func (c *Cluster) syncStatefulSet() error { // NB: Be careful to consider the codepath that acts on podsRollingUpdateRequired before returning early. 
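// ---------------------------------------------------------------------------
// [Editor's note: illustrative sketch, not part of the patch.]
// The syncStatefulSet changes that follow clean up pod annotations that were
// removed from the desired spec by building a patch in which deleted keys map
// to nil while remaining keys map to their new values, and sending it as a
// strategic merge patch per pod. A stripped-down version of that construction
// (the helper name and inputs are made up for illustration):
package sketch

import "encoding/json"

func podAnnotationsPatch(deleted []string, desired map[string]string) ([]byte, error) {
	updated := map[string]*string{}
	for _, key := range deleted {
		updated[key] = nil // nil serializes to null, which removes the annotation
	}
	for key, val := range desired {
		v := val // take the address of a copy, not of the loop variable
		updated[key] = &v
	}
	// produces e.g. {"metadata":{"annotations":{"removed-key":null,"kept-key":"value"}}}
	return json.Marshal(map[string]map[string]map[string]*string{
		"metadata": {"annotations": updated},
	})
}
// [End of editor's note.]
// ---------------------------------------------------------------------------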
sset, err := c.KubeClient.StatefulSets(c.Namespace).Get(context.TODO(), c.statefulSetName(), metav1.GetOptions{}) + if err != nil && !k8sutil.ResourceNotFound(err) { + return fmt.Errorf("error during reading of statefulset: %v", err) + } + if err != nil { - if !k8sutil.ResourceNotFound(err) { - return fmt.Errorf("error during reading of statefulset: %v", err) - } // statefulset does not exist, try to re-create it - c.Statefulset = nil c.logger.Infof("cluster's statefulset does not exist") sset, err = c.createStatefulSet() @@ -318,6 +596,11 @@ func (c *Cluster) syncStatefulSet() error { c.logger.Infof("created missing statefulset %q", util.NameFromMeta(sset.ObjectMeta)) } else { + desiredSts, err := c.generateStatefulSet(&c.Spec) + if err != nil { + return fmt.Errorf("could not generate statefulset: %v", err) + } + c.logger.Debug("syncing statefulsets") // check if there are still pods with a rolling update flag for _, pod := range pods { if c.getRollingUpdateFlagFromPod(&pod) { @@ -332,18 +615,36 @@ func (c *Cluster) syncStatefulSet() error { } if len(podsToRecreate) > 0 { - c.logger.Debugf("%d / %d pod(s) still need to be rotated", len(podsToRecreate), len(pods)) + c.logger.Infof("%d / %d pod(s) still need to be rotated", len(podsToRecreate), len(pods)) } // statefulset is already there, make sure we use its definition in order to compare with the spec. c.Statefulset = sset - desiredSts, err := c.generateStatefulSet(&c.Spec) - if err != nil { - return fmt.Errorf("could not generate statefulset: %v", err) - } - cmp := c.compareStatefulSetWith(desiredSts) + if !cmp.rollingUpdate { + updatedPodAnnotations := map[string]*string{} + for _, anno := range cmp.deletedPodAnnotations { + updatedPodAnnotations[anno] = nil + } + for anno, val := range desiredSts.Spec.Template.Annotations { + updatedPodAnnotations[anno] = &val + } + metadataReq := map[string]map[string]map[string]*string{"metadata": {"annotations": updatedPodAnnotations}} + patch, err := json.Marshal(metadataReq) + if err != nil { + return fmt.Errorf("could not form patch for pod annotations: %v", err) + } + + for _, pod := range pods { + if changed, _ := c.compareAnnotations(pod.Annotations, desiredSts.Spec.Template.Annotations, nil); changed { + _, err = c.KubeClient.Pods(c.Namespace).Patch(context.TODO(), pod.Name, types.StrategicMergePatchType, patch, metav1.PatchOptions{}) + if err != nil { + return fmt.Errorf("could not patch annotations for pod %q: %v", pod.Name, err) + } + } + } + } if !cmp.match { if cmp.rollingUpdate { podsToRecreate = make([]v1.Pod, 0) @@ -416,12 +717,14 @@ func (c *Cluster) syncStatefulSet() error { c.logger.Debug("syncing Patroni config") if configPatched, restartPrimaryFirst, restartWait, err = c.syncPatroniConfig(pods, c.Spec.Patroni, requiredPgParameters); err != nil { c.logger.Warningf("Patroni config updated? 
%v - errors during config sync: %v", configPatched, err) + postponeReasons = append(postponeReasons, "errors during Patroni config sync") isSafeToRecreatePods = false } // restart Postgres where it is still pending if err = c.restartInstances(pods, restartWait, restartPrimaryFirst); err != nil { c.logger.Errorf("errors while restarting Postgres in pods via Patroni API: %v", err) + postponeReasons = append(postponeReasons, "errors while restarting Postgres via Patroni API") isSafeToRecreatePods = false } @@ -429,14 +732,14 @@ func (c *Cluster) syncStatefulSet() error { // statefulset or those that got their configuration from the outdated statefulset) if len(podsToRecreate) > 0 { if isSafeToRecreatePods { - c.logger.Debugln("performing rolling update") + c.logger.Info("performing rolling update") c.eventRecorder.Event(c.GetReference(), v1.EventTypeNormal, "Update", "Performing rolling update") if err := c.recreatePods(podsToRecreate, switchoverCandidates); err != nil { return fmt.Errorf("could not recreate pods: %v", err) } c.eventRecorder.Event(c.GetReference(), v1.EventTypeNormal, "Update", "Rolling update done - pods have been recreated") } else { - c.logger.Warningf("postpone pod recreation until next sync because of errors during config sync") + c.logger.Warningf("postpone pod recreation until next sync - reason: %s", strings.Join(postponeReasons, `', '`)) } } @@ -610,6 +913,9 @@ func (c *Cluster) checkAndSetGlobalPostgreSQLConfiguration(pod *v1.Pod, effectiv if desiredPatroniConfig.SynchronousModeStrict != effectivePatroniConfig.SynchronousModeStrict { configToSet["synchronous_mode_strict"] = desiredPatroniConfig.SynchronousModeStrict } + if desiredPatroniConfig.SynchronousNodeCount != effectivePatroniConfig.SynchronousNodeCount { + configToSet["synchronous_node_count"] = desiredPatroniConfig.SynchronousNodeCount + } if desiredPatroniConfig.TTL > 0 && desiredPatroniConfig.TTL != effectivePatroniConfig.TTL { configToSet["ttl"] = desiredPatroniConfig.TTL } @@ -643,7 +949,7 @@ func (c *Cluster) checkAndSetGlobalPostgreSQLConfiguration(pod *v1.Pod, effectiv // check if specified slots exist in config and if they differ for slotName, desiredSlot := range desiredPatroniConfig.Slots { // only add slots specified in manifest to c.replicationSlots - for manifestSlotName, _ := range c.Spec.Patroni.Slots { + for manifestSlotName := range c.Spec.Patroni.Slots { if manifestSlotName == slotName { c.replicationSlots[slotName] = desiredSlot } @@ -664,7 +970,7 @@ func (c *Cluster) checkAndSetGlobalPostgreSQLConfiguration(pod *v1.Pod, effectiv effectiveValue := effectivePgParameters[desiredOption] if isBootstrapOnlyParameter(desiredOption) && (effectiveValue != desiredValue) { parametersToSet[desiredOption] = desiredValue - if util.SliceContains(requirePrimaryRestartWhenDecreased, desiredOption) { + if slices.Contains(requirePrimaryRestartWhenDecreased, desiredOption) { effectiveValueNum, errConv := strconv.Atoi(effectiveValue) desiredValueNum, errConv2 := strconv.Atoi(desiredValue) if errConv != nil || errConv2 != nil { @@ -680,7 +986,7 @@ func (c *Cluster) checkAndSetGlobalPostgreSQLConfiguration(pod *v1.Pod, effectiv } // check if there exist only config updates that require a restart of the primary - if len(restartPrimary) > 0 && !util.SliceContains(restartPrimary, false) && len(configToSet) == 0 { + if len(restartPrimary) > 0 && !slices.Contains(restartPrimary, false) && len(configToSet) == 0 { requiresMasterRestart = true } @@ -710,8 +1016,48 @@ func (c *Cluster) 
checkAndSetGlobalPostgreSQLConfiguration(pod *v1.Pod, effectiv return configPatched, requiresMasterRestart, nil } +// syncStandbyClusterConfiguration checks whether standby cluster +// parameters have changed and if necessary sets it via the Patroni API +func (c *Cluster) syncStandbyClusterConfiguration() error { + var ( + err error + pods []v1.Pod + ) + + standbyOptionsToSet := make(map[string]interface{}) + if c.Spec.StandbyCluster != nil { + c.logger.Infof("turning %q into a standby cluster", c.Name) + standbyOptionsToSet["create_replica_methods"] = []string{"bootstrap_standby_with_wale", "basebackup_fast_xlog"} + standbyOptionsToSet["restore_command"] = "envdir \"/run/etc/wal-e.d/env-standby\" /scripts/restore_command.sh \"%f\" \"%p\"" + + } else { + c.logger.Infof("promoting standby cluster and detach from source") + standbyOptionsToSet = nil + } + + if pods, err = c.listPods(); err != nil { + return err + } + if len(pods) == 0 { + return fmt.Errorf("could not call Patroni API: cluster has no pods") + } + // try all pods until the first one that is successful, as it doesn't matter which pod + // carries the request to change configuration through + for _, pod := range pods { + podName := util.NameFromMeta(pod.ObjectMeta) + c.logger.Infof("patching Postgres config via Patroni API on pod %s with following options: %s", + podName, standbyOptionsToSet) + if err = c.patroni.SetStandbyClusterParameters(&pod, standbyOptionsToSet); err == nil { + return nil + } + c.logger.Warningf("could not patch postgres parameters within pod %s: %v", podName, err) + } + return fmt.Errorf("could not reach Patroni API to set Postgres options: failed on every pod (%d total)", + len(pods)) +} + func (c *Cluster) syncSecrets() error { - c.logger.Info("syncing secrets") + c.logger.Debug("syncing secrets") c.setProcessName("syncing secrets") generatedSecrets := c.generateUserSecrets() retentionUsers := make([]string, 0) @@ -721,7 +1067,7 @@ func (c *Cluster) syncSecrets() error { secret, err := c.KubeClient.Secrets(generatedSecret.Namespace).Create(context.TODO(), generatedSecret, metav1.CreateOptions{}) if err == nil { c.Secrets[secret.UID] = secret - c.logger.Debugf("created new secret %s, namespace: %s, uid: %s", util.NameFromMeta(secret.ObjectMeta), generatedSecret.Namespace, secret.UID) + c.logger.Infof("created new secret %s, namespace: %s, uid: %s", util.NameFromMeta(secret.ObjectMeta), generatedSecret.Namespace, secret.UID) continue } if k8sutil.ResourceAlreadyExists(err) { @@ -808,14 +1154,17 @@ func (c *Cluster) updateSecret( // if password rotation is enabled update password and username if rotation interval has been passed // rotation can be enabled globally or via the manifest (excluding the Postgres superuser) rotationEnabledInManifest := secretUsername != constants.SuperuserKeyName && - (util.SliceContains(c.Spec.UsersWithSecretRotation, secretUsername) || - util.SliceContains(c.Spec.UsersWithInPlaceSecretRotation, secretUsername)) + (slices.Contains(c.Spec.UsersWithSecretRotation, secretUsername) || + slices.Contains(c.Spec.UsersWithInPlaceSecretRotation, secretUsername)) // globally enabled rotation is only allowed for manifest and bootstrapped roles allowedRoleTypes := []spec.RoleOrigin{spec.RoleOriginManifest, spec.RoleOriginBootstrap} - rotationAllowed := !pwdUser.IsDbOwner && util.SliceContains(allowedRoleTypes, pwdUser.Origin) && c.Spec.StandbyCluster == nil + rotationAllowed := !pwdUser.IsDbOwner && slices.Contains(allowedRoleTypes, pwdUser.Origin) && c.Spec.StandbyCluster == nil + + // 
users can ignore any kind of rotation + isIgnoringRotation := slices.Contains(c.Spec.UsersIgnoringSecretRotation, secretUsername) - if (c.OpConfig.EnablePasswordRotation && rotationAllowed) || rotationEnabledInManifest { + if ((c.OpConfig.EnablePasswordRotation && rotationAllowed) || rotationEnabledInManifest) && !isIgnoringRotation { updateSecretMsg, err = c.rotatePasswordInSecret(secret, secretUsername, pwdUser.Origin, currentTime, retentionUsers) if err != nil { c.logger.Warnf("password rotation failed for user %s: %v", secretUsername, err) @@ -852,14 +1201,32 @@ func (c *Cluster) updateSecret( userMap[userKey] = pwdUser } + if !reflect.DeepEqual(secret.ObjectMeta.OwnerReferences, generatedSecret.ObjectMeta.OwnerReferences) { + updateSecret = true + updateSecretMsg = fmt.Sprintf("secret %s owner references do not match the current ones", secretName) + secret.ObjectMeta.OwnerReferences = generatedSecret.ObjectMeta.OwnerReferences + } + if updateSecret { - c.logger.Debugln(updateSecretMsg) - if _, err = c.KubeClient.Secrets(secret.Namespace).Update(context.TODO(), secret, metav1.UpdateOptions{}); err != nil { + c.logger.Infof(updateSecretMsg) + if secret, err = c.KubeClient.Secrets(secret.Namespace).Update(context.TODO(), secret, metav1.UpdateOptions{}); err != nil { return fmt.Errorf("could not update secret %s: %v", secretName, err) } c.Secrets[secret.UID] = secret } + if changed, _ := c.compareAnnotations(secret.Annotations, generatedSecret.Annotations, nil); changed { + patchData, err := metaAnnotationsPatch(generatedSecret.Annotations) + if err != nil { + return fmt.Errorf("could not form patch for secret %q annotations: %v", secret.Name, err) + } + secret, err = c.KubeClient.Secrets(secret.Namespace).Patch(context.TODO(), secret.Name, types.MergePatchType, []byte(patchData), metav1.PatchOptions{}) + if err != nil { + return fmt.Errorf("could not patch annotations for secret %q: %v", secret.Name, err) + } + c.Secrets[secret.UID] = secret + } + return nil } @@ -873,6 +1240,8 @@ func (c *Cluster) rotatePasswordInSecret( err error nextRotationDate time.Time nextRotationDateStr string + expectedUsername string + rotationModeChanged bool updateSecretMsg string ) @@ -893,17 +1262,32 @@ func (c *Cluster) rotatePasswordInSecret( nextRotationDate = currentRotationDate } + // set username and check if it differs from current value in secret + currentUsername := string(secret.Data["username"]) + if !slices.Contains(c.Spec.UsersWithInPlaceSecretRotation, secretUsername) { + expectedUsername = fmt.Sprintf("%s%s", secretUsername, currentTime.Format(constants.RotationUserDateFormat)) + } else { + expectedUsername = secretUsername + } + + // when changing to in-place rotation update secret immediatly + // if currentUsername is longer we know it has a date suffix + // the other way around we can wait until the next rotation date + if len(currentUsername) > len(expectedUsername) { + rotationModeChanged = true + c.logger.Infof("updating secret %s after switching to in-place rotation mode for username: %s", secretName, string(secret.Data["username"])) + } + // update password and next rotation date if configured interval has passed - if currentTime.After(nextRotationDate) { + if currentTime.After(nextRotationDate) || rotationModeChanged { // create rotation user if role is not listed for in-place password update - if !util.SliceContains(c.Spec.UsersWithInPlaceSecretRotation, secretUsername) { - rotationUsername := fmt.Sprintf("%s%s", secretUsername, currentTime.Format(constants.RotationUserDateFormat)) 
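// ---------------------------------------------------------------------------
// [Editor's note: illustrative sketch, not part of the patch.]
// The rotation logic above derives the expected username in the secret from
// the rotation mode: regular rotation appends a date suffix to the login
// name, in-place rotation keeps the name as is, and a stored username that is
// longer than the expected one can only carry a leftover date suffix, which
// is how the switch to in-place mode is detected. A simplified model of that
// decision follows; the "060102" layout stands in for
// constants.RotationUserDateFormat and is an assumption for illustration only.
package sketch

import "time"

func expectedSecretUsername(login string, inPlaceRotation bool, now time.Time) string {
	if inPlaceRotation {
		return login // e.g. "app_reader"
	}
	return login + now.Format("060102") // e.g. "app_reader250614"
}

func switchedToInPlaceRotation(currentUsername, expectedUsername string) bool {
	// only a previously appended date suffix can make the stored name longer
	return len(currentUsername) > len(expectedUsername)
}
// [End of editor's note.]
// ---------------------------------------------------------------------------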
- secret.Data["username"] = []byte(rotationUsername) - c.logger.Infof("updating username in secret %s and creating rotation user %s in the database", secretName, rotationUsername) + if !slices.Contains(c.Spec.UsersWithInPlaceSecretRotation, secretUsername) { + secret.Data["username"] = []byte(expectedUsername) + c.logger.Infof("updating username in secret %s and creating rotation user %s in the database", secretName, expectedUsername) // whenever there is a rotation, check if old rotation users can be deleted *retentionUsers = append(*retentionUsers, secretUsername) } else { - // when passwords of system users are rotated in place, pods have to be replaced + // when passwords of system users are rotated in-place, pods have to be replaced if roleOrigin == spec.RoleOriginSystem { pods, err := c.listPods() if err != nil { @@ -917,7 +1301,7 @@ func (c *Cluster) rotatePasswordInSecret( } } - // when password of connection pooler is rotated in place, pooler pods have to be replaced + // when password of connection pooler is rotated in-place, pooler pods have to be replaced if roleOrigin == spec.RoleOriginConnectionPooler { listOptions := metav1.ListOptions{ LabelSelector: c.poolerLabelsSet(true).String(), @@ -934,10 +1318,12 @@ func (c *Cluster) rotatePasswordInSecret( } } - // when password of stream user is rotated in place, it should trigger rolling update in FES deployment + // when password of stream user is rotated in-place, it should trigger rolling update in FES deployment if roleOrigin == spec.RoleOriginStream { c.logger.Warnf("password in secret of stream user %s changed", constants.EventStreamSourceSlotPrefix+constants.UserRoleNameSuffix) } + + secret.Data["username"] = []byte(secretUsername) } secret.Data["password"] = []byte(util.RandomPassword(constants.PasswordLength)) secret.Data["nextRotation"] = []byte(nextRotationDateStr) @@ -1266,18 +1652,56 @@ func (c *Cluster) syncLogicalBackupJob() error { if err != nil { return fmt.Errorf("could not generate the desired logical backup job state: %v", err) } - if match, reason := k8sutil.SameLogicalBackupJob(job, desiredJob); !match { + if !reflect.DeepEqual(job.ObjectMeta.OwnerReferences, desiredJob.ObjectMeta.OwnerReferences) { + c.logger.Info("new logical backup job's owner references do not match the current ones") + job, err = c.KubeClient.CronJobs(job.Namespace).Update(context.TODO(), desiredJob, metav1.UpdateOptions{}) + if err != nil { + return fmt.Errorf("could not update owner references for logical backup job %q: %v", job.Name, err) + } + c.logger.Infof("logical backup job %s updated", c.getLogicalBackupJobName()) + } + if cmp := c.compareLogicalBackupJob(job, desiredJob); !cmp.match { c.logger.Infof("logical job %s is not in the desired state and needs to be updated", c.getLogicalBackupJobName(), ) - if reason != "" { - c.logger.Infof("reason: %s", reason) + if len(cmp.reasons) != 0 { + for _, reason := range cmp.reasons { + c.logger.Infof("reason: %s", reason) + } + } + if len(cmp.deletedPodAnnotations) != 0 { + templateMetadataReq := map[string]map[string]map[string]map[string]map[string]map[string]map[string]*string{ + "spec": {"jobTemplate": {"spec": {"template": {"metadata": {"annotations": {}}}}}}} + for _, anno := range cmp.deletedPodAnnotations { + templateMetadataReq["spec"]["jobTemplate"]["spec"]["template"]["metadata"]["annotations"][anno] = nil + } + patch, err := json.Marshal(templateMetadataReq) + if err != nil { + return fmt.Errorf("could not marshal ObjectMeta for logical backup job %q pod template: %v", 
jobName, err) + } + + job, err = c.KubeClient.CronJobs(c.Namespace).Patch(context.TODO(), jobName, types.StrategicMergePatchType, patch, metav1.PatchOptions{}, "") + if err != nil { + c.logger.Errorf("failed to remove annotations from the logical backup job %q pod template: %v", jobName, err) + return err + } } if err = c.patchLogicalBackupJob(desiredJob); err != nil { return fmt.Errorf("could not update logical backup job to match desired state: %v", err) } c.logger.Info("the logical backup job is synced") } + if changed, _ := c.compareAnnotations(job.Annotations, desiredJob.Annotations, nil); changed { + patchData, err := metaAnnotationsPatch(desiredJob.Annotations) + if err != nil { + return fmt.Errorf("could not form patch for the logical backup job %q: %v", jobName, err) + } + _, err = c.KubeClient.CronJobs(c.Namespace).Patch(context.TODO(), jobName, types.MergePatchType, []byte(patchData), metav1.PatchOptions{}) + if err != nil { + return fmt.Errorf("could not patch annotations of the logical backup job %q: %v", jobName, err) + } + } + c.LogicalBackupJob = desiredJob return nil } if !k8sutil.ResourceNotFound(err) { diff --git a/pkg/cluster/sync_test.go b/pkg/cluster/sync_test.go index f7f8ad9c7..f9d1d7873 100644 --- a/pkg/cluster/sync_test.go +++ b/pkg/cluster/sync_test.go @@ -2,13 +2,15 @@ package cluster import ( "bytes" - "io/ioutil" + "fmt" + "io" "net/http" "testing" "time" "context" + "golang.org/x/exp/slices" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" @@ -140,6 +142,181 @@ func TestSyncStatefulSetsAnnotations(t *testing.T) { } } +func TestPodAnnotationsSync(t *testing.T) { + clusterName := "acid-test-cluster-2" + namespace := "default" + podAnnotation := "no-scale-down" + podAnnotations := map[string]string{podAnnotation: "true"} + customPodAnnotation := "foo" + customPodAnnotations := map[string]string{customPodAnnotation: "true"} + + ctrl := gomock.NewController(t) + defer ctrl.Finish() + mockClient := mocks.NewMockHTTPClient(ctrl) + client, _ := newFakeK8sAnnotationsClient() + + pg := acidv1.Postgresql{ + ObjectMeta: metav1.ObjectMeta{ + Name: clusterName, + Namespace: namespace, + }, + Spec: acidv1.PostgresSpec{ + Volume: acidv1.Volume{ + Size: "1Gi", + }, + EnableConnectionPooler: boolToPointer(true), + EnableLogicalBackup: true, + EnableReplicaConnectionPooler: boolToPointer(true), + PodAnnotations: podAnnotations, + NumberOfInstances: 2, + }, + } + + var cluster = New( + Config{ + OpConfig: config.Config{ + PatroniAPICheckInterval: time.Duration(1), + PatroniAPICheckTimeout: time.Duration(5), + PodManagementPolicy: "ordered_ready", + CustomPodAnnotations: customPodAnnotations, + ConnectionPooler: config.ConnectionPooler{ + ConnectionPoolerDefaultCPURequest: "100m", + ConnectionPoolerDefaultCPULimit: "100m", + ConnectionPoolerDefaultMemoryRequest: "100Mi", + ConnectionPoolerDefaultMemoryLimit: "100Mi", + NumberOfInstances: k8sutil.Int32ToPointer(1), + }, + Resources: config.Resources{ + ClusterLabels: map[string]string{"application": "spilo"}, + ClusterNameLabel: "cluster-name", + DefaultCPURequest: "300m", + DefaultCPULimit: "300m", + DefaultMemoryRequest: "300Mi", + DefaultMemoryLimit: "300Mi", + MaxInstances: -1, + PodRoleLabel: "spilo-role", + ResourceCheckInterval: time.Duration(3), + ResourceCheckTimeout: time.Duration(10), + }, + }, + }, client, pg, logger, eventRecorder) + + configJson := `{"postgresql": {"parameters": {"log_min_duration_statement": 200, "max_connections": 50}}}, "ttl": 20}` + response := 
http.Response{ + StatusCode: 200, + Body: io.NopCloser(bytes.NewReader([]byte(configJson))), + } + + mockClient.EXPECT().Do(gomock.Any()).Return(&response, nil).AnyTimes() + cluster.patroni = patroni.New(patroniLogger, mockClient) + cluster.Name = clusterName + cluster.Namespace = namespace + clusterOptions := clusterLabelsOptions(cluster) + + // create a statefulset + _, err := cluster.createStatefulSet() + assert.NoError(t, err) + // create a pods + podsList := createPods(cluster) + for _, pod := range podsList { + _, err = cluster.KubeClient.Pods(namespace).Create(context.TODO(), &pod, metav1.CreateOptions{}) + assert.NoError(t, err) + } + // create connection pooler + _, err = cluster.createConnectionPooler(mockInstallLookupFunction) + assert.NoError(t, err) + + // create cron job + err = cluster.createLogicalBackupJob() + assert.NoError(t, err) + + annotateResources(cluster) + err = cluster.Sync(&cluster.Postgresql) + assert.NoError(t, err) + + // 1. PodAnnotations set + stsList, err := cluster.KubeClient.StatefulSets(namespace).List(context.TODO(), clusterOptions) + assert.NoError(t, err) + for _, sts := range stsList.Items { + for _, annotation := range []string{podAnnotation, customPodAnnotation} { + assert.Contains(t, sts.Spec.Template.Annotations, annotation) + } + } + + for _, role := range []PostgresRole{Master, Replica} { + deploy, err := cluster.KubeClient.Deployments(namespace).Get(context.TODO(), cluster.connectionPoolerName(role), metav1.GetOptions{}) + assert.NoError(t, err) + for _, annotation := range []string{podAnnotation, customPodAnnotation} { + assert.Contains(t, deploy.Spec.Template.Annotations, annotation, + fmt.Sprintf("pooler deployment pod template %s should contain annotation %s, found %#v", + deploy.Name, annotation, deploy.Spec.Template.Annotations)) + } + } + + podList, err := cluster.KubeClient.Pods(namespace).List(context.TODO(), clusterOptions) + assert.NoError(t, err) + for _, pod := range podList.Items { + for _, annotation := range []string{podAnnotation, customPodAnnotation} { + assert.Contains(t, pod.Annotations, annotation, + fmt.Sprintf("pod %s should contain annotation %s, found %#v", pod.Name, annotation, pod.Annotations)) + } + } + + cronJobList, err := cluster.KubeClient.CronJobs(namespace).List(context.TODO(), clusterOptions) + assert.NoError(t, err) + for _, cronJob := range cronJobList.Items { + for _, annotation := range []string{podAnnotation, customPodAnnotation} { + assert.Contains(t, cronJob.Spec.JobTemplate.Spec.Template.Annotations, annotation, + fmt.Sprintf("logical backup cron job's pod template should contain annotation %s, found %#v", + annotation, cronJob.Spec.JobTemplate.Spec.Template.Annotations)) + } + } + + // 2 PodAnnotations removed + newSpec := cluster.Postgresql.DeepCopy() + newSpec.Spec.PodAnnotations = nil + cluster.OpConfig.CustomPodAnnotations = nil + err = cluster.Sync(newSpec) + assert.NoError(t, err) + + stsList, err = cluster.KubeClient.StatefulSets(namespace).List(context.TODO(), clusterOptions) + assert.NoError(t, err) + for _, sts := range stsList.Items { + for _, annotation := range []string{podAnnotation, customPodAnnotation} { + assert.NotContains(t, sts.Spec.Template.Annotations, annotation) + } + } + + for _, role := range []PostgresRole{Master, Replica} { + deploy, err := cluster.KubeClient.Deployments(namespace).Get(context.TODO(), cluster.connectionPoolerName(role), metav1.GetOptions{}) + assert.NoError(t, err) + for _, annotation := range []string{podAnnotation, customPodAnnotation} { + 
assert.NotContains(t, deploy.Spec.Template.Annotations, annotation, + fmt.Sprintf("pooler deployment pod template %s should not contain annotation %s, found %#v", + deploy.Name, annotation, deploy.Spec.Template.Annotations)) + } + } + + podList, err = cluster.KubeClient.Pods(namespace).List(context.TODO(), clusterOptions) + assert.NoError(t, err) + for _, pod := range podList.Items { + for _, annotation := range []string{podAnnotation, customPodAnnotation} { + assert.NotContains(t, pod.Annotations, annotation, + fmt.Sprintf("pod %s should not contain annotation %s, found %#v", pod.Name, annotation, pod.Annotations)) + } + } + + cronJobList, err = cluster.KubeClient.CronJobs(namespace).List(context.TODO(), clusterOptions) + assert.NoError(t, err) + for _, cronJob := range cronJobList.Items { + for _, annotation := range []string{podAnnotation, customPodAnnotation} { + assert.NotContains(t, cronJob.Spec.JobTemplate.Spec.Template.Annotations, annotation, + fmt.Sprintf("logical backup cron job's pod template should not contain annotation %s, found %#v", + annotation, cronJob.Spec.JobTemplate.Spec.Template.Annotations)) + } + } +} + func TestCheckAndSetGlobalPostgreSQLConfiguration(t *testing.T) { testName := "test config comparison" client, _ := newFakeK8sSyncClient() @@ -200,7 +377,7 @@ func TestCheckAndSetGlobalPostgreSQLConfiguration(t *testing.T) { // mocking a config after setConfig is called configJson := `{"postgresql": {"parameters": {"log_min_duration_statement": 200, "max_connections": 50}}}, "ttl": 20}` - r := ioutil.NopCloser(bytes.NewReader([]byte(configJson))) + r := io.NopCloser(bytes.NewReader([]byte(configJson))) response := http.Response{ StatusCode: 200, @@ -480,6 +657,140 @@ func TestCheckAndSetGlobalPostgreSQLConfiguration(t *testing.T) { } } +func TestSyncStandbyClusterConfiguration(t *testing.T) { + client, _ := newFakeK8sSyncClient() + clusterName := "acid-standby-cluster" + applicationLabel := "spilo" + namespace := "default" + + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + pg := acidv1.Postgresql{ + ObjectMeta: metav1.ObjectMeta{ + Name: clusterName, + Namespace: namespace, + }, + Spec: acidv1.PostgresSpec{ + NumberOfInstances: int32(1), + Volume: acidv1.Volume{ + Size: "1Gi", + }, + }, + } + + var cluster = New( + Config{ + OpConfig: config.Config{ + PatroniAPICheckInterval: time.Duration(1), + PatroniAPICheckTimeout: time.Duration(5), + PodManagementPolicy: "ordered_ready", + Resources: config.Resources{ + ClusterLabels: map[string]string{"application": applicationLabel}, + ClusterNameLabel: "cluster-name", + DefaultCPURequest: "300m", + DefaultCPULimit: "300m", + DefaultMemoryRequest: "300Mi", + DefaultMemoryLimit: "300Mi", + MinInstances: int32(-1), + MaxInstances: int32(-1), + PodRoleLabel: "spilo-role", + ResourceCheckInterval: time.Duration(3), + ResourceCheckTimeout: time.Duration(10), + }, + }, + }, client, pg, logger, eventRecorder) + + cluster.Name = clusterName + cluster.Namespace = namespace + + // mocking a config after getConfig is called + mockClient := mocks.NewMockHTTPClient(ctrl) + configJson := `{"ttl": 20}` + r := io.NopCloser(bytes.NewReader([]byte(configJson))) + response := http.Response{ + StatusCode: 200, + Body: r, + } + mockClient.EXPECT().Get(gomock.Any()).Return(&response, nil).AnyTimes() + + // mocking a config after setConfig is called + standbyJson := `{"standby_cluster":{"create_replica_methods":["bootstrap_standby_with_wale","basebackup_fast_xlog"],"restore_command":"envdir \"/run/etc/wal-e.d/env-standby\" 
/scripts/restore_command.sh \"%f\" \"%p\""}}`
+	r = io.NopCloser(bytes.NewReader([]byte(standbyJson)))
+	response = http.Response{
+		StatusCode: 200,
+		Body:       r,
+	}
+	mockClient.EXPECT().Do(gomock.Any()).Return(&response, nil).AnyTimes()
+	p := patroni.New(patroniLogger, mockClient)
+	cluster.patroni = p
+
+	mockPod := newMockPod("192.168.100.1")
+	mockPod.Name = fmt.Sprintf("%s-0", clusterName)
+	mockPod.Namespace = namespace
+	podLabels := map[string]string{
+		"cluster-name": clusterName,
+		"application":  applicationLabel,
+		"spilo-role":   "master",
+	}
+	mockPod.Labels = podLabels
+	client.PodsGetter.Pods(namespace).Create(context.TODO(), mockPod, metav1.CreateOptions{})
+
+	// create a statefulset
+	sts, err := cluster.createStatefulSet()
+	assert.NoError(t, err)
+
+	// check that pods do not have a STANDBY_* environment variable
+	assert.NotContains(t, sts.Spec.Template.Spec.Containers[0].Env, v1.EnvVar{Name: "STANDBY_METHOD", Value: "STANDBY_WITH_WALE"})
+
+	// add standby section
+	cluster.Spec.StandbyCluster = &acidv1.StandbyDescription{
+		S3WalPath: "s3://custom/path/to/bucket/",
+	}
+	cluster.syncStatefulSet()
+	updatedSts := cluster.Statefulset
+
+	// check that pods now have a STANDBY_* environment variable
+	assert.Contains(t, updatedSts.Spec.Template.Spec.Containers[0].Env, v1.EnvVar{Name: "STANDBY_METHOD", Value: "STANDBY_WITH_WALE"})
+
+	// this should update the Patroni config
+	err = cluster.syncStandbyClusterConfiguration()
+	assert.NoError(t, err)
+
+	configJson = `{"standby_cluster":{"create_replica_methods":["bootstrap_standby_with_wale","basebackup_fast_xlog"],"restore_command":"envdir \"/run/etc/wal-e.d/env-standby\" /scripts/restore_command.sh \"%f\" \"%p\""}, "ttl": 20}`
+	r = io.NopCloser(bytes.NewReader([]byte(configJson)))
+	response = http.Response{
+		StatusCode: 200,
+		Body:       r,
+	}
+	mockClient.EXPECT().Get(gomock.Any()).Return(&response, nil).AnyTimes()
+
+	pods, err := cluster.listPods()
+	assert.NoError(t, err)
+
+	_, _, err = cluster.patroni.GetConfig(&pods[0])
+	assert.NoError(t, err)
+	// ToDo extend GetConfig to return standby_cluster setting to compare
+	/*
+		defaultStandbyParameters := map[string]interface{}{
+			"create_replica_methods": []string{"bootstrap_standby_with_wale", "basebackup_fast_xlog"},
+			"restore_command":        "envdir \"/run/etc/wal-e.d/env-standby\" /scripts/restore_command.sh \"%f\" \"%p\"",
+		}
+		assert.True(t, reflect.DeepEqual(defaultStandbyParameters, standbyCluster))
+	*/
+	// remove standby section
+	cluster.Spec.StandbyCluster = &acidv1.StandbyDescription{}
+	cluster.syncStatefulSet()
+	updatedSts2 := cluster.Statefulset
+
+	// check that pods do not have a STANDBY_* environment variable
+	assert.NotContains(t, updatedSts2.Spec.Template.Spec.Containers[0].Env, v1.EnvVar{Name: "STANDBY_METHOD", Value: "STANDBY_WITH_WALE"})
+
+	// this should update the Patroni config again
+	err = cluster.syncStandbyClusterConfiguration()
+	assert.NoError(t, err)
+}
+
 func TestUpdateSecret(t *testing.T) {
 	testName := "test syncing secrets"
 	client, _ := newFakeK8sSyncSecretsClient()
@@ -488,6 +799,7 @@ func TestUpdateSecret(t *testing.T) {
 	namespace := "default"
 	dbname := "app"
 	dbowner := "appowner"
+	appUser := "foo"
 	secretTemplate := config.StringTemplate("{username}.{cluster}.credentials")
 	retentionUsers := make([]string, 0)
@@ -499,14 +811,15 @@
 		},
 		Spec: acidv1.PostgresSpec{
 			Databases: map[string]string{dbname: dbowner},
-			Users:     map[string]acidv1.UserFlags{"foo": {}, dbowner: {}},
+			Users:
map[string]acidv1.UserFlags{appUser: {}, "bar": {}, dbowner: {}}, + UsersIgnoringSecretRotation: []string{"bar"}, UsersWithInPlaceSecretRotation: []string{dbowner}, Streams: []acidv1.Stream{ { ApplicationId: appId, Database: dbname, Tables: map[string]acidv1.StreamTable{ - "data.foo": acidv1.StreamTable{ + "data.foo": { EventType: "stream-type-b", }, }, @@ -577,6 +890,9 @@ func TestUpdateSecret(t *testing.T) { if pgUser.Origin != spec.RoleOriginManifest { continue } + if slices.Contains(pg.Spec.UsersIgnoringSecretRotation, username) { + continue + } t.Errorf("%s: password unchanged in updated secret for %s", testName, username) } @@ -604,4 +920,32 @@ func TestUpdateSecret(t *testing.T) { } } } + + // switch rotation for foo to in-place + inPlaceRotationUsers := []string{dbowner, appUser} + cluster.Spec.UsersWithInPlaceSecretRotation = inPlaceRotationUsers + cluster.initUsers() + cluster.syncSecrets() + updatedSecret, err := cluster.KubeClient.Secrets(namespace).Get(context.TODO(), cluster.credentialSecretName(appUser), metav1.GetOptions{}) + assert.NoError(t, err) + + // username in secret should be switched to original user + currentUsername := string(updatedSecret.Data["username"]) + if currentUsername != appUser { + t.Errorf("%s: updated secret does not contain correct username: expected %s, got %s", testName, appUser, currentUsername) + } + + // switch rotation back to rotation user + inPlaceRotationUsers = []string{dbowner} + cluster.Spec.UsersWithInPlaceSecretRotation = inPlaceRotationUsers + cluster.initUsers() + cluster.syncSecrets() + updatedSecret, err = cluster.KubeClient.Secrets(namespace).Get(context.TODO(), cluster.credentialSecretName(appUser), metav1.GetOptions{}) + assert.NoError(t, err) + + // username in secret will only be switched after next rotation date is passed + currentUsername = string(updatedSecret.Data["username"]) + if currentUsername != appUser { + t.Errorf("%s: updated secret does not contain expected username: expected %s, got %s", testName, appUser, currentUsername) + } } diff --git a/pkg/cluster/types.go b/pkg/cluster/types.go index 1b4d0f389..17c4e705e 100644 --- a/pkg/cluster/types.go +++ b/pkg/cluster/types.go @@ -17,6 +17,7 @@ const ( // spilo roles Master PostgresRole = "master" Replica PostgresRole = "replica" + Patroni PostgresRole = "config" // roles returned by Patroni cluster endpoint Leader PostgresRole = "leader" @@ -57,15 +58,16 @@ type WorkerStatus struct { // ClusterStatus describes status of the cluster type ClusterStatus struct { - Team string - Cluster string - Namespace string - MasterService *v1.Service - ReplicaService *v1.Service - MasterEndpoint *v1.Endpoints - ReplicaEndpoint *v1.Endpoints - StatefulSet *appsv1.StatefulSet - PodDisruptionBudget *policyv1.PodDisruptionBudget + Team string + Cluster string + Namespace string + MasterService *v1.Service + ReplicaService *v1.Service + MasterEndpoint *v1.Endpoints + ReplicaEndpoint *v1.Endpoints + StatefulSet *appsv1.StatefulSet + PrimaryPodDisruptionBudget *policyv1.PodDisruptionBudget + CriticalOpPodDisruptionBudget *policyv1.PodDisruptionBudget CurrentProcess Process Worker uint32 diff --git a/pkg/cluster/util.go b/pkg/cluster/util.go index 401e43155..0e31ecc32 100644 --- a/pkg/cluster/util.go +++ b/pkg/cluster/util.go @@ -78,7 +78,14 @@ func (c *Cluster) isProtectedUsername(username string) bool { } func (c *Cluster) isSystemUsername(username string) bool { - return (username == c.OpConfig.SuperUsername || username == c.OpConfig.ReplicationUsername) + // is there a pooler system 
user defined + for _, systemUser := range c.systemUsers { + if username == systemUser.Name { + return true + } + } + + return false } func isValidFlag(flag string) bool { @@ -169,6 +176,10 @@ func (c *Cluster) logPDBChanges(old, new *policyv1.PodDisruptionBudget, isUpdate } logNiceDiff(c.logger, old.Spec, new.Spec) + + if reason != "" { + c.logger.Infof("reason: %s", reason) + } } func logNiceDiff(log *logrus.Entry, old, new interface{}) { @@ -182,7 +193,7 @@ func logNiceDiff(log *logrus.Entry, old, new interface{}) { nice := nicediff.Diff(string(o), string(n), true) for _, s := range strings.Split(nice, "\n") { // " is not needed in the value to understand - log.Debugf(strings.ReplaceAll(s, "\"", "")) + log.Debug(strings.ReplaceAll(s, "\"", "")) } } @@ -198,7 +209,7 @@ func (c *Cluster) logStatefulSetChanges(old, new *appsv1.StatefulSet, isUpdate b logNiceDiff(c.logger, old.Spec, new.Spec) if !reflect.DeepEqual(old.Annotations, new.Annotations) { - c.logger.Debugf("metadata.annotation are different") + c.logger.Debug("metadata.annotation are different") logNiceDiff(c.logger, old.Annotations, new.Annotations) } @@ -269,7 +280,7 @@ func (c *Cluster) getTeamMembers(teamID string) ([]string, error) { } if !c.OpConfig.EnableTeamsAPI { - c.logger.Debugf("team API is disabled") + c.logger.Debug("team API is disabled") return members, nil } @@ -405,7 +416,7 @@ func (c *Cluster) _waitPodLabelsReady(anyReplica bool) error { podsNumber = len(pods.Items) c.logger.Debugf("Waiting for %d pods to become ready", podsNumber) } else { - c.logger.Debugf("Waiting for any replica pod to become ready") + c.logger.Debug("Waiting for any replica pod to become ready") } err := retryutil.Retry(c.OpConfig.ResourceCheckInterval, c.OpConfig.ResourceCheckTimeout, @@ -438,10 +449,6 @@ func (c *Cluster) _waitPodLabelsReady(anyReplica bool) error { return err } -func (c *Cluster) waitForAnyReplicaLabelReady() error { - return c._waitPodLabelsReady(true) -} - func (c *Cluster) waitForAllPodsLabelReady() error { return c._waitPodLabelsReady(false) } @@ -655,3 +662,24 @@ func parseResourceRequirements(resourcesRequirement v1.ResourceRequirements) (ac } return resources, nil } + +func isInMaintenanceWindow(specMaintenanceWindows []acidv1.MaintenanceWindow) bool { + if len(specMaintenanceWindows) == 0 { + return true + } + now := time.Now() + currentDay := now.Weekday() + currentTime := now.Format("15:04") + + for _, window := range specMaintenanceWindows { + startTime := window.StartTime.Format("15:04") + endTime := window.EndTime.Format("15:04") + + if window.Everyday || window.Weekday == currentDay { + if currentTime >= startTime && currentTime <= endTime { + return true + } + } + } + return false +} diff --git a/pkg/cluster/util_test.go b/pkg/cluster/util_test.go index 5d8b92f2c..9cd7dc7e9 100644 --- a/pkg/cluster/util_test.go +++ b/pkg/cluster/util_test.go @@ -1,57 +1,299 @@ package cluster import ( + "bytes" "context" + "fmt" + "io" + "maps" + "net/http" + "reflect" "testing" + "time" + "github.com/golang/mock/gomock" "github.com/stretchr/testify/assert" + "github.com/zalando/postgres-operator/mocks" acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1" fakeacidv1 "github.com/zalando/postgres-operator/pkg/generated/clientset/versioned/fake" "github.com/zalando/postgres-operator/pkg/util" "github.com/zalando/postgres-operator/pkg/util/config" "github.com/zalando/postgres-operator/pkg/util/k8sutil" + "github.com/zalando/postgres-operator/pkg/util/patroni" + v1 "k8s.io/api/core/v1" metav1 
"k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/types" k8sFake "k8s.io/client-go/kubernetes/fake" ) +var externalAnnotations = map[string]string{"existing": "annotation"} + +func mustParseTime(s string) metav1.Time { + v, err := time.Parse("15:04", s) + if err != nil { + panic(err) + } + + return metav1.Time{Time: v.UTC()} +} + func newFakeK8sAnnotationsClient() (k8sutil.KubernetesClient, *k8sFake.Clientset) { clientSet := k8sFake.NewSimpleClientset() acidClientSet := fakeacidv1.NewSimpleClientset() return k8sutil.KubernetesClient{ - PodDisruptionBudgetsGetter: clientSet.PolicyV1(), - ServicesGetter: clientSet.CoreV1(), - StatefulSetsGetter: clientSet.AppsV1(), - PostgresqlsGetter: acidClientSet.AcidV1(), + PodDisruptionBudgetsGetter: clientSet.PolicyV1(), + SecretsGetter: clientSet.CoreV1(), + ServicesGetter: clientSet.CoreV1(), + StatefulSetsGetter: clientSet.AppsV1(), + PostgresqlsGetter: acidClientSet.AcidV1(), + PersistentVolumeClaimsGetter: clientSet.CoreV1(), + PersistentVolumesGetter: clientSet.CoreV1(), + EndpointsGetter: clientSet.CoreV1(), + ConfigMapsGetter: clientSet.CoreV1(), + PodsGetter: clientSet.CoreV1(), + DeploymentsGetter: clientSet.AppsV1(), + CronJobsGetter: clientSet.BatchV1(), }, clientSet } -func TestInheritedAnnotations(t *testing.T) { - testName := "test inheriting annotations from manifest" - client, _ := newFakeK8sAnnotationsClient() - clusterName := "acid-test-cluster" - namespace := "default" - annotationValue := "acid" - role := Master +func clusterLabelsOptions(cluster *Cluster) metav1.ListOptions { + clusterLabel := labels.Set(map[string]string{cluster.OpConfig.ClusterNameLabel: cluster.Name}) + return metav1.ListOptions{ + LabelSelector: clusterLabel.String(), + } +} + +func checkResourcesInheritedAnnotations(cluster *Cluster, resultAnnotations map[string]string) error { + clusterOptions := clusterLabelsOptions(cluster) + // helper functions + containsAnnotations := func(expected map[string]string, actual map[string]string, objName string, objType string) error { + if !util.MapContains(actual, expected) { + return fmt.Errorf("%s %v expected annotations %#v to be contained in %#v", objType, objName, expected, actual) + } + return nil + } + + updateAnnotations := func(annotations map[string]string) map[string]string { + result := make(map[string]string, 0) + for anno := range annotations { + if _, ok := externalAnnotations[anno]; !ok { + result[anno] = annotations[anno] + } + } + return result + } + + checkSts := func(annotations map[string]string) error { + stsList, err := cluster.KubeClient.StatefulSets(namespace).List(context.TODO(), clusterOptions) + if err != nil { + return err + } + stsAnnotations := updateAnnotations(annotations) + + for _, sts := range stsList.Items { + if err := containsAnnotations(stsAnnotations, sts.Annotations, sts.ObjectMeta.Name, "StatefulSet"); err != nil { + return err + } + // pod template + if err := containsAnnotations(stsAnnotations, sts.Spec.Template.Annotations, sts.ObjectMeta.Name, "StatefulSet pod template"); err != nil { + return err + } + // pvc template + if err := containsAnnotations(stsAnnotations, sts.Spec.VolumeClaimTemplates[0].Annotations, sts.ObjectMeta.Name, "StatefulSet pvc template"); err != nil { + return err + } + } + return nil + } + + checkPods := func(annotations map[string]string) error { + podList, err := cluster.KubeClient.Pods(namespace).List(context.TODO(), clusterOptions) + if err != nil { + return err + } + for _, pod := range 
podList.Items { + if err := containsAnnotations(annotations, pod.Annotations, pod.ObjectMeta.Name, "Pod"); err != nil { + return err + } + } + return nil + } + + checkSvc := func(annotations map[string]string) error { + svcList, err := cluster.KubeClient.Services(namespace).List(context.TODO(), clusterOptions) + if err != nil { + return err + } + for _, svc := range svcList.Items { + if err := containsAnnotations(annotations, svc.Annotations, svc.ObjectMeta.Name, "Service"); err != nil { + return err + } + } + return nil + } + + checkPdb := func(annotations map[string]string) error { + pdbList, err := cluster.KubeClient.PodDisruptionBudgets(namespace).List(context.TODO(), clusterOptions) + if err != nil { + return err + } + for _, pdb := range pdbList.Items { + if err := containsAnnotations(updateAnnotations(annotations), pdb.Annotations, pdb.ObjectMeta.Name, "Pod Disruption Budget"); err != nil { + return err + } + } + return nil + } + checkPvc := func(annotations map[string]string) error { + pvcList, err := cluster.KubeClient.PersistentVolumeClaims(namespace).List(context.TODO(), clusterOptions) + if err != nil { + return err + } + for _, pvc := range pvcList.Items { + if err := containsAnnotations(annotations, pvc.Annotations, pvc.ObjectMeta.Name, "Volume claim"); err != nil { + return err + } + } + return nil + } + + checkPooler := func(annotations map[string]string) error { + for _, role := range []PostgresRole{Master, Replica} { + deploy, err := cluster.KubeClient.Deployments(namespace).Get(context.TODO(), cluster.connectionPoolerName(role), metav1.GetOptions{}) + if err != nil { + return err + } + if err := containsAnnotations(annotations, deploy.Annotations, deploy.Name, "Deployment"); err != nil { + return err + } + if err := containsAnnotations(updateAnnotations(annotations), deploy.Spec.Template.Annotations, deploy.Name, "Pooler pod template"); err != nil { + return err + } + } + return nil + } + + checkCronJob := func(annotations map[string]string) error { + cronJobList, err := cluster.KubeClient.CronJobs(namespace).List(context.TODO(), clusterOptions) + if err != nil { + return err + } + for _, cronJob := range cronJobList.Items { + if err := containsAnnotations(annotations, cronJob.Annotations, cronJob.ObjectMeta.Name, "Logical backup cron job"); err != nil { + return err + } + if err := containsAnnotations(updateAnnotations(annotations), cronJob.Spec.JobTemplate.Spec.Template.Annotations, cronJob.Name, "Logical backup cron job pod template"); err != nil { + return err + } + } + return nil + } + + checkSecrets := func(annotations map[string]string) error { + secretList, err := cluster.KubeClient.Secrets(namespace).List(context.TODO(), clusterOptions) + if err != nil { + return err + } + for _, secret := range secretList.Items { + if err := containsAnnotations(annotations, secret.Annotations, secret.Name, "Secret"); err != nil { + return err + } + } + return nil + } + + checkEndpoints := func(annotations map[string]string) error { + endpointsList, err := cluster.KubeClient.Endpoints(namespace).List(context.TODO(), clusterOptions) + if err != nil { + return err + } + for _, ep := range endpointsList.Items { + if err := containsAnnotations(annotations, ep.Annotations, ep.Name, "Endpoints"); err != nil { + return err + } + } + return nil + } + + checkConfigMaps := func(annotations map[string]string) error { + cmList, err := cluster.KubeClient.ConfigMaps(namespace).List(context.TODO(), clusterOptions) + if err != nil { + return err + } + for _, cm := range cmList.Items { + if err 
:= containsAnnotations(annotations, cm.Annotations, cm.ObjectMeta.Name, "ConfigMap"); err != nil { + return err + } + } + return nil + } + + checkFuncs := []func(map[string]string) error{ + checkSts, checkPods, checkSvc, checkPdb, checkPooler, checkCronJob, checkPvc, checkSecrets, checkEndpoints, checkConfigMaps, + } + for _, f := range checkFuncs { + if err := f(resultAnnotations); err != nil { + return err + } + } + return nil +} + +func createPods(cluster *Cluster) []v1.Pod { + podsList := make([]v1.Pod, 0) + for i, role := range []PostgresRole{Master, Replica} { + podsList = append(podsList, v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-%d", cluster.Name, i), + Namespace: namespace, + Labels: map[string]string{ + "application": "spilo", + "cluster-name": cluster.Name, + "spilo-role": string(role), + }, + }, + }) + podsList = append(podsList, v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-pooler-%s", cluster.Name, role), + Namespace: namespace, + Labels: cluster.connectionPoolerLabels(role, true).MatchLabels, + }, + }) + } + + return podsList +} + +func newInheritedAnnotationsCluster(client k8sutil.KubernetesClient) (*Cluster, error) { pg := acidv1.Postgresql{ ObjectMeta: metav1.ObjectMeta{ Name: clusterName, Annotations: map[string]string{ - "owned-by": annotationValue, + "owned-by": "acid", + "foo": "bar", // should not be inherited }, }, Spec: acidv1.PostgresSpec{ + EnableConnectionPooler: boolToPointer(true), EnableReplicaConnectionPooler: boolToPointer(true), + EnableLogicalBackup: true, Volume: acidv1.Volume{ Size: "1Gi", }, + NumberOfInstances: 2, }, } - var cluster = New( + cluster := New( Config{ OpConfig: config.Config{ + PatroniAPICheckInterval: time.Duration(1), + PatroniAPICheckTimeout: time.Duration(5), + KubernetesUseConfigMaps: true, ConnectionPooler: config.ConnectionPooler{ ConnectionPoolerDefaultCPURequest: "100m", ConnectionPoolerDefaultCPULimit: "100m", @@ -59,85 +301,313 @@ func TestInheritedAnnotations(t *testing.T) { ConnectionPoolerDefaultMemoryLimit: "100Mi", NumberOfInstances: k8sutil.Int32ToPointer(1), }, + PDBNameFormat: "postgres-{cluster}-pdb", PodManagementPolicy: "ordered_ready", Resources: config.Resources{ - ClusterLabels: map[string]string{"application": "spilo"}, - ClusterNameLabel: "cluster-name", - DefaultCPURequest: "300m", - DefaultCPULimit: "300m", - DefaultMemoryRequest: "300Mi", - DefaultMemoryLimit: "300Mi", - InheritedAnnotations: []string{"owned-by"}, - PodRoleLabel: "spilo-role", + ClusterLabels: map[string]string{"application": "spilo"}, + ClusterNameLabel: "cluster-name", + DefaultCPURequest: "300m", + DefaultCPULimit: "300m", + DefaultMemoryRequest: "300Mi", + DefaultMemoryLimit: "300Mi", + InheritedAnnotations: []string{"owned-by"}, + PodRoleLabel: "spilo-role", + ResourceCheckInterval: time.Duration(testResourceCheckInterval), + ResourceCheckTimeout: time.Duration(testResourceCheckTimeout), + MinInstances: -1, + MaxInstances: -1, }, }, }, client, pg, logger, eventRecorder) - cluster.Name = clusterName cluster.Namespace = namespace + _, err := cluster.createStatefulSet() + if err != nil { + return nil, err + } + _, err = cluster.createService(Master) + if err != nil { + return nil, err + } + err = cluster.createPodDisruptionBudgets() + if err != nil { + return nil, err + } + _, err = cluster.createConnectionPooler(mockInstallLookupFunction) + if err != nil { + return nil, err + } + err = cluster.createLogicalBackupJob() + if err != nil { + return nil, err + } + pvcList := CreatePVCs(namespace, 
clusterName, cluster.labelsSet(false), 2, "1Gi") + for _, pvc := range pvcList.Items { + _, err = cluster.KubeClient.PersistentVolumeClaims(namespace).Create(context.TODO(), &pvc, metav1.CreateOptions{}) + if err != nil { + return nil, err + } + } + podsList := createPods(cluster) + for _, pod := range podsList { + _, err = cluster.KubeClient.Pods(namespace).Create(context.TODO(), &pod, metav1.CreateOptions{}) + if err != nil { + return nil, err + } + } + + // resources which Patroni creates + if err = createPatroniResources(cluster); err != nil { + return nil, err + } - // test annotationsSet function - inheritedAnnotations := cluster.annotationsSet(nil) + return cluster, nil +} - listOptions := metav1.ListOptions{ - LabelSelector: cluster.labelsSet(false).String(), +func createPatroniResources(cluster *Cluster) error { + patroniService := cluster.generateService(Replica, &pg.Spec) + patroniService.ObjectMeta.Name = cluster.serviceName(Patroni) + _, err := cluster.KubeClient.Services(namespace).Create(context.TODO(), patroniService, metav1.CreateOptions{}) + if err != nil { + return err } - // check statefulset annotations - _, err := cluster.createStatefulSet() - assert.NoError(t, err) + for _, suffix := range patroniObjectSuffixes { + metadata := metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-%s", clusterName, suffix), + Namespace: namespace, + Annotations: map[string]string{ + "initialize": "123456789", + }, + Labels: cluster.labelsSet(false), + } - stsList, err := client.StatefulSets(namespace).List(context.TODO(), listOptions) - assert.NoError(t, err) - for _, sts := range stsList.Items { - if !(util.MapContains(sts.ObjectMeta.Annotations, inheritedAnnotations)) { - t.Errorf("%s: StatefulSet %v not inherited annotations %#v, got %#v", testName, sts.ObjectMeta.Name, inheritedAnnotations, sts.ObjectMeta.Annotations) + if cluster.OpConfig.KubernetesUseConfigMaps { + configMap := v1.ConfigMap{ + ObjectMeta: metadata, + } + _, err := cluster.KubeClient.ConfigMaps(namespace).Create(context.TODO(), &configMap, metav1.CreateOptions{}) + if err != nil { + return err + } + } else { + endpoints := v1.Endpoints{ + ObjectMeta: metadata, + } + _, err := cluster.KubeClient.Endpoints(namespace).Create(context.TODO(), &endpoints, metav1.CreateOptions{}) + if err != nil { + return err + } } - // pod template - if !(util.MapContains(sts.Spec.Template.ObjectMeta.Annotations, inheritedAnnotations)) { - t.Errorf("%s: pod template %v not inherited annotations %#v, got %#v", testName, sts.ObjectMeta.Name, inheritedAnnotations, sts.ObjectMeta.Annotations) + } + + return nil +} + +func annotateResources(cluster *Cluster) error { + clusterOptions := clusterLabelsOptions(cluster) + patchData, err := metaAnnotationsPatch(externalAnnotations) + if err != nil { + return err + } + + stsList, err := cluster.KubeClient.StatefulSets(namespace).List(context.TODO(), clusterOptions) + if err != nil { + return err + } + for _, sts := range stsList.Items { + sts.Annotations = externalAnnotations + if _, err = cluster.KubeClient.StatefulSets(namespace).Patch(context.TODO(), sts.Name, types.MergePatchType, []byte(patchData), metav1.PatchOptions{}); err != nil { + return err } - // pvc template - if !(util.MapContains(sts.Spec.VolumeClaimTemplates[0].Annotations, inheritedAnnotations)) { - t.Errorf("%s: PVC template %v not inherited annotations %#v, got %#v", testName, sts.ObjectMeta.Name, inheritedAnnotations, sts.ObjectMeta.Annotations) + } + + podList, err := cluster.KubeClient.Pods(namespace).List(context.TODO(), 
clusterOptions) + if err != nil { + return err + } + for _, pod := range podList.Items { + pod.Annotations = externalAnnotations + if _, err = cluster.KubeClient.Pods(namespace).Patch(context.TODO(), pod.Name, types.MergePatchType, []byte(patchData), metav1.PatchOptions{}); err != nil { + return err } } - // check service annotations - cluster.createService(Master) - svcList, err := client.Services(namespace).List(context.TODO(), listOptions) - assert.NoError(t, err) + svcList, err := cluster.KubeClient.Services(namespace).List(context.TODO(), clusterOptions) + if err != nil { + return err + } for _, svc := range svcList.Items { - if !(util.MapContains(svc.ObjectMeta.Annotations, inheritedAnnotations)) { - t.Errorf("%s: Service %v not inherited annotations %#v, got %#v", testName, svc.ObjectMeta.Name, inheritedAnnotations, svc.ObjectMeta.Annotations) + svc.Annotations = externalAnnotations + if _, err = cluster.KubeClient.Services(namespace).Patch(context.TODO(), svc.Name, types.MergePatchType, []byte(patchData), metav1.PatchOptions{}); err != nil { + return err } } - // check pod disruption budget annotations - cluster.createPodDisruptionBudget() - pdbList, err := client.PodDisruptionBudgets(namespace).List(context.TODO(), listOptions) - assert.NoError(t, err) + pdbList, err := cluster.KubeClient.PodDisruptionBudgets(namespace).List(context.TODO(), clusterOptions) + if err != nil { + return err + } for _, pdb := range pdbList.Items { - if !(util.MapContains(pdb.ObjectMeta.Annotations, inheritedAnnotations)) { - t.Errorf("%s: Pod Disruption Budget %v not inherited annotations %#v, got %#v", testName, pdb.ObjectMeta.Name, inheritedAnnotations, pdb.ObjectMeta.Annotations) + pdb.Annotations = externalAnnotations + _, err = cluster.KubeClient.PodDisruptionBudgets(namespace).Patch(context.TODO(), pdb.Name, types.MergePatchType, []byte(patchData), metav1.PatchOptions{}) + if err != nil { + return err + } + } + + cronJobList, err := cluster.KubeClient.CronJobs(namespace).List(context.TODO(), clusterOptions) + if err != nil { + return err + } + for _, cronJob := range cronJobList.Items { + cronJob.Annotations = externalAnnotations + _, err = cluster.KubeClient.CronJobs(namespace).Patch(context.TODO(), cronJob.Name, types.MergePatchType, []byte(patchData), metav1.PatchOptions{}) + if err != nil { + return err } } - // check pooler deployment annotations - cluster.ConnectionPooler = map[PostgresRole]*ConnectionPoolerObjects{} - cluster.ConnectionPooler[role] = &ConnectionPoolerObjects{ - Name: cluster.connectionPoolerName(role), - ClusterName: cluster.Name, - Namespace: cluster.Namespace, - Role: role, + pvcList, err := cluster.KubeClient.PersistentVolumeClaims(namespace).List(context.TODO(), clusterOptions) + if err != nil { + return err } - deploy, err := cluster.generateConnectionPoolerDeployment(cluster.ConnectionPooler[role]) + for _, pvc := range pvcList.Items { + pvc.Annotations = externalAnnotations + if _, err = cluster.KubeClient.PersistentVolumeClaims(namespace).Patch(context.TODO(), pvc.Name, types.MergePatchType, []byte(patchData), metav1.PatchOptions{}); err != nil { + return err + } + } + + for _, role := range []PostgresRole{Master, Replica} { + deploy, err := cluster.KubeClient.Deployments(namespace).Get(context.TODO(), cluster.connectionPoolerName(role), metav1.GetOptions{}) + if err != nil { + return err + } + deploy.Annotations = externalAnnotations + if _, err = cluster.KubeClient.Deployments(namespace).Patch(context.TODO(), deploy.Name, types.MergePatchType, []byte(patchData), 
metav1.PatchOptions{}); err != nil { + return err + } + } + + secrets, err := cluster.KubeClient.Secrets(namespace).List(context.TODO(), clusterOptions) + if err != nil { + return err + } + for _, secret := range secrets.Items { + secret.Annotations = externalAnnotations + if _, err = cluster.KubeClient.Secrets(namespace).Patch(context.TODO(), secret.Name, types.MergePatchType, []byte(patchData), metav1.PatchOptions{}); err != nil { + return err + } + } + + endpoints, err := cluster.KubeClient.Endpoints(namespace).List(context.TODO(), clusterOptions) + if err != nil { + return err + } + for _, ep := range endpoints.Items { + ep.Annotations = externalAnnotations + if _, err = cluster.KubeClient.Endpoints(namespace).Patch(context.TODO(), ep.Name, types.MergePatchType, []byte(patchData), metav1.PatchOptions{}); err != nil { + return err + } + } + + configMaps, err := cluster.KubeClient.ConfigMaps(namespace).List(context.TODO(), clusterOptions) + if err != nil { + return err + } + for _, cm := range configMaps.Items { + cm.Annotations = externalAnnotations + if _, err = cluster.KubeClient.ConfigMaps(namespace).Patch(context.TODO(), cm.Name, types.MergePatchType, []byte(patchData), metav1.PatchOptions{}); err != nil { + return err + } + } + + return nil +} + +func TestInheritedAnnotations(t *testing.T) { + // mocks + ctrl := gomock.NewController(t) + defer ctrl.Finish() + client, _ := newFakeK8sAnnotationsClient() + mockClient := mocks.NewMockHTTPClient(ctrl) + + cluster, err := newInheritedAnnotationsCluster(client) assert.NoError(t, err) - if !(util.MapContains(deploy.ObjectMeta.Annotations, inheritedAnnotations)) { - t.Errorf("%s: Deployment %v not inherited annotations %#v, got %#v", testName, deploy.ObjectMeta.Name, inheritedAnnotations, deploy.ObjectMeta.Annotations) + configJson := `{"postgresql": {"parameters": {"log_min_duration_statement": 200, "max_connections": 50}}}, "ttl": 20}` + response := http.Response{ + StatusCode: 200, + Body: io.NopCloser(bytes.NewReader([]byte(configJson))), } + mockClient.EXPECT().Do(gomock.Any()).Return(&response, nil).AnyTimes() + cluster.patroni = patroni.New(patroniLogger, mockClient) + + err = cluster.Sync(&cluster.Postgresql) + assert.NoError(t, err) + filterLabels := cluster.labelsSet(false) + + // Finally, tests! + result := map[string]string{"owned-by": "acid"} + assert.True(t, reflect.DeepEqual(result, cluster.annotationsSet(nil))) + + // 1. Check initial state + err = checkResourcesInheritedAnnotations(cluster, result) + assert.NoError(t, err) + + // 2. 
Check annotation value change + + // 2.1 Sync event + newSpec := cluster.Postgresql.DeepCopy() + newSpec.Annotations["owned-by"] = "fooSync" + result["owned-by"] = "fooSync" + + err = cluster.Sync(newSpec) + assert.NoError(t, err) + err = checkResourcesInheritedAnnotations(cluster, result) + assert.NoError(t, err) + + // + existing PVC without annotations + cluster.KubeClient.PersistentVolumeClaims(namespace).Create(context.TODO(), &CreatePVCs(namespace, clusterName, filterLabels, 3, "1Gi").Items[2], metav1.CreateOptions{}) + err = cluster.Sync(newSpec) + assert.NoError(t, err) + err = checkResourcesInheritedAnnotations(cluster, result) + assert.NoError(t, err) + + // 2.2 Update event + newSpec = cluster.Postgresql.DeepCopy() + newSpec.Annotations["owned-by"] = "fooUpdate" + result["owned-by"] = "fooUpdate" + // + new PVC + cluster.KubeClient.PersistentVolumeClaims(namespace).Create(context.TODO(), &CreatePVCs(namespace, clusterName, filterLabels, 4, "1Gi").Items[3], metav1.CreateOptions{}) + + err = cluster.Update(cluster.Postgresql.DeepCopy(), newSpec) + assert.NoError(t, err) + + err = checkResourcesInheritedAnnotations(cluster, result) + assert.NoError(t, err) + + // 3. Change from ConfigMaps to Endpoints + err = cluster.deletePatroniResources() + assert.NoError(t, err) + cluster.OpConfig.KubernetesUseConfigMaps = false + err = createPatroniResources(cluster) + assert.NoError(t, err) + err = cluster.Sync(newSpec.DeepCopy()) + assert.NoError(t, err) + err = checkResourcesInheritedAnnotations(cluster, result) + assert.NoError(t, err) + + // 4. Existing annotations (should not be removed) + err = annotateResources(cluster) + assert.NoError(t, err) + maps.Copy(result, externalAnnotations) + err = cluster.Sync(newSpec.DeepCopy()) + assert.NoError(t, err) + err = checkResourcesInheritedAnnotations(cluster, result) + assert.NoError(t, err) } func Test_trimCronjobName(t *testing.T) { @@ -179,3 +649,65 @@ func Test_trimCronjobName(t *testing.T) { }) } } + +func TestIsInMaintenanceWindow(t *testing.T) { + now := time.Now() + futureTimeStart := now.Add(1 * time.Hour) + futureTimeStartFormatted := futureTimeStart.Format("15:04") + futureTimeEnd := now.Add(2 * time.Hour) + futureTimeEndFormatted := futureTimeEnd.Format("15:04") + + tests := []struct { + name string + windows []acidv1.MaintenanceWindow + expected bool + }{ + { + name: "no maintenance windows", + windows: nil, + expected: true, + }, + { + name: "maintenance windows with everyday", + windows: []acidv1.MaintenanceWindow{ + { + Everyday: true, + StartTime: mustParseTime("00:00"), + EndTime: mustParseTime("23:59"), + }, + }, + expected: true, + }, + { + name: "maintenance windows with weekday", + windows: []acidv1.MaintenanceWindow{ + { + Weekday: now.Weekday(), + StartTime: mustParseTime("00:00"), + EndTime: mustParseTime("23:59"), + }, + }, + expected: true, + }, + { + name: "maintenance windows with future interval time", + windows: []acidv1.MaintenanceWindow{ + { + Weekday: now.Weekday(), + StartTime: mustParseTime(futureTimeStartFormatted), + EndTime: mustParseTime(futureTimeEndFormatted), + }, + }, + expected: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + cluster.Spec.MaintenanceWindows = tt.windows + if isInMaintenanceWindow(cluster.Spec.MaintenanceWindows) != tt.expected { + t.Errorf("Expected isInMaintenanceWindow to return %t", tt.expected) + } + }) + } +} diff --git a/pkg/cluster/volumes.go b/pkg/cluster/volumes.go index 1a4c7c73f..fee18beaf 100644 --- a/pkg/cluster/volumes.go +++ 
b/pkg/cluster/volumes.go @@ -9,13 +9,13 @@ import ( v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" "github.com/aws/aws-sdk-go/aws" - acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1" "github.com/zalando/postgres-operator/pkg/spec" - "github.com/zalando/postgres-operator/pkg/util" "github.com/zalando/postgres-operator/pkg/util/constants" "github.com/zalando/postgres-operator/pkg/util/filesystems" + "github.com/zalando/postgres-operator/pkg/util/k8sutil" "github.com/zalando/postgres-operator/pkg/util/volumes" ) @@ -42,18 +42,14 @@ func (c *Cluster) syncVolumes() error { c.logger.Errorf("errors occured during EBS volume adjustments: %v", err) } } + } - // resize pvc to adjust filesystem size until better K8s support - if err = c.syncVolumeClaims(); err != nil { - err = fmt.Errorf("could not sync persistent volume claims: %v", err) - return err - } - } else if c.OpConfig.StorageResizeMode == "pvc" { - if err = c.syncVolumeClaims(); err != nil { - err = fmt.Errorf("could not sync persistent volume claims: %v", err) - return err - } - } else if c.OpConfig.StorageResizeMode == "ebs" { + if err = c.syncVolumeClaims(); err != nil { + err = fmt.Errorf("could not sync persistent volume claims: %v", err) + return err + } + + if c.OpConfig.StorageResizeMode == "ebs" { // potentially enlarge volumes before changing the statefulset. By doing that // in this order we make sure the operator is not stuck waiting for a pod that // cannot start because it ran out of disk space. @@ -64,15 +60,13 @@ func (c *Cluster) syncVolumes() error { err = fmt.Errorf("could not sync persistent volumes: %v", err) return err } - } else { - c.logger.Infof("Storage resize is disabled (storage_resize_mode is off). 
Skipping volume sync.")
-	}
 
 	return nil
 }
 
 func (c *Cluster) syncUnderlyingEBSVolume() error {
-	c.logger.Infof("starting to sync EBS volumes: type, iops, throughput, and size")
+	c.logger.Debug("starting to sync EBS volumes: type, iops, throughput, and size")
 
 	var (
 		err error
@@ -142,7 +136,7 @@ func (c *Cluster) syncUnderlyingEBSVolume() error {
 }
 
 func (c *Cluster) populateVolumeMetaData() error {
-	c.logger.Infof("starting reading ebs meta data")
+	c.logger.Debug("starting reading ebs meta data")
 
 	pvs, err := c.listPersistentVolumes()
 	if err != nil {
@@ -157,7 +151,7 @@ func (c *Cluster) populateVolumeMetaData() error {
 	volumeIds := []string{}
 	var volumeID string
 	for _, pv := range pvs {
-		volumeID, err = c.VolumeResizer.ExtractVolumeID(pv.Spec.AWSElasticBlockStore.VolumeID)
+		volumeID, err = c.VolumeResizer.GetProviderVolumeID(pv)
 		if err != nil {
 			continue
 		}
@@ -171,7 +165,7 @@ func (c *Cluster) populateVolumeMetaData() error {
 	}
 
 	if len(currentVolumes) != len(c.EBSVolumes) && len(c.EBSVolumes) > 0 {
-		c.logger.Debugf("number of ebs volumes (%d) discovered differs from already known volumes (%d)", len(currentVolumes), len(c.EBSVolumes))
+		c.logger.Infof("number of ebs volumes (%d) discovered differs from already known volumes (%d)", len(currentVolumes), len(c.EBSVolumes))
 	}
 
 	// reset map, operator is not responsible for dangling ebs volumes
@@ -187,21 +181,64 @@ func (c *Cluster) populateVolumeMetaData() error {
 func (c *Cluster) syncVolumeClaims() error {
 	c.setProcessName("syncing volume claims")
 
-	needsResizing, err := c.volumeClaimsNeedResizing(c.Spec.Volume)
+	ignoreResize := false
+
+	if c.OpConfig.StorageResizeMode == "off" || c.OpConfig.StorageResizeMode == "ebs" {
+		ignoreResize = true
+		c.logger.Debugf("Storage resize mode is set to %q. Skipping volume size sync of persistent volume claims.", c.OpConfig.StorageResizeMode)
+	}
+
+	newSize, err := resource.ParseQuantity(c.Spec.Volume.Size)
 	if err != nil {
-		return fmt.Errorf("could not compare size of the volume claims: %v", err)
+		return fmt.Errorf("could not parse volume size from the manifest: %v", err)
 	}
+	manifestSize := quantityToGigabyte(newSize)
 
-	if !needsResizing {
-		c.logger.Infof("volume claims do not require changes")
-		return nil
+	pvcs, err := c.listPersistentVolumeClaims()
+	if err != nil {
+		return fmt.Errorf("could not list persistent volume claims: %v", err)
 	}
+	for _, pvc := range pvcs {
+		c.VolumeClaims[pvc.UID] = &pvc
+		needsUpdate := false
+		currentSize := quantityToGigabyte(pvc.Spec.Resources.Requests[v1.ResourceStorage])
+		if !ignoreResize && currentSize != manifestSize {
+			if currentSize < manifestSize {
+				pvc.Spec.Resources.Requests[v1.ResourceStorage] = newSize
+				needsUpdate = true
+				c.logger.Infof("persistent volume claim for volume %q needs to be resized", pvc.Name)
+			} else {
+				c.logger.Warningf("cannot shrink persistent volume")
+			}
+		}
+
+		if needsUpdate {
+			c.logger.Infof("updating persistent volume claim definition for volume %q", pvc.Name)
+			updatedPvc, err := c.KubeClient.PersistentVolumeClaims(pvc.Namespace).Update(context.TODO(), &pvc, metav1.UpdateOptions{})
+			if err != nil {
+				return fmt.Errorf("could not update persistent volume claim: %q", err)
+			}
+			c.VolumeClaims[pvc.UID] = updatedPvc
+			c.logger.Infof("successfully updated persistent volume claim %q", pvc.Name)
+		} else {
+			c.logger.Debugf("volume claim for volume %q does not require updates", pvc.Name)
+		}
-	if err := c.resizeVolumeClaims(c.Spec.Volume); err != nil {
-		return fmt.Errorf("could not sync volume claims: %v", err)
+		newAnnotations
:= c.annotationsSet(nil) + if changed, _ := c.compareAnnotations(pvc.Annotations, newAnnotations, nil); changed { + patchData, err := metaAnnotationsPatch(newAnnotations) + if err != nil { + return fmt.Errorf("could not form patch for the persistent volume claim for volume %q: %v", pvc.Name, err) + } + patchedPvc, err := c.KubeClient.PersistentVolumeClaims(pvc.Namespace).Patch(context.TODO(), pvc.Name, types.MergePatchType, []byte(patchData), metav1.PatchOptions{}) + if err != nil { + return fmt.Errorf("could not patch annotations of the persistent volume claim for volume %q: %v", pvc.Name, err) + } + c.VolumeClaims[pvc.UID] = patchedPvc + } } - c.logger.Infof("volume claims have been synced successfully") + c.logger.Debug("volume claims have been synced successfully") return nil } @@ -222,7 +259,7 @@ func (c *Cluster) syncEbsVolumes() error { return fmt.Errorf("could not sync volumes: %v", err) } - c.logger.Infof("volumes have been synced successfully") + c.logger.Debug("volumes have been synced successfully") return nil } @@ -235,58 +272,41 @@ func (c *Cluster) listPersistentVolumeClaims() ([]v1.PersistentVolumeClaim, erro pvcs, err := c.KubeClient.PersistentVolumeClaims(ns).List(context.TODO(), listOptions) if err != nil { - return nil, fmt.Errorf("could not list of PersistentVolumeClaims: %v", err) + return nil, fmt.Errorf("could not list of persistent volume claims: %v", err) } return pvcs.Items, nil } func (c *Cluster) deletePersistentVolumeClaims() error { - c.logger.Debugln("deleting PVCs") - pvcs, err := c.listPersistentVolumeClaims() - if err != nil { - return err - } - for _, pvc := range pvcs { - c.logger.Debugf("deleting PVC %q", util.NameFromMeta(pvc.ObjectMeta)) - if err := c.KubeClient.PersistentVolumeClaims(pvc.Namespace).Delete(context.TODO(), pvc.Name, c.deleteOptions); err != nil { - c.logger.Warningf("could not delete PersistentVolumeClaim: %v", err) + c.setProcessName("deleting persistent volume claims") + errors := make([]string, 0) + for uid := range c.VolumeClaims { + err := c.deletePersistentVolumeClaim(uid) + if err != nil { + errors = append(errors, fmt.Sprintf("%v", err)) } } - if len(pvcs) > 0 { - c.logger.Debugln("PVCs have been deleted") - } else { - c.logger.Debugln("no PVCs to delete") + + if len(errors) > 0 { + c.logger.Warningf("could not delete all persistent volume claims: %v", strings.Join(errors, `', '`)) } return nil } -func (c *Cluster) resizeVolumeClaims(newVolume acidv1.Volume) error { - c.logger.Debugln("resizing PVCs") - pvcs, err := c.listPersistentVolumeClaims() - if err != nil { - return err - } - newQuantity, err := resource.ParseQuantity(newVolume.Size) - if err != nil { - return fmt.Errorf("could not parse volume size: %v", err) - } - newSize := quantityToGigabyte(newQuantity) - for _, pvc := range pvcs { - volumeSize := quantityToGigabyte(pvc.Spec.Resources.Requests[v1.ResourceStorage]) - if volumeSize >= newSize { - if volumeSize > newSize { - c.logger.Warningf("cannot shrink persistent volume") - } - continue - } - pvc.Spec.Resources.Requests[v1.ResourceStorage] = newQuantity - c.logger.Debugf("updating persistent volume claim definition for volume %q", pvc.Name) - if _, err := c.KubeClient.PersistentVolumeClaims(pvc.Namespace).Update(context.TODO(), &pvc, metav1.UpdateOptions{}); err != nil { - return fmt.Errorf("could not update persistent volume claim: %q", err) - } - c.logger.Debugf("successfully updated persistent volume claim %q", pvc.Name) +func (c *Cluster) deletePersistentVolumeClaim(uid types.UID) error { + 
c.setProcessName("deleting persistent volume claim") + pvc := c.VolumeClaims[uid] + c.logger.Debugf("deleting persistent volume claim %q", pvc.Name) + err := c.KubeClient.PersistentVolumeClaims(pvc.Namespace).Delete(context.TODO(), pvc.Name, c.deleteOptions) + if k8sutil.ResourceNotFound(err) { + c.logger.Debugf("persistent volume claim %q has already been deleted", pvc.Name) + } else if err != nil { + return fmt.Errorf("could not delete persistent volume claim %q: %v", pvc.Name, err) } + c.logger.Infof("persistent volume claim %q has been deleted", pvc.Name) + delete(c.VolumeClaims, uid) + return nil } @@ -295,7 +315,7 @@ func (c *Cluster) listPersistentVolumes() ([]*v1.PersistentVolume, error) { pvcs, err := c.listPersistentVolumeClaims() if err != nil { - return nil, fmt.Errorf("could not list cluster's PersistentVolumeClaims: %v", err) + return nil, fmt.Errorf("could not list cluster's persistent volume claims: %v", err) } pods, err := c.listPods() @@ -378,22 +398,22 @@ func (c *Cluster) resizeVolumes() error { if err != nil { return err } - c.logger.Debugf("updating persistent volume %q to %d", pv.Name, newSize) + c.logger.Infof("updating persistent volume %q to %d", pv.Name, newSize) if err := resizer.ResizeVolume(awsVolumeID, newSize); err != nil { return fmt.Errorf("could not resize EBS volume %q: %v", awsVolumeID, err) } - c.logger.Debugf("resizing the filesystem on the volume %q", pv.Name) + c.logger.Infof("resizing the filesystem on the volume %q", pv.Name) podName := getPodNameFromPersistentVolume(pv) if err := c.resizePostgresFilesystem(podName, []filesystems.FilesystemResizer{&filesystems.Ext234Resize{}}); err != nil { return fmt.Errorf("could not resize the filesystem on pod %q: %v", podName, err) } - c.logger.Debugf("filesystem resize successful on volume %q", pv.Name) + c.logger.Infof("filesystem resize successful on volume %q", pv.Name) pv.Spec.Capacity[v1.ResourceStorage] = newQuantity - c.logger.Debugf("updating persistent volume definition for volume %q", pv.Name) + c.logger.Infof("updating persistent volume definition for volume %q", pv.Name) if _, err := c.KubeClient.PersistentVolumes().Update(context.TODO(), pv, metav1.UpdateOptions{}); err != nil { return fmt.Errorf("could not update persistent volume: %q", err) } - c.logger.Debugf("successfully updated persistent volume %q", pv.Name) + c.logger.Infof("successfully updated persistent volume %q", pv.Name) if !compatible { c.logger.Warningf("volume %q is incompatible with all available resizing providers, consider switching storage_resize_mode to pvc or off", pv.Name) @@ -406,25 +426,6 @@ func (c *Cluster) resizeVolumes() error { return nil } -func (c *Cluster) volumeClaimsNeedResizing(newVolume acidv1.Volume) (bool, error) { - newSize, err := resource.ParseQuantity(newVolume.Size) - manifestSize := quantityToGigabyte(newSize) - if err != nil { - return false, fmt.Errorf("could not parse volume size from the manifest: %v", err) - } - pvcs, err := c.listPersistentVolumeClaims() - if err != nil { - return false, fmt.Errorf("could not receive persistent volume claims: %v", err) - } - for _, pvc := range pvcs { - currentSize := quantityToGigabyte(pvc.Spec.Resources.Requests[v1.ResourceStorage]) - if currentSize != manifestSize { - return true, nil - } - } - return false, nil -} - func (c *Cluster) volumesNeedResizing() (bool, error) { newQuantity, _ := resource.ParseQuantity(c.Spec.Volume.Size) newSize := quantityToGigabyte(newQuantity) @@ -473,7 +474,7 @@ func (c *Cluster) executeEBSMigration() error { } if !hasGp2 { - 
c.logger.Infof("no EBS gp2 volumes left to migrate") + c.logger.Debugf("no EBS gp2 volumes left to migrate") return nil } } diff --git a/pkg/cluster/volumes_test.go b/pkg/cluster/volumes_test.go index 4ef94fcfb..95ecc7624 100644 --- a/pkg/cluster/volumes_test.go +++ b/pkg/cluster/volumes_test.go @@ -74,6 +74,7 @@ func TestResizeVolumeClaim(t *testing.T) { cluster.Name = clusterName cluster.Namespace = namespace filterLabels := cluster.labelsSet(false) + cluster.Spec.Volume.Size = newVolumeSize // define and create PVCs for 1Gi volumes pvcList := CreatePVCs(namespace, clusterName, filterLabels, 2, "1Gi") @@ -85,14 +86,14 @@ func TestResizeVolumeClaim(t *testing.T) { } // test resizing - cluster.resizeVolumeClaims(acidv1.Volume{Size: newVolumeSize}) + cluster.syncVolumes() pvcs, err := cluster.listPersistentVolumeClaims() assert.NoError(t, err) // check if listPersistentVolumeClaims returns only the PVCs matching the filter if len(pvcs) != len(pvcList.Items)-1 { - t.Errorf("%s: could not find all PVCs, got %v, expected %v", testName, len(pvcs), len(pvcList.Items)-1) + t.Errorf("%s: could not find all persistent volume claims, got %v, expected %v", testName, len(pvcs), len(pvcList.Items)-1) } // check if PVCs were correctly resized @@ -164,7 +165,7 @@ func CreatePVCs(namespace string, clusterName string, labels labels.Set, n int, Labels: labels, }, Spec: v1.PersistentVolumeClaimSpec{ - Resources: v1.ResourceRequirements{ + Resources: v1.VolumeResourceRequirements{ Requests: v1.ResourceList{ v1.ResourceStorage: storage1Gi, }, @@ -215,6 +216,12 @@ func TestMigrateEBS(t *testing.T) { resizer.EXPECT().ExtractVolumeID(gomock.Eq("aws://eu-central-1b/ebs-volume-1")).Return("ebs-volume-1", nil) resizer.EXPECT().ExtractVolumeID(gomock.Eq("aws://eu-central-1b/ebs-volume-2")).Return("ebs-volume-2", nil) + resizer.EXPECT().GetProviderVolumeID(gomock.Any()). + DoAndReturn(func(pv *v1.PersistentVolume) (string, error) { + return resizer.ExtractVolumeID(pv.Spec.AWSElasticBlockStore.VolumeID) + }). + Times(2) + resizer.EXPECT().DescribeVolumes(gomock.Eq([]string{"ebs-volume-1", "ebs-volume-2"})).Return( []volumes.VolumeProperties{ {VolumeID: "ebs-volume-1", VolumeType: "gp2", Size: 100}, @@ -255,7 +262,7 @@ func initTestVolumesAndPods(client k8sutil.KubernetesClient, namespace, clustern Labels: labels, }, Spec: v1.PersistentVolumeClaimSpec{ - Resources: v1.ResourceRequirements{ + Resources: v1.VolumeResourceRequirements{ Requests: v1.ResourceList{ v1.ResourceStorage: storage1Gi, }, @@ -321,6 +328,12 @@ func TestMigrateGp3Support(t *testing.T) { resizer.EXPECT().ExtractVolumeID(gomock.Eq("aws://eu-central-1b/ebs-volume-2")).Return("ebs-volume-2", nil) resizer.EXPECT().ExtractVolumeID(gomock.Eq("aws://eu-central-1b/ebs-volume-3")).Return("ebs-volume-3", nil) + resizer.EXPECT().GetProviderVolumeID(gomock.Any()). + DoAndReturn(func(pv *v1.PersistentVolume) (string, error) { + return resizer.ExtractVolumeID(pv.Spec.AWSElasticBlockStore.VolumeID) + }). + Times(3) + resizer.EXPECT().DescribeVolumes(gomock.Eq([]string{"ebs-volume-1", "ebs-volume-2", "ebs-volume-3"})).Return( []volumes.VolumeProperties{ {VolumeID: "ebs-volume-1", VolumeType: "gp3", Size: 100, Iops: 3000}, @@ -376,6 +389,12 @@ func TestManualGp2Gp3Support(t *testing.T) { resizer.EXPECT().ExtractVolumeID(gomock.Eq("aws://eu-central-1b/ebs-volume-1")).Return("ebs-volume-1", nil) resizer.EXPECT().ExtractVolumeID(gomock.Eq("aws://eu-central-1b/ebs-volume-2")).Return("ebs-volume-2", nil) + resizer.EXPECT().GetProviderVolumeID(gomock.Any()). 
+ DoAndReturn(func(pv *v1.PersistentVolume) (string, error) { + return resizer.ExtractVolumeID(pv.Spec.AWSElasticBlockStore.VolumeID) + }). + Times(2) + resizer.EXPECT().DescribeVolumes(gomock.Eq([]string{"ebs-volume-1", "ebs-volume-2"})).Return( []volumes.VolumeProperties{ {VolumeID: "ebs-volume-1", VolumeType: "gp2", Size: 150, Iops: 3000}, @@ -435,6 +454,12 @@ func TestDontTouchType(t *testing.T) { resizer.EXPECT().ExtractVolumeID(gomock.Eq("aws://eu-central-1b/ebs-volume-1")).Return("ebs-volume-1", nil) resizer.EXPECT().ExtractVolumeID(gomock.Eq("aws://eu-central-1b/ebs-volume-2")).Return("ebs-volume-2", nil) + resizer.EXPECT().GetProviderVolumeID(gomock.Any()). + DoAndReturn(func(pv *v1.PersistentVolume) (string, error) { + return resizer.ExtractVolumeID(pv.Spec.AWSElasticBlockStore.VolumeID) + }). + Times(2) + resizer.EXPECT().DescribeVolumes(gomock.Eq([]string{"ebs-volume-1", "ebs-volume-2"})).Return( []volumes.VolumeProperties{ {VolumeID: "ebs-volume-1", VolumeType: "gp2", Size: 150, Iops: 3000}, diff --git a/pkg/controller/operator_config.go b/pkg/controller/operator_config.go index 36c30d318..5739f6314 100644 --- a/pkg/controller/operator_config.go +++ b/pkg/controller/operator_config.go @@ -39,7 +39,7 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur result.EnableTeamIdClusternamePrefix = fromCRD.EnableTeamIdClusternamePrefix result.EtcdHost = fromCRD.EtcdHost result.KubernetesUseConfigMaps = fromCRD.KubernetesUseConfigMaps - result.DockerImage = util.Coalesce(fromCRD.DockerImage, "ghcr.io/zalando/spilo-15:3.0-p1") + result.DockerImage = util.Coalesce(fromCRD.DockerImage, "ghcr.io/zalando/spilo-17:4.0-p2") result.Workers = util.CoalesceUInt32(fromCRD.Workers, 8) result.MinInstances = fromCRD.MinInstances result.MaxInstances = fromCRD.MaxInstances @@ -60,12 +60,13 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur result.PasswordRotationUserRetention = util.CoalesceUInt32(fromCRD.PostgresUsersConfiguration.DeepCopy().PasswordRotationUserRetention, 180) // major version upgrade config - result.MajorVersionUpgradeMode = util.Coalesce(fromCRD.MajorVersionUpgrade.MajorVersionUpgradeMode, "off") + result.MajorVersionUpgradeMode = util.Coalesce(fromCRD.MajorVersionUpgrade.MajorVersionUpgradeMode, "manual") result.MajorVersionUpgradeTeamAllowList = fromCRD.MajorVersionUpgrade.MajorVersionUpgradeTeamAllowList - result.MinimalMajorVersion = util.Coalesce(fromCRD.MajorVersionUpgrade.MinimalMajorVersion, "11") - result.TargetMajorVersion = util.Coalesce(fromCRD.MajorVersionUpgrade.TargetMajorVersion, "15") + result.MinimalMajorVersion = util.Coalesce(fromCRD.MajorVersionUpgrade.MinimalMajorVersion, "13") + result.TargetMajorVersion = util.Coalesce(fromCRD.MajorVersionUpgrade.TargetMajorVersion, "17") // kubernetes config + result.EnableOwnerReferences = util.CoalesceBool(fromCRD.Kubernetes.EnableOwnerReferences, util.False()) result.CustomPodAnnotations = fromCRD.Kubernetes.CustomPodAnnotations result.PodServiceAccountName = util.Coalesce(fromCRD.Kubernetes.PodServiceAccountName, "postgres-pod") result.PodServiceAccountDefinition = fromCRD.Kubernetes.PodServiceAccountDefinition @@ -82,6 +83,7 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur result.ClusterDomain = util.Coalesce(fromCRD.Kubernetes.ClusterDomain, "cluster.local") result.WatchedNamespace = fromCRD.Kubernetes.WatchedNamespace result.PDBNameFormat = fromCRD.Kubernetes.PDBNameFormat + result.PDBMasterLabelSelector = 
util.CoalesceBool(fromCRD.Kubernetes.PDBMasterLabelSelector, util.True()) result.EnablePodDisruptionBudget = util.CoalesceBool(fromCRD.Kubernetes.EnablePodDisruptionBudget, util.True()) result.StorageResizeMode = util.Coalesce(fromCRD.Kubernetes.StorageResizeMode, "pvc") result.EnableInitContainers = util.CoalesceBool(fromCRD.Kubernetes.EnableInitContainers, util.True()) @@ -90,6 +92,7 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur result.SecretNameTemplate = fromCRD.Kubernetes.SecretNameTemplate result.OAuthTokenSecretName = fromCRD.Kubernetes.OAuthTokenSecretName result.EnableCrossNamespaceSecret = fromCRD.Kubernetes.EnableCrossNamespaceSecret + result.EnableFinalizers = util.CoalesceBool(fromCRD.Kubernetes.EnableFinalizers, util.False()) result.InfrastructureRolesSecretName = fromCRD.Kubernetes.InfrastructureRolesSecretName if fromCRD.Kubernetes.InfrastructureRolesDefs != nil { @@ -120,6 +123,8 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur result.PodPriorityClassName = fromCRD.Kubernetes.PodPriorityClassName result.PodManagementPolicy = util.Coalesce(fromCRD.Kubernetes.PodManagementPolicy, "ordered_ready") result.PersistentVolumeClaimRetentionPolicy = fromCRD.Kubernetes.PersistentVolumeClaimRetentionPolicy + result.EnableSecretsDeletion = util.CoalesceBool(fromCRD.Kubernetes.EnableSecretsDeletion, util.True()) + result.EnablePersistentVolumeClaimDeletion = util.CoalesceBool(fromCRD.Kubernetes.EnablePersistentVolumeClaimDeletion, util.True()) result.EnableReadinessProbe = fromCRD.Kubernetes.EnableReadinessProbe result.MasterPodMoveTimeout = util.CoalesceDuration(time.Duration(fromCRD.Kubernetes.MasterPodMoveTimeout), "10m") result.EnablePodAntiAffinity = fromCRD.Kubernetes.EnablePodAntiAffinity @@ -128,12 +133,12 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur result.PodToleration = fromCRD.Kubernetes.PodToleration // Postgres Pod resources - result.DefaultCPURequest = util.Coalesce(fromCRD.PostgresPodResources.DefaultCPURequest, "100m") - result.DefaultMemoryRequest = util.Coalesce(fromCRD.PostgresPodResources.DefaultMemoryRequest, "100Mi") - result.DefaultCPULimit = util.Coalesce(fromCRD.PostgresPodResources.DefaultCPULimit, "1") - result.DefaultMemoryLimit = util.Coalesce(fromCRD.PostgresPodResources.DefaultMemoryLimit, "500Mi") - result.MinCPULimit = util.Coalesce(fromCRD.PostgresPodResources.MinCPULimit, "250m") - result.MinMemoryLimit = util.Coalesce(fromCRD.PostgresPodResources.MinMemoryLimit, "250Mi") + result.DefaultCPURequest = fromCRD.PostgresPodResources.DefaultCPURequest + result.DefaultMemoryRequest = fromCRD.PostgresPodResources.DefaultMemoryRequest + result.DefaultCPULimit = fromCRD.PostgresPodResources.DefaultCPULimit + result.DefaultMemoryLimit = fromCRD.PostgresPodResources.DefaultMemoryLimit + result.MinCPULimit = fromCRD.PostgresPodResources.MinCPULimit + result.MinMemoryLimit = fromCRD.PostgresPodResources.MinMemoryLimit result.MaxCPURequest = fromCRD.PostgresPodResources.MaxCPURequest result.MaxMemoryRequest = fromCRD.PostgresPodResources.MaxMemoryRequest @@ -169,18 +174,19 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur result.GCPCredentials = fromCRD.AWSGCP.GCPCredentials result.WALAZStorageAccount = fromCRD.AWSGCP.WALAZStorageAccount result.AdditionalSecretMount = fromCRD.AWSGCP.AdditionalSecretMount - result.AdditionalSecretMountPath = util.Coalesce(fromCRD.AWSGCP.AdditionalSecretMountPath, "/meta/credentials") + 
result.AdditionalSecretMountPath = fromCRD.AWSGCP.AdditionalSecretMountPath result.EnableEBSGp3Migration = fromCRD.AWSGCP.EnableEBSGp3Migration result.EnableEBSGp3MigrationMaxSize = util.CoalesceInt64(fromCRD.AWSGCP.EnableEBSGp3MigrationMaxSize, 1000) // logical backup config result.LogicalBackupSchedule = util.Coalesce(fromCRD.LogicalBackup.Schedule, "30 00 * * *") - result.LogicalBackupDockerImage = util.Coalesce(fromCRD.LogicalBackup.DockerImage, "registry.opensource.zalan.do/acid/logical-backup:v1.10.1") + result.LogicalBackupDockerImage = util.Coalesce(fromCRD.LogicalBackup.DockerImage, "ghcr.io/zalando/postgres-operator/logical-backup:v1.14.0") result.LogicalBackupProvider = util.Coalesce(fromCRD.LogicalBackup.BackupProvider, "s3") result.LogicalBackupAzureStorageAccountName = fromCRD.LogicalBackup.AzureStorageAccountName result.LogicalBackupAzureStorageAccountKey = fromCRD.LogicalBackup.AzureStorageAccountKey result.LogicalBackupAzureStorageContainer = fromCRD.LogicalBackup.AzureStorageContainer result.LogicalBackupS3Bucket = fromCRD.LogicalBackup.S3Bucket + result.LogicalBackupS3BucketPrefix = util.Coalesce(fromCRD.LogicalBackup.S3BucketPrefix, "spilo") result.LogicalBackupS3Region = fromCRD.LogicalBackup.S3Region result.LogicalBackupS3Endpoint = fromCRD.LogicalBackup.S3Endpoint result.LogicalBackupS3AccessKeyID = fromCRD.LogicalBackup.S3AccessKeyID @@ -189,6 +195,7 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur result.LogicalBackupS3RetentionTime = fromCRD.LogicalBackup.RetentionTime result.LogicalBackupGoogleApplicationCredentials = fromCRD.LogicalBackup.GoogleApplicationCredentials result.LogicalBackupJobPrefix = util.Coalesce(fromCRD.LogicalBackup.JobPrefix, "logical-backup-") + result.LogicalBackupCronjobEnvironmentSecret = fromCRD.LogicalBackup.CronjobEnvironmentSecret result.LogicalBackupCPURequest = fromCRD.LogicalBackup.CPURequest result.LogicalBackupMemoryRequest = fromCRD.LogicalBackup.MemoryRequest result.LogicalBackupCPULimit = fromCRD.LogicalBackup.CPULimit @@ -262,21 +269,10 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur fromCRD.ConnectionPooler.Mode, constants.ConnectionPoolerDefaultMode) - result.ConnectionPooler.ConnectionPoolerDefaultCPURequest = util.Coalesce( - fromCRD.ConnectionPooler.DefaultCPURequest, - constants.ConnectionPoolerDefaultCpuRequest) - - result.ConnectionPooler.ConnectionPoolerDefaultMemoryRequest = util.Coalesce( - fromCRD.ConnectionPooler.DefaultMemoryRequest, - constants.ConnectionPoolerDefaultMemoryRequest) - - result.ConnectionPooler.ConnectionPoolerDefaultCPULimit = util.Coalesce( - fromCRD.ConnectionPooler.DefaultCPULimit, - constants.ConnectionPoolerDefaultCpuLimit) - - result.ConnectionPooler.ConnectionPoolerDefaultMemoryLimit = util.Coalesce( - fromCRD.ConnectionPooler.DefaultMemoryLimit, - constants.ConnectionPoolerDefaultMemoryLimit) + result.ConnectionPooler.ConnectionPoolerDefaultCPURequest = fromCRD.ConnectionPooler.DefaultCPURequest + result.ConnectionPooler.ConnectionPoolerDefaultMemoryRequest = fromCRD.ConnectionPooler.DefaultMemoryRequest + result.ConnectionPooler.ConnectionPoolerDefaultCPULimit = fromCRD.ConnectionPooler.DefaultCPULimit + result.ConnectionPooler.ConnectionPoolerDefaultMemoryLimit = fromCRD.ConnectionPooler.DefaultMemoryLimit result.ConnectionPooler.MaxDBConnections = util.CoalesceInt32( fromCRD.ConnectionPooler.MaxDBConnections, diff --git a/pkg/controller/postgresql.go b/pkg/controller/postgresql.go index ede7a99a3..42d96278c 100644 
--- a/pkg/controller/postgresql.go +++ b/pkg/controller/postgresql.go @@ -143,7 +143,7 @@ func (c *Controller) acquireInitialListOfClusters() error { if list, err = c.listClusters(metav1.ListOptions{ResourceVersion: "0"}); err != nil { return err } - c.logger.Debugf("acquiring initial list of clusters") + c.logger.Debug("acquiring initial list of clusters") for _, pg := range list.Items { // XXX: check the cluster status field instead if pg.Error != "" { @@ -285,14 +285,18 @@ func (c *Controller) processEvent(event ClusterEvent) { lg.Errorf("unknown cluster: %q", clusterName) return } - lg.Infoln("deletion of the cluster started") teamName := strings.ToLower(cl.Spec.TeamID) - c.curWorkerCluster.Store(event.WorkerID, cl) - cl.Delete() - // Fixme - no error handling for delete ? - // c.eventRecorder.Eventf(cl.GetReference, v1.EventTypeWarning, "Delete", "%v", cl.Error) + + // when using finalizers the deletion already happened + if c.opConfig.EnableFinalizers == nil || !*c.opConfig.EnableFinalizers { + lg.Infoln("deletion of the cluster started") + if err := cl.Delete(); err != nil { + cl.Error = fmt.Sprintf("could not delete cluster: %v", err) + c.eventRecorder.Eventf(cl.GetReference(), v1.EventTypeWarning, "Delete", "%v", cl.Error) + } + } func() { defer c.clustersMu.Unlock() @@ -325,16 +329,26 @@ func (c *Controller) processEvent(event ClusterEvent) { } c.curWorkerCluster.Store(event.WorkerID, cl) - err = cl.Sync(event.NewSpec) - if err != nil { - cl.Error = fmt.Sprintf("could not sync cluster: %v", err) - c.eventRecorder.Eventf(cl.GetReference(), v1.EventTypeWarning, "Sync", "%v", cl.Error) - lg.Error(cl.Error) - return + + // has this cluster been marked as deleted already, then we shall start cleaning up + if !cl.ObjectMeta.DeletionTimestamp.IsZero() { + lg.Infof("cluster has a DeletionTimestamp of %s, starting deletion now.", cl.ObjectMeta.DeletionTimestamp.Format(time.RFC3339)) + if err = cl.Delete(); err != nil { + cl.Error = fmt.Sprintf("error deleting cluster and its resources: %v", err) + c.eventRecorder.Eventf(cl.GetReference(), v1.EventTypeWarning, "Delete", "%v", cl.Error) + lg.Error(cl.Error) + return + } + } else { + if err = cl.Sync(event.NewSpec); err != nil { + cl.Error = fmt.Sprintf("could not sync cluster: %v", err) + c.eventRecorder.Eventf(cl.GetReference(), v1.EventTypeWarning, "Sync", "%v", cl.Error) + lg.Error(cl.Error) + return + } + lg.Infof("cluster has been synced") } cl.Error = "" - - lg.Infof("cluster has been synced") } } @@ -347,7 +361,7 @@ func (c *Controller) processClusterEventsQueue(idx int, stopCh <-chan struct{}, }() for { - obj, err := c.clusterEventQueues[idx].Pop(cache.PopProcessFunc(func(interface{}) error { return nil })) + obj, err := c.clusterEventQueues[idx].Pop(cache.PopProcessFunc(func(interface{}, bool) error { return nil })) if err != nil { if err == cache.ErrFIFOClosed { return @@ -370,10 +384,6 @@ func (c *Controller) warnOnDeprecatedPostgreSQLSpecParameters(spec *acidv1.Postg c.logger.Warningf("parameter %q is deprecated. Consider setting %q instead", deprecated, replacement) } - noeffect := func(param string, explanation string) { - c.logger.Warningf("parameter %q takes no effect. 
%s", param, explanation) - } - if spec.UseLoadBalancer != nil { deprecate("useLoadBalancer", "enableMasterLoadBalancer") } @@ -381,10 +391,6 @@ func (c *Controller) warnOnDeprecatedPostgreSQLSpecParameters(spec *acidv1.Postg deprecate("replicaLoadBalancer", "enableReplicaLoadBalancer") } - if len(spec.MaintenanceWindows) > 0 { - noeffect("maintenanceWindows", "Not implemented.") - } - if (spec.UseLoadBalancer != nil || spec.ReplicaLoadBalancer != nil) && (spec.EnableReplicaLoadBalancer != nil || spec.EnableMasterLoadBalancer != nil) { c.logger.Warnf("both old and new load balancer parameters are present in the manifest, ignoring old ones") @@ -440,19 +446,22 @@ func (c *Controller) queueClusterEvent(informerOldSpec, informerNewSpec *acidv1. clusterError = informerNewSpec.Error } - // only allow deletion if delete annotations are set and conditions are met if eventType == EventDelete { - if err := c.meetsClusterDeleteAnnotations(informerOldSpec); err != nil { - c.logger.WithField("cluster-name", clusterName).Warnf( - "ignoring %q event for cluster %q - manifest does not fulfill delete requirements: %s", eventType, clusterName, err) - c.logger.WithField("cluster-name", clusterName).Warnf( - "please, recreate Postgresql resource %q and set annotations to delete properly", clusterName) - if currentManifest, marshalErr := json.Marshal(informerOldSpec); marshalErr != nil { - c.logger.WithField("cluster-name", clusterName).Warnf("could not marshal current manifest:\n%+v", informerOldSpec) - } else { - c.logger.WithField("cluster-name", clusterName).Warnf("%s\n", string(currentManifest)) + // when owner references are used operator cannot block deletion + if c.opConfig.EnableOwnerReferences == nil || !*c.opConfig.EnableOwnerReferences { + // only allow deletion if delete annotations are set and conditions are met + if err := c.meetsClusterDeleteAnnotations(informerOldSpec); err != nil { + c.logger.WithField("cluster-name", clusterName).Warnf( + "ignoring %q event for cluster %q - manifest does not fulfill delete requirements: %s", eventType, clusterName, err) + c.logger.WithField("cluster-name", clusterName).Warnf( + "please, recreate Postgresql resource %q and set annotations to delete properly", clusterName) + if currentManifest, marshalErr := json.Marshal(informerOldSpec); marshalErr != nil { + c.logger.WithField("cluster-name", clusterName).Warnf("could not marshal current manifest:\n%+v", informerOldSpec) + } else { + c.logger.WithField("cluster-name", clusterName).Warnf("%s\n", string(currentManifest)) + } + return } - return } } @@ -560,13 +569,13 @@ func (c *Controller) postgresqlCheck(obj interface{}) *acidv1.Postgresql { } /* - Ensures the pod service account and role bindings exists in a namespace - before a PG cluster is created there so that a user does not have to deploy - these credentials manually. StatefulSets require the service account to - create pods; Patroni requires relevant RBAC bindings to access endpoints - or config maps. +Ensures the pod service account and role bindings exists in a namespace +before a PG cluster is created there so that a user does not have to deploy +these credentials manually. StatefulSets require the service account to +create pods; Patroni requires relevant RBAC bindings to access endpoints +or config maps. - The operator does not sync accounts/role bindings after creation. +The operator does not sync accounts/role bindings after creation. 
*/ func (c *Controller) submitRBACCredentials(event ClusterEvent) error { diff --git a/pkg/controller/util.go b/pkg/controller/util.go index bd1e65d02..59e608ad0 100644 --- a/pkg/controller/util.go +++ b/pkg/controller/util.go @@ -76,11 +76,10 @@ func (c *Controller) createOperatorCRD(desiredCrd *apiextv1.CustomResourceDefini context.TODO(), crd.Name, types.MergePatchType, patch, metav1.PatchOptions{}); err != nil { return fmt.Errorf("could not update customResourceDefinition %q: %v", crd.Name, err) } - } else { - c.logger.Infof("customResourceDefinition %q has been registered", crd.Name) } + c.logger.Infof("customResourceDefinition %q is registered", crd.Name) - return wait.Poll(c.config.CRDReadyWaitInterval, c.config.CRDReadyWaitTimeout, func() (bool, error) { + return wait.PollUntilContextTimeout(context.TODO(), c.config.CRDReadyWaitInterval, c.config.CRDReadyWaitTimeout, false, func(ctx context.Context) (bool, error) { c, err := c.KubeClient.CustomResourceDefinitions().Get(context.TODO(), desiredCrd.Name, metav1.GetOptions{}) if err != nil { return false, err diff --git a/pkg/controller/util_test.go b/pkg/controller/util_test.go index a4ca17728..4c3a9b356 100644 --- a/pkg/controller/util_test.go +++ b/pkg/controller/util_test.go @@ -132,7 +132,7 @@ func TestOldInfrastructureRoleFormat(t *testing.T) { for _, test := range testTable { roles, err := utilTestController.getInfrastructureRoles( []*config.InfrastructureRole{ - &config.InfrastructureRole{ + { SecretName: test.secretName, UserKey: "user", PasswordKey: "password", @@ -163,7 +163,7 @@ func TestNewInfrastructureRoleFormat(t *testing.T) { // one secret with one configmap { []spec.NamespacedName{ - spec.NamespacedName{ + { Namespace: v1.NamespaceDefault, Name: testInfrastructureRolesNewSecretName, }, @@ -187,11 +187,11 @@ func TestNewInfrastructureRoleFormat(t *testing.T) { // multiple standalone secrets { []spec.NamespacedName{ - spec.NamespacedName{ + { Namespace: v1.NamespaceDefault, Name: "infrastructureroles-new-test1", }, - spec.NamespacedName{ + { Namespace: v1.NamespaceDefault, Name: "infrastructureroles-new-test2", }, @@ -248,7 +248,7 @@ func TestInfrastructureRoleDefinitions(t *testing.T) { // only new CRD format { []*config.InfrastructureRole{ - &config.InfrastructureRole{ + { SecretName: spec.NamespacedName{ Namespace: v1.NamespaceDefault, Name: testInfrastructureRolesNewSecretName, @@ -262,7 +262,7 @@ func TestInfrastructureRoleDefinitions(t *testing.T) { spec.NamespacedName{}, "", []*config.InfrastructureRole{ - &config.InfrastructureRole{ + { SecretName: spec.NamespacedName{ Namespace: v1.NamespaceDefault, Name: testInfrastructureRolesNewSecretName, @@ -280,7 +280,7 @@ func TestInfrastructureRoleDefinitions(t *testing.T) { spec.NamespacedName{}, "secretname: infrastructureroles-new-test, userkey: test-user, passwordkey: test-password, rolekey: test-role", []*config.InfrastructureRole{ - &config.InfrastructureRole{ + { SecretName: spec.NamespacedName{ Namespace: v1.NamespaceDefault, Name: testInfrastructureRolesNewSecretName, @@ -298,7 +298,7 @@ func TestInfrastructureRoleDefinitions(t *testing.T) { spec.NamespacedName{}, "secretname: infrastructureroles-new-test, userkey: test-user, passwordkey: test-password, defaultrolevalue: test-role", []*config.InfrastructureRole{ - &config.InfrastructureRole{ + { SecretName: spec.NamespacedName{ Namespace: v1.NamespaceDefault, Name: testInfrastructureRolesNewSecretName, @@ -319,7 +319,7 @@ func TestInfrastructureRoleDefinitions(t *testing.T) { }, "", 
[]*config.InfrastructureRole{ - &config.InfrastructureRole{ + { SecretName: spec.NamespacedName{ Namespace: v1.NamespaceDefault, Name: testInfrastructureRolesOldSecretName, @@ -334,7 +334,7 @@ func TestInfrastructureRoleDefinitions(t *testing.T) { // both formats for CRD { []*config.InfrastructureRole{ - &config.InfrastructureRole{ + { SecretName: spec.NamespacedName{ Namespace: v1.NamespaceDefault, Name: testInfrastructureRolesNewSecretName, @@ -351,7 +351,7 @@ func TestInfrastructureRoleDefinitions(t *testing.T) { }, "", []*config.InfrastructureRole{ - &config.InfrastructureRole{ + { SecretName: spec.NamespacedName{ Namespace: v1.NamespaceDefault, Name: testInfrastructureRolesNewSecretName, @@ -361,7 +361,7 @@ func TestInfrastructureRoleDefinitions(t *testing.T) { RoleKey: "test-role", Template: false, }, - &config.InfrastructureRole{ + { SecretName: spec.NamespacedName{ Namespace: v1.NamespaceDefault, Name: testInfrastructureRolesOldSecretName, @@ -382,7 +382,7 @@ func TestInfrastructureRoleDefinitions(t *testing.T) { }, "secretname: infrastructureroles-new-test, userkey: test-user, passwordkey: test-password, rolekey: test-role", []*config.InfrastructureRole{ - &config.InfrastructureRole{ + { SecretName: spec.NamespacedName{ Namespace: v1.NamespaceDefault, Name: testInfrastructureRolesNewSecretName, @@ -392,7 +392,7 @@ func TestInfrastructureRoleDefinitions(t *testing.T) { RoleKey: "test-role", Template: false, }, - &config.InfrastructureRole{ + { SecretName: spec.NamespacedName{ Namespace: v1.NamespaceDefault, Name: testInfrastructureRolesOldSecretName, diff --git a/pkg/generated/clientset/versioned/clientset.go b/pkg/generated/clientset/versioned/clientset.go index 3e827b65a..69725a952 100644 --- a/pkg/generated/clientset/versioned/clientset.go +++ b/pkg/generated/clientset/versioned/clientset.go @@ -1,5 +1,5 @@ /* -Copyright 2023 Compose, Zalando SE +Copyright 2025 Compose, Zalando SE Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/pkg/generated/clientset/versioned/doc.go b/pkg/generated/clientset/versioned/doc.go index bf8f860ed..34b48f910 100644 --- a/pkg/generated/clientset/versioned/doc.go +++ b/pkg/generated/clientset/versioned/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2023 Compose, Zalando SE +Copyright 2025 Compose, Zalando SE Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/pkg/generated/clientset/versioned/fake/clientset_generated.go b/pkg/generated/clientset/versioned/fake/clientset_generated.go index bb34757a9..c85ad76f9 100644 --- a/pkg/generated/clientset/versioned/fake/clientset_generated.go +++ b/pkg/generated/clientset/versioned/fake/clientset_generated.go @@ -1,5 +1,5 @@ /* -Copyright 2023 Compose, Zalando SE +Copyright 2025 Compose, Zalando SE Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/pkg/generated/clientset/versioned/fake/doc.go b/pkg/generated/clientset/versioned/fake/doc.go index b61991dbb..7548400fa 100644 --- a/pkg/generated/clientset/versioned/fake/doc.go +++ b/pkg/generated/clientset/versioned/fake/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2023 Compose, Zalando SE +Copyright 2025 Compose, Zalando SE Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated 
documentation files (the "Software"), to deal diff --git a/pkg/generated/clientset/versioned/fake/register.go b/pkg/generated/clientset/versioned/fake/register.go index a156f8f52..225705881 100644 --- a/pkg/generated/clientset/versioned/fake/register.go +++ b/pkg/generated/clientset/versioned/fake/register.go @@ -1,5 +1,5 @@ /* -Copyright 2023 Compose, Zalando SE +Copyright 2025 Compose, Zalando SE Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/pkg/generated/clientset/versioned/scheme/doc.go b/pkg/generated/clientset/versioned/scheme/doc.go index c246feb50..1f79f0496 100644 --- a/pkg/generated/clientset/versioned/scheme/doc.go +++ b/pkg/generated/clientset/versioned/scheme/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2023 Compose, Zalando SE +Copyright 2025 Compose, Zalando SE Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/pkg/generated/clientset/versioned/scheme/register.go b/pkg/generated/clientset/versioned/scheme/register.go index b1509deb9..6bbec0e5e 100644 --- a/pkg/generated/clientset/versioned/scheme/register.go +++ b/pkg/generated/clientset/versioned/scheme/register.go @@ -1,5 +1,5 @@ /* -Copyright 2023 Compose, Zalando SE +Copyright 2025 Compose, Zalando SE Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/acid.zalan.do_client.go b/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/acid.zalan.do_client.go index 44c350001..e070c7098 100644 --- a/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/acid.zalan.do_client.go +++ b/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/acid.zalan.do_client.go @@ -1,5 +1,5 @@ /* -Copyright 2023 Compose, Zalando SE +Copyright 2025 Compose, Zalando SE Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/doc.go b/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/doc.go index 974871d0a..5c6f06565 100644 --- a/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/doc.go +++ b/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2023 Compose, Zalando SE +Copyright 2025 Compose, Zalando SE Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/fake/doc.go b/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/fake/doc.go index 3fc0714cb..63b4b5b8f 100644 --- a/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/fake/doc.go +++ b/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/fake/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2023 Compose, Zalando SE +Copyright 2025 Compose, Zalando SE Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/fake/fake_acid.zalan.do_client.go b/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/fake/fake_acid.zalan.do_client.go index 
e864a8ad0..d45375335 100644 --- a/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/fake/fake_acid.zalan.do_client.go +++ b/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/fake/fake_acid.zalan.do_client.go @@ -1,5 +1,5 @@ /* -Copyright 2023 Compose, Zalando SE +Copyright 2025 Compose, Zalando SE Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/fake/fake_operatorconfiguration.go b/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/fake/fake_operatorconfiguration.go index 465bd1f7e..de1b9a0e3 100644 --- a/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/fake/fake_operatorconfiguration.go +++ b/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/fake/fake_operatorconfiguration.go @@ -1,5 +1,5 @@ /* -Copyright 2023 Compose, Zalando SE +Copyright 2025 Compose, Zalando SE Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/fake/fake_postgresql.go b/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/fake/fake_postgresql.go index 6fc936eb1..b472c6057 100644 --- a/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/fake/fake_postgresql.go +++ b/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/fake/fake_postgresql.go @@ -1,5 +1,5 @@ /* -Copyright 2023 Compose, Zalando SE +Copyright 2025 Compose, Zalando SE Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/fake/fake_postgresteam.go b/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/fake/fake_postgresteam.go index b7aef2c0d..5801666c8 100644 --- a/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/fake/fake_postgresteam.go +++ b/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/fake/fake_postgresteam.go @@ -1,5 +1,5 @@ /* -Copyright 2023 Compose, Zalando SE +Copyright 2025 Compose, Zalando SE Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/generated_expansion.go b/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/generated_expansion.go index 034ffdb84..8a5e126d7 100644 --- a/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/generated_expansion.go +++ b/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/generated_expansion.go @@ -1,5 +1,5 @@ /* -Copyright 2023 Compose, Zalando SE +Copyright 2025 Compose, Zalando SE Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/operatorconfiguration.go b/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/operatorconfiguration.go index 70477053e..c941551ca 100644 --- a/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/operatorconfiguration.go +++ b/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/operatorconfiguration.go @@ -1,5 +1,5 @@ /* -Copyright 2023 Compose, Zalando SE +Copyright 2025 Compose, Zalando SE Permission is hereby granted, free 
of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/postgresql.go b/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/postgresql.go index 7e0a829cf..23133d22a 100644 --- a/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/postgresql.go +++ b/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/postgresql.go @@ -1,5 +1,5 @@ /* -Copyright 2023 Compose, Zalando SE +Copyright 2025 Compose, Zalando SE Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/postgresteam.go b/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/postgresteam.go index 2106a0f34..c62f6c9d7 100644 --- a/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/postgresteam.go +++ b/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1/postgresteam.go @@ -1,5 +1,5 @@ /* -Copyright 2023 Compose, Zalando SE +Copyright 2025 Compose, Zalando SE Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/pkg/generated/clientset/versioned/typed/zalando.org/v1/doc.go b/pkg/generated/clientset/versioned/typed/zalando.org/v1/doc.go index 974871d0a..5c6f06565 100644 --- a/pkg/generated/clientset/versioned/typed/zalando.org/v1/doc.go +++ b/pkg/generated/clientset/versioned/typed/zalando.org/v1/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2023 Compose, Zalando SE +Copyright 2025 Compose, Zalando SE Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/pkg/generated/clientset/versioned/typed/zalando.org/v1/fabriceventstream.go b/pkg/generated/clientset/versioned/typed/zalando.org/v1/fabriceventstream.go index 581716c55..ae4a267d3 100644 --- a/pkg/generated/clientset/versioned/typed/zalando.org/v1/fabriceventstream.go +++ b/pkg/generated/clientset/versioned/typed/zalando.org/v1/fabriceventstream.go @@ -1,5 +1,5 @@ /* -Copyright 2023 Compose, Zalando SE +Copyright 2025 Compose, Zalando SE Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/pkg/generated/clientset/versioned/typed/zalando.org/v1/fake/doc.go b/pkg/generated/clientset/versioned/typed/zalando.org/v1/fake/doc.go index 3fc0714cb..63b4b5b8f 100644 --- a/pkg/generated/clientset/versioned/typed/zalando.org/v1/fake/doc.go +++ b/pkg/generated/clientset/versioned/typed/zalando.org/v1/fake/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2023 Compose, Zalando SE +Copyright 2025 Compose, Zalando SE Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/pkg/generated/clientset/versioned/typed/zalando.org/v1/fake/fake_fabriceventstream.go b/pkg/generated/clientset/versioned/typed/zalando.org/v1/fake/fake_fabriceventstream.go index 16cf81954..9885d8755 100644 --- a/pkg/generated/clientset/versioned/typed/zalando.org/v1/fake/fake_fabriceventstream.go +++ b/pkg/generated/clientset/versioned/typed/zalando.org/v1/fake/fake_fabriceventstream.go @@ -1,5 +1,5 @@ /* -Copyright 2023 Compose, Zalando SE +Copyright 2025 Compose, Zalando SE 
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/pkg/generated/clientset/versioned/typed/zalando.org/v1/fake/fake_zalando.org_client.go b/pkg/generated/clientset/versioned/typed/zalando.org/v1/fake/fake_zalando.org_client.go index 2c151aff0..049cc72b2 100644 --- a/pkg/generated/clientset/versioned/typed/zalando.org/v1/fake/fake_zalando.org_client.go +++ b/pkg/generated/clientset/versioned/typed/zalando.org/v1/fake/fake_zalando.org_client.go @@ -1,5 +1,5 @@ /* -Copyright 2023 Compose, Zalando SE +Copyright 2025 Compose, Zalando SE Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/pkg/generated/clientset/versioned/typed/zalando.org/v1/generated_expansion.go b/pkg/generated/clientset/versioned/typed/zalando.org/v1/generated_expansion.go index 6f23bf51d..4d1d3e37e 100644 --- a/pkg/generated/clientset/versioned/typed/zalando.org/v1/generated_expansion.go +++ b/pkg/generated/clientset/versioned/typed/zalando.org/v1/generated_expansion.go @@ -1,5 +1,5 @@ /* -Copyright 2023 Compose, Zalando SE +Copyright 2025 Compose, Zalando SE Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/pkg/generated/clientset/versioned/typed/zalando.org/v1/zalando.org_client.go b/pkg/generated/clientset/versioned/typed/zalando.org/v1/zalando.org_client.go index 984879e91..a14c4dee3 100644 --- a/pkg/generated/clientset/versioned/typed/zalando.org/v1/zalando.org_client.go +++ b/pkg/generated/clientset/versioned/typed/zalando.org/v1/zalando.org_client.go @@ -1,5 +1,5 @@ /* -Copyright 2023 Compose, Zalando SE +Copyright 2025 Compose, Zalando SE Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/pkg/generated/informers/externalversions/acid.zalan.do/interface.go b/pkg/generated/informers/externalversions/acid.zalan.do/interface.go index 8ea00f8dc..74f5b0458 100644 --- a/pkg/generated/informers/externalversions/acid.zalan.do/interface.go +++ b/pkg/generated/informers/externalversions/acid.zalan.do/interface.go @@ -1,5 +1,5 @@ /* -Copyright 2023 Compose, Zalando SE +Copyright 2025 Compose, Zalando SE Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/pkg/generated/informers/externalversions/acid.zalan.do/v1/interface.go b/pkg/generated/informers/externalversions/acid.zalan.do/v1/interface.go index ee8873bee..24950b6fd 100644 --- a/pkg/generated/informers/externalversions/acid.zalan.do/v1/interface.go +++ b/pkg/generated/informers/externalversions/acid.zalan.do/v1/interface.go @@ -1,5 +1,5 @@ /* -Copyright 2023 Compose, Zalando SE +Copyright 2025 Compose, Zalando SE Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/pkg/generated/informers/externalversions/acid.zalan.do/v1/postgresql.go b/pkg/generated/informers/externalversions/acid.zalan.do/v1/postgresql.go index 57f427331..179562e4c 100644 --- a/pkg/generated/informers/externalversions/acid.zalan.do/v1/postgresql.go +++ 
b/pkg/generated/informers/externalversions/acid.zalan.do/v1/postgresql.go @@ -1,5 +1,5 @@ /* -Copyright 2023 Compose, Zalando SE +Copyright 2025 Compose, Zalando SE Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/pkg/generated/informers/externalversions/acid.zalan.do/v1/postgresteam.go b/pkg/generated/informers/externalversions/acid.zalan.do/v1/postgresteam.go index 019e6de83..79e6e872a 100644 --- a/pkg/generated/informers/externalversions/acid.zalan.do/v1/postgresteam.go +++ b/pkg/generated/informers/externalversions/acid.zalan.do/v1/postgresteam.go @@ -1,5 +1,5 @@ /* -Copyright 2023 Compose, Zalando SE +Copyright 2025 Compose, Zalando SE Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/pkg/generated/informers/externalversions/factory.go b/pkg/generated/informers/externalversions/factory.go index b76fec377..2169366b5 100644 --- a/pkg/generated/informers/externalversions/factory.go +++ b/pkg/generated/informers/externalversions/factory.go @@ -1,5 +1,5 @@ /* -Copyright 2023 Compose, Zalando SE +Copyright 2025 Compose, Zalando SE Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/pkg/generated/informers/externalversions/generic.go b/pkg/generated/informers/externalversions/generic.go index 3bb58ec46..66d94b2a2 100644 --- a/pkg/generated/informers/externalversions/generic.go +++ b/pkg/generated/informers/externalversions/generic.go @@ -1,5 +1,5 @@ /* -Copyright 2023 Compose, Zalando SE +Copyright 2025 Compose, Zalando SE Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/pkg/generated/informers/externalversions/internalinterfaces/factory_interfaces.go b/pkg/generated/informers/externalversions/internalinterfaces/factory_interfaces.go index be8c83c01..a5d7b2299 100644 --- a/pkg/generated/informers/externalversions/internalinterfaces/factory_interfaces.go +++ b/pkg/generated/informers/externalversions/internalinterfaces/factory_interfaces.go @@ -1,5 +1,5 @@ /* -Copyright 2023 Compose, Zalando SE +Copyright 2025 Compose, Zalando SE Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/pkg/generated/informers/externalversions/zalando.org/interface.go b/pkg/generated/informers/externalversions/zalando.org/interface.go index 5911e683e..aab6846cb 100644 --- a/pkg/generated/informers/externalversions/zalando.org/interface.go +++ b/pkg/generated/informers/externalversions/zalando.org/interface.go @@ -1,5 +1,5 @@ /* -Copyright 2023 Compose, Zalando SE +Copyright 2025 Compose, Zalando SE Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/pkg/generated/informers/externalversions/zalando.org/v1/fabriceventstream.go b/pkg/generated/informers/externalversions/zalando.org/v1/fabriceventstream.go index 86b595193..2e767f426 100644 --- a/pkg/generated/informers/externalversions/zalando.org/v1/fabriceventstream.go +++ b/pkg/generated/informers/externalversions/zalando.org/v1/fabriceventstream.go @@ -1,5 +1,5 @@ /* 
-Copyright 2023 Compose, Zalando SE +Copyright 2025 Compose, Zalando SE Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/pkg/generated/informers/externalversions/zalando.org/v1/interface.go b/pkg/generated/informers/externalversions/zalando.org/v1/interface.go index f7eedf04b..3b61f68a1 100644 --- a/pkg/generated/informers/externalversions/zalando.org/v1/interface.go +++ b/pkg/generated/informers/externalversions/zalando.org/v1/interface.go @@ -1,5 +1,5 @@ /* -Copyright 2023 Compose, Zalando SE +Copyright 2025 Compose, Zalando SE Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/pkg/generated/listers/acid.zalan.do/v1/expansion_generated.go b/pkg/generated/listers/acid.zalan.do/v1/expansion_generated.go index 1d2a4fa3b..dff5ce3f1 100644 --- a/pkg/generated/listers/acid.zalan.do/v1/expansion_generated.go +++ b/pkg/generated/listers/acid.zalan.do/v1/expansion_generated.go @@ -1,5 +1,5 @@ /* -Copyright 2023 Compose, Zalando SE +Copyright 2025 Compose, Zalando SE Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/pkg/generated/listers/acid.zalan.do/v1/postgresql.go b/pkg/generated/listers/acid.zalan.do/v1/postgresql.go index 06c5aa2c0..de713421f 100644 --- a/pkg/generated/listers/acid.zalan.do/v1/postgresql.go +++ b/pkg/generated/listers/acid.zalan.do/v1/postgresql.go @@ -1,5 +1,5 @@ /* -Copyright 2023 Compose, Zalando SE +Copyright 2025 Compose, Zalando SE Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/pkg/generated/listers/acid.zalan.do/v1/postgresteam.go b/pkg/generated/listers/acid.zalan.do/v1/postgresteam.go index b28fe8186..52256d158 100644 --- a/pkg/generated/listers/acid.zalan.do/v1/postgresteam.go +++ b/pkg/generated/listers/acid.zalan.do/v1/postgresteam.go @@ -1,5 +1,5 @@ /* -Copyright 2023 Compose, Zalando SE +Copyright 2025 Compose, Zalando SE Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/pkg/generated/listers/zalando.org/v1/expansion_generated.go b/pkg/generated/listers/zalando.org/v1/expansion_generated.go index 2af7fefad..201fa4ecf 100644 --- a/pkg/generated/listers/zalando.org/v1/expansion_generated.go +++ b/pkg/generated/listers/zalando.org/v1/expansion_generated.go @@ -1,5 +1,5 @@ /* -Copyright 2023 Compose, Zalando SE +Copyright 2025 Compose, Zalando SE Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/pkg/generated/listers/zalando.org/v1/fabriceventstream.go b/pkg/generated/listers/zalando.org/v1/fabriceventstream.go index eb549cfab..7c04027bf 100644 --- a/pkg/generated/listers/zalando.org/v1/fabriceventstream.go +++ b/pkg/generated/listers/zalando.org/v1/fabriceventstream.go @@ -1,5 +1,5 @@ /* -Copyright 2023 Compose, Zalando SE +Copyright 2025 Compose, Zalando SE Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/pkg/spec/types.go b/pkg/spec/types.go index 
023f9660f..d727aee42 100644 --- a/pkg/spec/types.go +++ b/pkg/spec/types.go @@ -4,7 +4,6 @@ import ( "database/sql" "encoding/json" "fmt" - "io/ioutil" "log" "os" "strings" @@ -123,6 +122,9 @@ type ControllerConfig struct { IgnoredAnnotations []string EnableJsonLogging bool + + KubeQPS int + KubeBurst int } // cached value for the GetOperatorNamespace @@ -210,7 +212,7 @@ func GetOperatorNamespace() string { if namespaceFromEnvironment := os.Getenv("OPERATOR_NAMESPACE"); namespaceFromEnvironment != "" { return namespaceFromEnvironment } - operatorNamespaceBytes, err := ioutil.ReadFile(fileWithNamespace) + operatorNamespaceBytes, err := os.ReadFile(fileWithNamespace) if err != nil { log.Fatalf("Unable to detect operator namespace from within its pod due to: %v", err) } diff --git a/pkg/util/config/config.go b/pkg/util/config/config.go index 7553bdbf9..30b967beb 100644 --- a/pkg/util/config/config.go +++ b/pkg/util/config/config.go @@ -25,6 +25,7 @@ type CRD struct { // Resources describes kubernetes resource specific configuration parameters type Resources struct { + EnableOwnerReferences *bool `name:"enable_owner_references" default:"false"` ResourceCheckInterval time.Duration `name:"resource_check_interval" default:"3s"` ResourceCheckTimeout time.Duration `name:"resource_check_timeout" default:"10m"` PodLabelWaitTimeout time.Duration `name:"pod_label_wait_timeout" default:"10m"` @@ -48,12 +49,12 @@ type Resources struct { DeleteAnnotationNameKey string `name:"delete_annotation_name_key"` PodRoleLabel string `name:"pod_role_label" default:"spilo-role"` PodToleration map[string]string `name:"toleration" default:""` - DefaultCPURequest string `name:"default_cpu_request" default:"100m"` - DefaultMemoryRequest string `name:"default_memory_request" default:"100Mi"` - DefaultCPULimit string `name:"default_cpu_limit" default:"1"` - DefaultMemoryLimit string `name:"default_memory_limit" default:"500Mi"` - MinCPULimit string `name:"min_cpu_limit" default:"250m"` - MinMemoryLimit string `name:"min_memory_limit" default:"250Mi"` + DefaultCPURequest string `name:"default_cpu_request"` + DefaultMemoryRequest string `name:"default_memory_request"` + DefaultCPULimit string `name:"default_cpu_limit"` + DefaultMemoryLimit string `name:"default_memory_limit"` + MinCPULimit string `name:"min_cpu_limit"` + MinMemoryLimit string `name:"min_memory_limit"` MaxCPURequest string `name:"max_cpu_request"` MaxMemoryRequest string `name:"max_memory_request"` PodEnvironmentConfigMap spec.NamespacedName `name:"pod_environment_configmap"` @@ -70,17 +71,17 @@ type Resources struct { type InfrastructureRole struct { // Name of a secret which describes the role, and optionally name of a // configmap with an extra information - SecretName spec.NamespacedName + SecretName spec.NamespacedName `json:"secretname,omitempty"` - UserKey string - PasswordKey string - RoleKey string + UserKey string `json:"userkey,omitempty"` + PasswordKey string `json:"passwordkey,omitempty"` + RoleKey string `json:"rolekey,omitempty"` - DefaultUserValue string - DefaultRoleValue string + DefaultUserValue string `json:"defaultuservalue,omitempty"` + DefaultRoleValue string `json:"defaultrolevalue,omitempty"` // This field point out the detailed yaml definition of the role, if exists - Details string + Details string `json:"details,omitempty"` // Specify if a secret contains multiple fields in the following format: // @@ -91,7 +92,7 @@ type InfrastructureRole struct { // If it does, Name/Password/Role are interpreted not as unique field // names, but 
as a template. - Template bool + Template bool `json:"template,omitempty"` } // Auth describes authentication specific configuration parameters @@ -126,12 +127,13 @@ type Scalyr struct { // LogicalBackup defines configuration for logical backup type LogicalBackup struct { LogicalBackupSchedule string `name:"logical_backup_schedule" default:"30 00 * * *"` - LogicalBackupDockerImage string `name:"logical_backup_docker_image" default:"registry.opensource.zalan.do/acid/logical-backup:v1.10.1"` + LogicalBackupDockerImage string `name:"logical_backup_docker_image" default:"ghcr.io/zalando/postgres-operator/logical-backup:v1.14.0"` LogicalBackupProvider string `name:"logical_backup_provider" default:"s3"` LogicalBackupAzureStorageAccountName string `name:"logical_backup_azure_storage_account_name" default:""` LogicalBackupAzureStorageContainer string `name:"logical_backup_azure_storage_container" default:""` LogicalBackupAzureStorageAccountKey string `name:"logical_backup_azure_storage_account_key" default:""` LogicalBackupS3Bucket string `name:"logical_backup_s3_bucket" default:""` + LogicalBackupS3BucketPrefix string `name:"logical_backup_s3_bucket_prefix" default:"spilo"` LogicalBackupS3Region string `name:"logical_backup_s3_region" default:""` LogicalBackupS3Endpoint string `name:"logical_backup_s3_endpoint" default:""` LogicalBackupS3AccessKeyID string `name:"logical_backup_s3_access_key_id" default:""` @@ -140,6 +142,7 @@ type LogicalBackup struct { LogicalBackupS3RetentionTime string `name:"logical_backup_s3_retention_time" default:""` LogicalBackupGoogleApplicationCredentials string `name:"logical_backup_google_application_credentials" default:""` LogicalBackupJobPrefix string `name:"logical_backup_job_prefix" default:"logical-backup-"` + LogicalBackupCronjobEnvironmentSecret string `name:"logical_backup_cronjob_environment_secret" default:""` LogicalBackupCPURequest string `name:"logical_backup_cpu_request"` LogicalBackupMemoryRequest string `name:"logical_backup_memory_request"` LogicalBackupCPULimit string `name:"logical_backup_cpu_limit"` @@ -154,10 +157,10 @@ type ConnectionPooler struct { Image string `name:"connection_pooler_image" default:"registry.opensource.zalan.do/acid/pgbouncer"` Mode string `name:"connection_pooler_mode" default:"transaction"` MaxDBConnections *int32 `name:"connection_pooler_max_db_connections" default:"60"` - ConnectionPoolerDefaultCPURequest string `name:"connection_pooler_default_cpu_request" default:"500m"` - ConnectionPoolerDefaultMemoryRequest string `name:"connection_pooler_default_memory_request" default:"100Mi"` - ConnectionPoolerDefaultCPULimit string `name:"connection_pooler_default_cpu_limit" default:"1"` - ConnectionPoolerDefaultMemoryLimit string `name:"connection_pooler_default_memory_limit" default:"100Mi"` + ConnectionPoolerDefaultCPURequest string `name:"connection_pooler_default_cpu_request"` + ConnectionPoolerDefaultMemoryRequest string `name:"connection_pooler_default_memory_request"` + ConnectionPoolerDefaultCPULimit string `name:"connection_pooler_default_cpu_limit"` + ConnectionPoolerDefaultMemoryLimit string `name:"connection_pooler_default_memory_limit"` } // Config describes operator config @@ -172,7 +175,7 @@ type Config struct { WatchedNamespace string `name:"watched_namespace"` // special values: "*" means 'watch all namespaces', the empty string "" means 'watch a namespace where operator is deployed to' KubernetesUseConfigMaps bool `name:"kubernetes_use_configmaps" default:"false"` EtcdHost string `name:"etcd_host" default:""` 
// special values: the empty string "" means Patroni will use K8s as a DCS - DockerImage string `name:"docker_image" default:"ghcr.io/zalando/spilo-15:3.0-p1"` + DockerImage string `name:"docker_image" default:"ghcr.io/zalando/spilo-17:4.0-p2"` SidecarImages map[string]string `name:"sidecar_docker_images"` // deprecated in favour of SidecarContainers SidecarContainers []v1.Container `name:"sidecars"` PodServiceAccountName string `name:"pod_service_account_name" default:"postgres-pod"` @@ -189,7 +192,7 @@ type Config struct { GCPCredentials string `name:"gcp_credentials"` WALAZStorageAccount string `name:"wal_az_storage_account"` AdditionalSecretMount string `name:"additional_secret_mount"` - AdditionalSecretMountPath string `name:"additional_secret_mount_path" default:"/meta/credentials"` + AdditionalSecretMountPath string `name:"additional_secret_mount_path"` EnableEBSGp3Migration bool `name:"enable_ebs_gp3_migration" default:"false"` EnableEBSGp3MigrationMaxSize int64 `name:"enable_ebs_gp3_migration_max_size" default:"1000"` DebugLogging bool `name:"debug_logging" default:"true"` @@ -219,6 +222,7 @@ type Config struct { ReplicaDNSNameFormat StringTemplate `name:"replica_dns_name_format" default:"{cluster}-repl.{namespace}.{hostedzone}"` ReplicaLegacyDNSNameFormat StringTemplate `name:"replica_legacy_dns_name_format" default:"{cluster}-repl.{team}.{hostedzone}"` PDBNameFormat StringTemplate `name:"pdb_name_format" default:"postgres-{cluster}-pdb"` + PDBMasterLabelSelector *bool `name:"pdb_master_label_selector" default:"true"` EnablePodDisruptionBudget *bool `name:"enable_pod_disruption_budget" default:"true"` EnableInitContainers *bool `name:"enable_init_containers" default:"true"` EnableSidecars *bool `name:"enable_sidecars" default:"true"` @@ -236,16 +240,19 @@ type Config struct { SetMemoryRequestToLimit bool `name:"set_memory_request_to_limit" default:"false"` EnableLazySpiloUpgrade bool `name:"enable_lazy_spilo_upgrade" default:"false"` EnableCrossNamespaceSecret bool `name:"enable_cross_namespace_secret" default:"false"` + EnableFinalizers *bool `name:"enable_finalizers" default:"false"` EnablePgVersionEnvVar bool `name:"enable_pgversion_env_var" default:"true"` EnableSpiloWalPathCompat bool `name:"enable_spilo_wal_path_compat" default:"false"` EnableTeamIdClusternamePrefix bool `name:"enable_team_id_clustername_prefix" default:"false"` - MajorVersionUpgradeMode string `name:"major_version_upgrade_mode" default:"off"` + MajorVersionUpgradeMode string `name:"major_version_upgrade_mode" default:"manual"` MajorVersionUpgradeTeamAllowList []string `name:"major_version_upgrade_team_allow_list" default:""` - MinimalMajorVersion string `name:"minimal_major_version" default:"11"` - TargetMajorVersion string `name:"target_major_version" default:"15"` + MinimalMajorVersion string `name:"minimal_major_version" default:"13"` + TargetMajorVersion string `name:"target_major_version" default:"17"` PatroniAPICheckInterval time.Duration `name:"patroni_api_check_interval" default:"1s"` PatroniAPICheckTimeout time.Duration `name:"patroni_api_check_timeout" default:"5s"` EnablePatroniFailsafeMode *bool `name:"enable_patroni_failsafe_mode" default:"false"` + EnableSecretsDeletion *bool `name:"enable_secrets_deletion" default:"true"` + EnablePersistentVolumeClaimDeletion *bool `name:"enable_persistent_volume_claim_deletion" default:"true"` PersistentVolumeClaimRetentionPolicy map[string]string `name:"persistent_volume_claim_retention_policy" default:"when_deleted:retain,when_scaled:retain"` } diff --git 
a/pkg/util/constants/aws.go b/pkg/util/constants/aws.go index f1cfd5975..147e58889 100644 --- a/pkg/util/constants/aws.go +++ b/pkg/util/constants/aws.go @@ -7,6 +7,7 @@ const ( // EBS related constants EBSVolumeIDStart = "/vol-" EBSProvisioner = "kubernetes.io/aws-ebs" + EBSDriver = "ebs.csi.aws.com" //https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_VolumeModification.html EBSVolumeStateModifying = "modifying" EBSVolumeStateOptimizing = "optimizing" diff --git a/pkg/util/constants/streams.go b/pkg/util/constants/streams.go index 8916701f3..cb4bb6a3f 100644 --- a/pkg/util/constants/streams.go +++ b/pkg/util/constants/streams.go @@ -2,16 +2,19 @@ package constants // PostgreSQL specific constants const ( - EventStreamCRDApiVersion = "zalando.org/v1" - EventStreamCRDKind = "FabricEventStream" - EventStreamCRDName = "fabriceventstreams.zalando.org" - EventStreamSourcePGType = "PostgresLogicalReplication" - EventStreamSourceSlotPrefix = "fes" - EventStreamSourcePluginType = "pgoutput" - EventStreamSourceAuthType = "DatabaseAuthenticationSecret" - EventStreamFlowPgGenericType = "PostgresWalToGenericNakadiEvent" - EventStreamSinkNakadiType = "Nakadi" - EventStreamRecoveryNoneType = "None" - EventStreamRecoveryDLQType = "DeadLetter" - EventStreamRecoverySuffix = "dead-letter-queue" + EventStreamCRDApiVersion = "zalando.org/v1" + EventStreamCRDKind = "FabricEventStream" + EventStreamCRDName = "fabriceventstreams.zalando.org" + EventStreamSourcePGType = "PostgresLogicalReplication" + EventStreamSourceSlotPrefix = "fes" + EventStreamSourcePluginType = "pgoutput" + EventStreamSourceAuthType = "DatabaseAuthenticationSecret" + EventStreamFlowPgGenericType = "PostgresWalToGenericNakadiEvent" + EventStreamSinkNakadiType = "Nakadi" + EventStreamRecoveryDLQType = "DeadLetter" + EventStreamRecoveryIgnoreType = "Ignore" + EventStreamRecoveryNoneType = "None" + EventStreamRecoverySuffix = "dead-letter-queue" + EventStreamCpuAnnotationKey = "fes.zalando.org/FES_CPU" + EventStreamMemoryAnnotationKey = "fes.zalando.org/FES_MEMORY" ) diff --git a/pkg/util/k8sutil/k8sutil.go b/pkg/util/k8sutil/k8sutil.go index 948ce520a..de1fb605a 100644 --- a/pkg/util/k8sutil/k8sutil.go +++ b/pkg/util/k8sutil/k8sutil.go @@ -3,14 +3,10 @@ package k8sutil import ( "context" "fmt" - "reflect" b64 "encoding/base64" "encoding/json" - batchv1 "k8s.io/api/batch/v1" - clientbatchv1 "k8s.io/client-go/kubernetes/typed/batch/v1" - apiacidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1" zalandoclient "github.com/zalando/postgres-operator/pkg/generated/clientset/versioned" acidv1 "github.com/zalando/postgres-operator/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1" @@ -18,14 +14,15 @@ import ( "github.com/zalando/postgres-operator/pkg/spec" apiappsv1 "k8s.io/api/apps/v1" v1 "k8s.io/api/core/v1" - apipolicyv1 "k8s.io/api/policy/v1" + apiextv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" apiextclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" - apiextv1 "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1" + apiextv1client "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/kubernetes" appsv1 "k8s.io/client-go/kubernetes/typed/apps/v1" + batchv1 "k8s.io/client-go/kubernetes/typed/batch/v1" corev1 "k8s.io/client-go/kubernetes/typed/core/v1" policyv1 
"k8s.io/client-go/kubernetes/typed/policy/v1" rbacv1 "k8s.io/client-go/kubernetes/typed/rbac/v1" @@ -61,9 +58,9 @@ type KubernetesClient struct { appsv1.StatefulSetsGetter appsv1.DeploymentsGetter rbacv1.RoleBindingsGetter + batchv1.CronJobsGetter policyv1.PodDisruptionBudgetsGetter - apiextv1.CustomResourceDefinitionsGetter - clientbatchv1.CronJobsGetter + apiextv1client.CustomResourceDefinitionsGetter acidv1.OperatorConfigurationsGetter acidv1.PostgresTeamsGetter acidv1.PostgresqlsGetter @@ -74,6 +71,13 @@ type KubernetesClient struct { Zalandov1ClientSet *zalandoclient.Clientset } +type mockCustomResourceDefinition struct { + apiextv1client.CustomResourceDefinitionInterface +} + +type MockCustomResourceDefinitionsGetter struct { +} + type mockSecret struct { corev1.SecretInterface } @@ -209,58 +213,50 @@ func (client *KubernetesClient) SetPostgresCRDStatus(clusterName spec.Namespaced return pg, fmt.Errorf("could not update status: %v", err) } - // update the spec, maintaining the new resourceVersion. return pg, nil } -// SamePDB compares the PodDisruptionBudgets -func SamePDB(cur, new *apipolicyv1.PodDisruptionBudget) (match bool, reason string) { - //TODO: improve comparison - match = reflect.DeepEqual(new.Spec, cur.Spec) - if !match { - reason = "new PDB spec does not match the current one" +// SetFinalizer of Postgres cluster +func (client *KubernetesClient) SetFinalizer(clusterName spec.NamespacedName, pg *apiacidv1.Postgresql, finalizers []string) (*apiacidv1.Postgresql, error) { + var ( + updatedPg *apiacidv1.Postgresql + patch []byte + err error + ) + pg.ObjectMeta.Finalizers = finalizers + + if len(finalizers) > 0 { + patch, err = json.Marshal(struct { + PgMetadata interface{} `json:"metadata"` + }{&pg.ObjectMeta}) + if err != nil { + return pg, fmt.Errorf("could not marshal ObjectMeta: %v", err) + } + + updatedPg, err = client.PostgresqlsGetter.Postgresqls(clusterName.Namespace).Patch( + context.TODO(), clusterName.Name, types.MergePatchType, patch, metav1.PatchOptions{}) + } else { + // in case finalizers are empty and update is needed to remove + updatedPg, err = client.PostgresqlsGetter.Postgresqls(clusterName.Namespace).Update( + context.TODO(), pg, metav1.UpdateOptions{}) + } + if err != nil { + return updatedPg, fmt.Errorf("could not set finalizer: %v", err) } - return + return updatedPg, nil } -func getJobImage(cronJob *batchv1.CronJob) string { - return cronJob.Spec.JobTemplate.Spec.Template.Spec.Containers[0].Image +func (c *mockCustomResourceDefinition) Get(ctx context.Context, name string, options metav1.GetOptions) (*apiextv1.CustomResourceDefinition, error) { + return &apiextv1.CustomResourceDefinition{}, nil } -func getPgVersion(cronJob *batchv1.CronJob) string { - envs := cronJob.Spec.JobTemplate.Spec.Template.Spec.Containers[0].Env - for _, env := range envs { - if env.Name == "PG_VERSION" { - return env.Value - } - } - return "" +func (c *mockCustomResourceDefinition) Create(ctx context.Context, crd *apiextv1.CustomResourceDefinition, options metav1.CreateOptions) (*apiextv1.CustomResourceDefinition, error) { + return &apiextv1.CustomResourceDefinition{}, nil } -// SameLogicalBackupJob compares Specs of logical backup cron jobs -func SameLogicalBackupJob(cur, new *batchv1.CronJob) (match bool, reason string) { - - if cur.Spec.Schedule != new.Spec.Schedule { - return false, fmt.Sprintf("new job's schedule %q does not match the current one %q", - new.Spec.Schedule, cur.Spec.Schedule) - } - - newImage := getJobImage(new) - curImage := getJobImage(cur) - if 
newImage != curImage { - return false, fmt.Sprintf("new job's image %q does not match the current one %q", - newImage, curImage) - } - - newPgVersion := getPgVersion(new) - curPgVersion := getPgVersion(cur) - if newPgVersion != curPgVersion { - return false, fmt.Sprintf("new job's env PG_VERSION %q does not match the current one %q", - newPgVersion, curPgVersion) - } - - return true, "" +func (mock *MockCustomResourceDefinitionsGetter) CustomResourceDefinitions() apiextv1client.CustomResourceDefinitionInterface { + return &mockCustomResourceDefinition{} } func (c *mockSecret) Get(ctx context.Context, name string, options metav1.GetOptions) (*v1.Secret, error) { @@ -376,7 +372,7 @@ func (mock *mockDeployment) Get(ctx context.Context, name string, opts metav1.Ge Template: v1.PodTemplateSpec{ Spec: v1.PodSpec{ Containers: []v1.Container{ - v1.Container{ + { Image: "pooler:1.0", }, }, @@ -467,6 +463,8 @@ func NewMockKubernetesClient() KubernetesClient { ConfigMapsGetter: &MockConfigMapsGetter{}, DeploymentsGetter: &MockDeploymentGetter{}, ServicesGetter: &MockServiceGetter{}, + + CustomResourceDefinitionsGetter: &MockCustomResourceDefinitionsGetter{}, } } diff --git a/pkg/util/patroni/patroni.go b/pkg/util/patroni/patroni.go index 7f8f63374..2129f1acc 100644 --- a/pkg/util/patroni/patroni.go +++ b/pkg/util/patroni/patroni.go @@ -4,7 +4,7 @@ import ( "bytes" "encoding/json" "fmt" - "io/ioutil" + "io" "math" "net" "net/http" @@ -20,20 +20,21 @@ import ( ) const ( - failoverPath = "/failover" - configPath = "/config" - clusterPath = "/cluster" - statusPath = "/patroni" - restartPath = "/restart" - ApiPort = 8008 - timeout = 30 * time.Second + switchoverPath = "/switchover" + configPath = "/config" + clusterPath = "/cluster" + statusPath = "/patroni" + restartPath = "/restart" + ApiPort = 8008 + timeout = 30 * time.Second ) // Interface describe patroni methods type Interface interface { GetClusterMembers(master *v1.Pod) ([]ClusterMember, error) - Switchover(master *v1.Pod, candidate string) error + Switchover(master *v1.Pod, candidate string, scheduled_at string) error SetPostgresParameters(server *v1.Pod, options map[string]string) error + SetStandbyClusterParameters(server *v1.Pod, options map[string]interface{}) error GetMemberData(server *v1.Pod) (MemberData, error) Restart(server *v1.Pod) error GetConfig(server *v1.Pod) (acidv1.Patroni, map[string]string, error) @@ -102,8 +103,8 @@ func (p *Patroni) httpPostOrPatch(method string, url string, body *bytes.Buffer) } }() - if resp.StatusCode != http.StatusOK { - bodyBytes, err := ioutil.ReadAll(resp.Body) + if resp.StatusCode < http.StatusOK || resp.StatusCode >= 300 { + bodyBytes, err := io.ReadAll(resp.Body) if err != nil { return fmt.Errorf("could not read response: %v", err) } @@ -122,12 +123,12 @@ func (p *Patroni) httpGet(url string) (string, error) { } defer response.Body.Close() - bodyBytes, err := ioutil.ReadAll(response.Body) + bodyBytes, err := io.ReadAll(response.Body) if err != nil { return "", fmt.Errorf("could not read response: %v", err) } - if response.StatusCode != http.StatusOK { + if response.StatusCode < http.StatusOK || response.StatusCode >= 300 { return string(bodyBytes), fmt.Errorf("patroni returned '%d'", response.StatusCode) } @@ -135,9 +136,9 @@ func (p *Patroni) httpGet(url string) (string, error) { } // Switchover by calling Patroni REST API -func (p *Patroni) Switchover(master *v1.Pod, candidate string) error { +func (p *Patroni) Switchover(master *v1.Pod, candidate string, scheduled_at string) error { buf := 
&bytes.Buffer{} - err := json.NewEncoder(buf).Encode(map[string]string{"leader": master.Name, "member": candidate}) + err := json.NewEncoder(buf).Encode(map[string]string{"leader": master.Name, "member": candidate, "scheduled_at": scheduled_at}) if err != nil { return fmt.Errorf("could not encode json: %v", err) } @@ -145,12 +146,12 @@ func (p *Patroni) Switchover(master *v1.Pod, candidate string) error { if err != nil { return err } - return p.httpPostOrPatch(http.MethodPost, apiURLString+failoverPath, buf) + return p.httpPostOrPatch(http.MethodPost, apiURLString+switchoverPath, buf) } //TODO: add an option call /patroni to check if it is necessary to restart the server -//SetPostgresParameters sets Postgres options via Patroni patch API call. +// SetPostgresParameters sets Postgres options via Patroni patch API call. func (p *Patroni) SetPostgresParameters(server *v1.Pod, parameters map[string]string) error { buf := &bytes.Buffer{} err := json.NewEncoder(buf).Encode(map[string]map[string]interface{}{"postgresql": {"parameters": parameters}}) @@ -164,7 +165,12 @@ func (p *Patroni) SetPostgresParameters(server *v1.Pod, parameters map[string]st return p.httpPostOrPatch(http.MethodPatch, apiURLString+configPath, buf) } -//SetConfig sets Patroni options via Patroni patch API call. +// SetStandbyClusterParameters sets StandbyCluster options via Patroni patch API call. +func (p *Patroni) SetStandbyClusterParameters(server *v1.Pod, parameters map[string]interface{}) error { + return p.SetConfig(server, map[string]interface{}{"standby_cluster": parameters}) +} + +// SetConfig sets Patroni options via Patroni patch API call. func (p *Patroni) SetConfig(server *v1.Pod, config map[string]interface{}) error { buf := &bytes.Buffer{} err := json.NewEncoder(buf).Encode(config) diff --git a/pkg/util/patroni/patroni_test.go b/pkg/util/patroni/patroni_test.go index 216a46b86..39b498d2e 100644 --- a/pkg/util/patroni/patroni_test.go +++ b/pkg/util/patroni/patroni_test.go @@ -4,7 +4,7 @@ import ( "bytes" "errors" "fmt" - "io/ioutil" + "io" "math" "net/http" "reflect" @@ -99,30 +99,30 @@ func TestGetClusterMembers(t *testing.T) { }, { Name: "acid-test-cluster-1", Role: "sync_standby", - State: "running", + State: "streaming", Timeline: 1, Lag: 0, }, { Name: "acid-test-cluster-2", Role: "replica", - State: "running", + State: "streaming", Timeline: 1, Lag: math.MaxUint64, }, { Name: "acid-test-cluster-3", Role: "replica", - State: "running", + State: "in archive recovery", Timeline: 1, Lag: 3000000000, }} json := `{"members": [ {"name": "acid-test-cluster-0", "role": "leader", "state": "running", "api_url": "http://192.168.100.1:8008/patroni", "host": "192.168.100.1", "port": 5432, "timeline": 1}, - {"name": "acid-test-cluster-1", "role": "sync_standby", "state": "running", "api_url": "http://192.168.100.2:8008/patroni", "host": "192.168.100.2", "port": 5432, "timeline": 1, "lag": 0}, - {"name": "acid-test-cluster-2", "role": "replica", "state": "running", "api_url": "http://192.168.100.3:8008/patroni", "host": "192.168.100.3", "port": 5432, "timeline": 1, "lag": "unknown"}, - {"name": "acid-test-cluster-3", "role": "replica", "state": "running", "api_url": "http://192.168.100.3:8008/patroni", "host": "192.168.100.3", "port": 5432, "timeline": 1, "lag": 3000000000} + {"name": "acid-test-cluster-1", "role": "sync_standby", "state": "streaming", "api_url": "http://192.168.100.2:8008/patroni", "host": "192.168.100.2", "port": 5432, "timeline": 1, "lag": 0}, + {"name": "acid-test-cluster-2", "role": "replica", 
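Since Switchover now takes a scheduled_at argument, a hedged sketch of how a caller might drive it, assuming an empty string still means "switch over immediately" and an RFC3339 timestamp defers the switchover (for example into a maintenance window):

    package example

    import (
        "time"

        "github.com/zalando/postgres-operator/pkg/util/patroni"
        v1 "k8s.io/api/core/v1"
    )

    // scheduleSwitchover wraps the extended Switchover signature: it passes an
    // empty scheduled_at for an immediate switchover and a formatted timestamp
    // otherwise. The empty-string convention is an assumption of this sketch.
    func scheduleSwitchover(api patroni.Interface, master *v1.Pod, candidate string, at time.Time) error {
        scheduledAt := ""
        if !at.IsZero() {
            scheduledAt = at.Format(time.RFC3339)
        }
        return api.Switchover(master, candidate, scheduledAt)
    }
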
"state": "streaming", "api_url": "http://192.168.100.3:8008/patroni", "host": "192.168.100.3", "port": 5432, "timeline": 1, "lag": "unknown"}, + {"name": "acid-test-cluster-3", "role": "replica", "state": "in archive recovery", "api_url": "http://192.168.100.3:8008/patroni", "host": "192.168.100.3", "port": 5432, "timeline": 1, "lag": 3000000000} ]}` - r := ioutil.NopCloser(bytes.NewReader([]byte(json))) + r := io.NopCloser(bytes.NewReader([]byte(json))) response := http.Response{ StatusCode: 200, @@ -161,7 +161,7 @@ func TestGetMemberData(t *testing.T) { } json := `{"state": "running", "postmaster_start_time": "2021-02-19 14:31:50.053 CET", "role": "master", "server_version": 130004, "cluster_unlocked": false, "xlog": {"location": 123456789}, "timeline": 1, "database_system_identifier": "6462555844314089962", "pending_restart": true, "patroni": {"version": "2.1.1", "scope": "acid-test-cluster"}}` - r := ioutil.NopCloser(bytes.NewReader([]byte(json))) + r := io.NopCloser(bytes.NewReader([]byte(json))) response := http.Response{ StatusCode: 200, @@ -230,7 +230,7 @@ func TestGetConfig(t *testing.T) { } configJson := `{"loop_wait": 10, "maximum_lag_on_failover": 33554432, "postgresql": {"parameters": {"archive_mode": "on", "archive_timeout": "1800s", "autovacuum_analyze_scale_factor": 0.02, "autovacuum_max_workers": 5, "autovacuum_vacuum_scale_factor": 0.05, "checkpoint_completion_target": 0.9, "hot_standby": "on", "log_autovacuum_min_duration": 0, "log_checkpoints": "on", "log_connections": "on", "log_disconnections": "on", "log_line_prefix": "%t [%p]: [%l-1] %c %x %d %u %a %h ", "log_lock_waits": "on", "log_min_duration_statement": 500, "log_statement": "ddl", "log_temp_files": 0, "max_connections": 100, "max_replication_slots": 10, "max_wal_senders": 10, "tcp_keepalives_idle": 900, "tcp_keepalives_interval": 100, "track_functions": "all", "wal_level": "hot_standby", "wal_log_hints": "on"}, "use_pg_rewind": true, "use_slots": true}, "retry_timeout": 10, "slots": {"cdc": {"database": "foo", "plugin": "pgoutput", "type": "logical"}}, "ttl": 30}` - r := ioutil.NopCloser(bytes.NewReader([]byte(configJson))) + r := io.NopCloser(bytes.NewReader([]byte(configJson))) response := http.Response{ StatusCode: 200, @@ -265,7 +265,7 @@ func TestSetPostgresParameters(t *testing.T) { } configJson := `{"loop_wait": 10, "maximum_lag_on_failover": 33554432, "postgresql": {"parameters": {"archive_mode": "on", "archive_timeout": "1800s", "autovacuum_analyze_scale_factor": 0.02, "autovacuum_max_workers": 5, "autovacuum_vacuum_scale_factor": 0.05, "checkpoint_completion_target": 0.9, "hot_standby": "on", "log_autovacuum_min_duration": 0, "log_checkpoints": "on", "log_connections": "on", "log_disconnections": "on", "log_line_prefix": "%t [%p]: [%l-1] %c %x %d %u %a %h ", "log_lock_waits": "on", "log_min_duration_statement": 500, "log_statement": "ddl", "log_temp_files": 0, "max_connections": 50, "max_replication_slots": 10, "max_wal_senders": 10, "tcp_keepalives_idle": 900, "tcp_keepalives_interval": 100, "track_functions": "all", "wal_level": "logical", "wal_log_hints": "on"}, "use_pg_rewind": true, "use_slots": true}, "retry_timeout": 10, "slots": {"cdc": {"database": "foo", "plugin": "pgoutput", "type": "logical"}}, "ttl": 30}` - r := ioutil.NopCloser(bytes.NewReader([]byte(configJson))) + r := io.NopCloser(bytes.NewReader([]byte(configJson))) response := http.Response{ StatusCode: 200, diff --git a/pkg/util/users/users.go b/pkg/util/users/users.go index 4d9a21f73..924d8390e 100644 --- a/pkg/util/users/users.go 
+++ b/pkg/util/users/users.go @@ -24,7 +24,7 @@ const ( doBlockStmt = `SET LOCAL synchronous_commit = 'local'; DO $$ BEGIN %s; END;$$;` passwordTemplate = "ENCRYPTED PASSWORD '%s'" inRoleTemplate = `IN ROLE %s` - adminTemplate = `ADMIN %s` + adminTemplate = `ADMIN "%s"` ) // DefaultUserSyncStrategy implements a user sync strategy that merges already existing database users diff --git a/pkg/util/util.go b/pkg/util/util.go index 504455f47..4b3aafc63 100644 --- a/pkg/util/util.go +++ b/pkg/util/util.go @@ -35,7 +35,7 @@ const ( var passwordChars = []byte("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789") func init() { - rand.Seed(time.Now().Unix()) + rand.New(rand.NewSource(time.Now().Unix())) } // helper function to get bool pointers @@ -152,6 +152,17 @@ func IsEqualIgnoreOrder(a, b []string) bool { return reflect.DeepEqual(a_copy, b_copy) } +// Iterate through slice and remove certain string, then return cleaned slice +func RemoveString(slice []string, s string) (result []string) { + for _, item := range slice { + if item == s { + continue + } + result = append(result, item) + } + return result +} + // SliceReplaceElement func StringSliceReplaceElement(s []string, a, b string) (result []string) { tmp := make([]string, 0, len(s)) diff --git a/pkg/util/util_test.go b/pkg/util/util_test.go index 6444bb48f..37e41f1cf 100644 --- a/pkg/util/util_test.go +++ b/pkg/util/util_test.go @@ -66,6 +66,17 @@ var substractTest = []struct { {[]string{"a"}, []string{""}, []string{"a"}, false}, } +var removeStringTest = []struct { + slice []string + item string + result []string +}{ + {[]string{"a", "b", "c"}, "b", []string{"a", "c"}}, + {[]string{"a"}, "b", []string{"a"}}, + {[]string{"a"}, "a", []string{}}, + {[]string{}, "a", []string{}}, +} + var sliceContaintsTest = []struct { slice []string item string @@ -200,6 +211,15 @@ func TestFindNamedStringSubmatch(t *testing.T) { } } +func TestRemoveString(t *testing.T) { + for _, tt := range removeStringTest { + res := RemoveString(tt.slice, tt.item) + if !IsEqualIgnoreOrder(res, tt.result) { + t.Errorf("RemoveString expected: %#v, got: %#v", tt.result, res) + } + } +} + func TestSliceContains(t *testing.T) { for _, tt := range sliceContaintsTest { res := SliceContains(tt.slice, tt.item) diff --git a/pkg/util/volumes/ebs.go b/pkg/util/volumes/ebs.go index f625dab2f..cb8f8e97f 100644 --- a/pkg/util/volumes/ebs.go +++ b/pkg/util/volumes/ebs.go @@ -36,7 +36,8 @@ func (r *EBSVolumeResizer) IsConnectedToProvider() bool { // VolumeBelongsToProvider checks if the given persistent volume is backed by EBS. 
func (r *EBSVolumeResizer) VolumeBelongsToProvider(pv *v1.PersistentVolume) bool { - return pv.Spec.AWSElasticBlockStore != nil && pv.Annotations[constants.VolumeStorateProvisionerAnnotation] == constants.EBSProvisioner + return (pv.Spec.AWSElasticBlockStore != nil && pv.Annotations[constants.VolumeStorateProvisionerAnnotation] == constants.EBSProvisioner) || + (pv.Spec.CSI != nil && pv.Spec.CSI.Driver == constants.EBSDriver) } // ExtractVolumeID extracts volumeID from "aws://eu-central-1a/vol-075ddfc4a127d0bd4" @@ -54,7 +55,12 @@ func (r *EBSVolumeResizer) ExtractVolumeID(volumeID string) (string, error) { // GetProviderVolumeID converts aws://eu-central-1b/vol-00f93d4827217c629 to vol-00f93d4827217c629 for EBS volumes func (r *EBSVolumeResizer) GetProviderVolumeID(pv *v1.PersistentVolume) (string, error) { - volumeID := pv.Spec.AWSElasticBlockStore.VolumeID + var volumeID string = "" + if pv.Spec.CSI != nil { + volumeID = pv.Spec.CSI.VolumeHandle + } else if pv.Spec.AWSElasticBlockStore != nil { + volumeID = pv.Spec.AWSElasticBlockStore.VolumeID + } if volumeID == "" { return "", fmt.Errorf("got empty volume id for volume %v", pv) } diff --git a/pkg/util/volumes/ebs_test.go b/pkg/util/volumes/ebs_test.go new file mode 100644 index 000000000..6f722ff7b --- /dev/null +++ b/pkg/util/volumes/ebs_test.go @@ -0,0 +1,123 @@ +package volumes + +import ( + "fmt" + "testing" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func TestGetProviderVolumeID(t *testing.T) { + tests := []struct { + name string + pv *v1.PersistentVolume + expected string + err error + }{ + { + name: "CSI volume handle", + pv: &v1.PersistentVolume{ + Spec: v1.PersistentVolumeSpec{ + PersistentVolumeSource: v1.PersistentVolumeSource{ + CSI: &v1.CSIPersistentVolumeSource{ + VolumeHandle: "vol-075ddfc4a127d0bd5", + }, + }, + }, + }, + expected: "vol-075ddfc4a127d0bd5", + err: nil, + }, + { + name: "AWS EBS volume handle", + pv: &v1.PersistentVolume{ + Spec: v1.PersistentVolumeSpec{ + PersistentVolumeSource: v1.PersistentVolumeSource{ + AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{ + VolumeID: "aws://eu-central-1a/vol-075ddfc4a127d0bd4", + }, + }, + }, + }, + expected: "vol-075ddfc4a127d0bd4", + err: nil, + }, + { + name: "Empty volume handle", + pv: &v1.PersistentVolume{ + Spec: v1.PersistentVolumeSpec{}, + }, + expected: "", + err: fmt.Errorf("got empty volume id for volume %v", &v1.PersistentVolume{}), + }, + } + + resizer := EBSVolumeResizer{} + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + volumeID, err := resizer.GetProviderVolumeID(tt.pv) + if volumeID != tt.expected || (err != nil && err.Error() != tt.err.Error()) { + t.Errorf("expected %v, got %v, expected err %v, got %v", tt.expected, volumeID, tt.err, err) + } + }) + } +} + +func TestVolumeBelongsToProvider(t *testing.T) { + tests := []struct { + name string + pv *v1.PersistentVolume + expected bool + }{ + { + name: "CSI volume handle", + pv: &v1.PersistentVolume{ + Spec: v1.PersistentVolumeSpec{ + PersistentVolumeSource: v1.PersistentVolumeSource{ + CSI: &v1.CSIPersistentVolumeSource{ + Driver: "ebs.csi.aws.com", + VolumeHandle: "vol-075ddfc4a127d0bd5", + }, + }, + }, + }, + expected: true, + }, + { + name: "AWS EBS volume handle", + pv: &v1.PersistentVolume{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string { + "pv.kubernetes.io/provisioned-by": "kubernetes.io/aws-ebs", + }, + }, + Spec: v1.PersistentVolumeSpec{ + PersistentVolumeSource: v1.PersistentVolumeSource{ + 
AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{ + VolumeID: "aws://eu-central-1a/vol-075ddfc4a127d0bd4", + }, + }, + }, + }, + expected: true, + }, + { + name: "Empty volume source", + pv: &v1.PersistentVolume{ + Spec: v1.PersistentVolumeSpec{}, + }, + expected: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + resizer := EBSVolumeResizer{} + isProvider := resizer.VolumeBelongsToProvider(tt.pv) + if isProvider != tt.expected { + t.Errorf("expected %v, got %v", tt.expected, isProvider) + } + }) + } +} diff --git a/ui/.dockerignore b/ui/.dockerignore index a53cb76a3..2bc7915f1 100644 --- a/ui/.dockerignore +++ b/ui/.dockerignore @@ -5,6 +5,8 @@ .git __pycache__ +.npm/ + app/node_modules operator_ui/static/build/*.hot-update.js operator_ui/static/build/*.hot-update.json diff --git a/ui/Dockerfile b/ui/Dockerfile index 63e8817e6..51f1d7744 100644 --- a/ui/Dockerfile +++ b/ui/Dockerfile @@ -1,41 +1,34 @@ -FROM registry.opensource.zalan.do/library/alpine-3.15:latest +ARG BASE_IMAGE=registry.opensource.zalan.do/library/python-3.11-slim:latest +ARG NODE_IMAGE=node:lts-alpine + +FROM $NODE_IMAGE AS build + +COPY . /workdir +WORKDIR /workdir/app + +RUN npm install \ + && npm run build + +FROM $BASE_IMAGE LABEL maintainer="Team ACID @ Zalando " EXPOSE 8081 +WORKDIR /app + +RUN apt-get -qq -y update \ + # https://www.psycopg.org/docs/install.html#psycopg-vs-psycopg-binary + && apt-get -qq -y install --no-install-recommends g++ libpq-dev python3-dev python3-distutils \ + && apt-get -qq -y clean \ + && rm -rf /var/lib/apt/lists/* + +COPY requirements.txt . +COPY start_server.sh . +RUN pip install -r requirements.txt -RUN \ - apk add --no-cache \ - alpine-sdk \ - autoconf \ - automake \ - ca-certificates \ - libffi-dev \ - libtool \ - python3 \ - python3-dev \ - zlib-dev \ - && \ - python3 -m ensurepip && \ - rm -r /usr/lib/python*/ensurepip && \ - pip3 install --upgrade \ - gevent \ - jq \ - pip \ - setuptools \ - && \ - rm -rf \ - /root/.cache \ - /tmp/* \ - /var/cache/apk/* - -COPY requirements.txt / -COPY start_server.sh / -RUN pip3 install -r /requirements.txt - -COPY operator_ui /operator_ui +COPY operator_ui operator_ui/ +COPY --from=build /workdir/operator_ui/static/build/ operator_ui/static/build/ ARG VERSION=dev -RUN sed -i "s/__version__ = .*/__version__ = '${VERSION}'/" /operator_ui/__init__.py +RUN sed -i "s/__version__ = .*/__version__ = '${VERSION}'/" operator_ui/__init__.py -WORKDIR / -CMD ["/usr/bin/python3", "-m", "operator_ui"] +CMD ["python", "-m", "operator_ui"] diff --git a/ui/Makefile b/ui/Makefile index 7d86b2df6..8f88982ab 100644 --- a/ui/Makefile +++ b/ui/Makefile @@ -21,8 +21,8 @@ test: tox appjs: - docker run $(TTYFLAGS) -u $$(id -u) -v $$(pwd):/workdir -w /workdir/app node:14.21.2-alpine npm install - docker run $(TTYFLAGS) -u $$(id -u) -v $$(pwd):/workdir -w /workdir/app node:14.21.2-alpine npm run build + docker run $(TTYFLAGS) -u $$(id -u) -v $$(pwd):/workdir -w /workdir/app node:lts-alpine npm install --cache /workdir/.npm + docker run $(TTYFLAGS) -u $$(id -u) -v $$(pwd):/workdir -w /workdir/app node:lts-alpine npm run build --cache /workdir/.npm docker: appjs echo `(env)` diff --git a/ui/app/package.json b/ui/app/package.json index a141328a8..ef24834ca 100644 --- a/ui/app/package.json +++ b/ui/app/package.json @@ -1,6 +1,6 @@ { "name": "postgres-operator-ui", - "version": "1.10.1", + "version": "1.14.0", "description": "PostgreSQL Operator UI", "main": "src/app.js", "config": { diff --git a/ui/app/src/edit.tag.pug 
b/ui/app/src/edit.tag.pug index d3064ab9f..e51630344 100644 --- a/ui/app/src/edit.tag.pug +++ b/ui/app/src/edit.tag.pug @@ -142,6 +142,7 @@ edit o.spec.enableReplicaConnectionPooler = i.spec.enableReplicaConnectionPooler || false o.spec.enableMasterPoolerLoadBalancer = i.spec.enableMasterPoolerLoadBalancer || false o.spec.enableReplicaPoolerLoadBalancer = i.spec.enableReplicaPoolerLoadBalancer || false + o.spec.maintenanceWindows = i.spec.maintenanceWindows || [] o.spec.volume = { size: i.spec.volume.size, diff --git a/ui/app/src/new.tag.pug b/ui/app/src/new.tag.pug index d4ff3d311..0e687e929 100644 --- a/ui/app/src/new.tag.pug +++ b/ui/app/src/new.tag.pug @@ -331,7 +331,7 @@ new .input-units tr - td Througput + td Throughput td .input-group input.form-control( @@ -594,6 +594,12 @@ new {{#if enableReplicaPoolerLoadBalancer}} enableReplicaPoolerLoadBalancer: true {{/if}} + {{#if maintenanceWindows}} + maintenanceWindows: + {{#each maintenanceWindows}} + - "{{ this }}" + {{/each}} + {{/if}} volume: size: "{{ volumeSize }}Gi"{{#if volumeStorageClass}} storageClass: "{{ volumeStorageClass }}"{{/if}}{{#if iops}} @@ -651,6 +657,7 @@ new enableReplicaConnectionPooler: this.enableReplicaConnectionPooler, enableMasterPoolerLoadBalancer: this.enableMasterPoolerLoadBalancer, enableReplicaPoolerLoadBalancer: this.enableReplicaPoolerLoadBalancer, + maintenanceWindows: this.maintenanceWindows, volumeSize: this.volumeSize, volumeStorageClass: this.volumeStorageClass, iops: this.iops, @@ -727,6 +734,10 @@ new this.enableReplicaPoolerLoadBalancer = !this.enableReplicaPoolerLoadBalancer } + this.maintenanceWindows = e => { + this.maintenanceWindows = e.target.value + } + this.volumeChange = e => { this.volumeSize = +e.target.value } @@ -1042,6 +1053,7 @@ new this.enableReplicaConnectionPooler = false this.enableMasterPoolerLoadBalancer = false this.enableReplicaPoolerLoadBalancer = false + this.maintenanceWindows = {} this.postgresqlVersion = this.postgresqlVersion = ( this.config.postgresql_versions[0] diff --git a/ui/app/src/postgresql.tag.pug b/ui/app/src/postgresql.tag.pug index 960c3bd07..7f91ff525 100644 --- a/ui/app/src/postgresql.tag.pug +++ b/ui/app/src/postgresql.tag.pug @@ -87,9 +87,6 @@ postgresql .alert.alert-info(if='{ progress.statefulSet && !progress.containerFirst }') Waiting for 1st container to spawn .alert.alert-success(if='{ progress.containerFirst }') First PostgreSQL cluster container spawned - .alert.alert-info(if='{ !progress.postgresql }') PostgreSQL cluster manifest pending - .alert.alert-success(if='{ progress.postgresql }') PostgreSQL cluster manifest created - .alert.alert-info(if='{ progress.containerFirst && !progress.masterLabel }') Waiting for master to become available .alert.alert-success(if='{ progress.masterLabel }') PostgreSQL master available, label is attached .alert.alert-success(if='{ progress.masterLabel && progress.dnsName }') PostgreSQL ready: { progress.dnsName } diff --git a/ui/manifests/deployment.yaml b/ui/manifests/deployment.yaml index f6f452340..3b3097416 100644 --- a/ui/manifests/deployment.yaml +++ b/ui/manifests/deployment.yaml @@ -18,7 +18,7 @@ spec: serviceAccountName: postgres-operator-ui containers: - name: "service" - image: registry.opensource.zalan.do/acid/postgres-operator-ui:v1.10.1 + image: ghcr.io/zalando/postgres-operator-ui:v1.14.0 ports: - containerPort: 8081 protocol: "TCP" @@ -45,6 +45,7 @@ spec: - name: "RESOURCES_VISIBLE" value: "False" - name: "TARGET_NAMESPACE" + # Set to "*" to allow viewing/creation of clusters in all namespaces 
value: "default" - name: "TEAMS" value: |- @@ -72,16 +73,14 @@ spec: "limit_iops": 16000, "limit_throughput": 1000, "postgresql_versions": [ + "17", + "16", "15", "14", - "13", - "12", - "11" + "13" ] } # Exemple of settings to make snapshot view working in the ui when using AWS - # - name: WALE_S3_ENDPOINT - # value: https+path://s3.us-east-1.amazonaws.com:443 # - name: SPILO_S3_BACKUP_PREFIX # value: spilo/ # - name: AWS_ACCESS_KEY_ID @@ -101,5 +100,3 @@ spec: # key: AWS_DEFAULT_REGION # - name: SPILO_S3_BACKUP_BUCKET # value: - # - name: "USE_AWS_INSTANCE_PROFILE" - # value: "true" diff --git a/ui/operator_ui/adapters/__init__.py b/ui/operator_ui/adapters/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/ui/operator_ui/adapters/logger.py b/ui/operator_ui/adapters/logger.py new file mode 100644 index 000000000..99166f749 --- /dev/null +++ b/ui/operator_ui/adapters/logger.py @@ -0,0 +1,46 @@ +import logging +from logging.config import dictConfig + +dictConfig( + { + "version": 1, + "disable_existing_loggers": True, + "formatters": { + "json": { + "class": "pythonjsonlogger.jsonlogger.JsonFormatter", + "format": "%(asctime)s %(levelname)s: %(message)s", + } + }, + "handlers": { + "stream_handler": { + "class": "logging.StreamHandler", + "formatter": "json", + "stream": "ext://flask.logging.wsgi_errors_stream", + } + }, + "root": { + "level": "DEBUG", + "handlers": ["stream_handler"] + } + } +) + + +class Logger: + def __init__(self): + self.logger = logging.getLogger(__name__) + + def debug(self, msg: str, *args, **kwargs): + self.logger.debug(msg, *args, **kwargs) + + def info(self, msg: str, *args, **kwargs): + self.logger.info(msg, *args, **kwargs) + + def error(self, msg: str, *args, **kwargs): + self.logger.error(msg, *args, **kwargs) + + def exception(self, msg: str, *args, **kwargs): + self.logger.exception(msg, *args, **kwargs) + + +logger = Logger() diff --git a/ui/operator_ui/main.py b/ui/operator_ui/main.py index 0399f14f8..bf28df6eb 100644 --- a/ui/operator_ui/main.py +++ b/ui/operator_ui/main.py @@ -1,13 +1,8 @@ #!/usr/bin/env python3 # pylama:ignore=E402 -import gevent.monkey - -gevent.monkey.patch_all() - import requests import tokens -import sys from backoff import expo, on_exception from click import ParamType, command, echo, option @@ -16,29 +11,22 @@ Flask, Response, abort, - redirect, render_template, request, send_from_directory, - session, ) -from flask_oauthlib.client import OAuth -from functools import wraps from gevent import sleep, spawn from gevent.pywsgi import WSGIServer from jq import jq from json import dumps, loads -from logging import DEBUG, ERROR, INFO, basicConfig, exception, getLogger from os import getenv from re import X, compile from requests.exceptions import RequestException from signal import SIGTERM, signal -from urllib.parse import urljoin from . 
import __version__ from .cluster_discovery import DEFAULT_CLUSTERS, StaticClusterDiscoverer -from .oauth import OAuthRemoteAppWithRefresh from .spiloutils import ( apply_postgresql, @@ -62,20 +50,13 @@ these, ) - -# Disable access logs from Flask -getLogger('gevent').setLevel(ERROR) - -logger = getLogger(__name__) +from operator_ui.adapters.logger import logger SERVER_STATUS = {'shutdown': False} APP_URL = getenv('APP_URL') -AUTHORIZE_URL = getenv('AUTHORIZE_URL') SPILO_S3_BACKUP_BUCKET = getenv('SPILO_S3_BACKUP_BUCKET') TEAM_SERVICE_URL = getenv('TEAM_SERVICE_URL') -ACCESS_TOKEN_URL = getenv('ACCESS_TOKEN_URL') -TOKENINFO_URL = getenv('OAUTH2_TOKEN_INFO_URL') OPERATOR_API_URL = getenv('OPERATOR_API_URL', 'http://postgres-operator') OPERATOR_CLUSTER_NAME_LABEL = getenv('OPERATOR_CLUSTER_NAME_LABEL', 'cluster-name') @@ -86,7 +67,7 @@ SUPERUSER_TEAM = getenv('SUPERUSER_TEAM', 'acid') TARGET_NAMESPACE = getenv('TARGET_NAMESPACE') GOOGLE_ANALYTICS = getenv('GOOGLE_ANALYTICS', False) -MIN_PODS= getenv('MIN_PODS', 2) +MIN_PODS = getenv('MIN_PODS', 2) RESOURCES_VISIBLE = getenv('RESOURCES_VISIBLE', True) CUSTOM_MESSAGE_RED = getenv('CUSTOM_MESSAGE_RED', '') @@ -114,14 +95,6 @@ DEFAULT_CPU = getenv('DEFAULT_CPU', '10m') DEFAULT_CPU_LIMIT = getenv('DEFAULT_CPU_LIMIT', '300m') -WALE_S3_ENDPOINT = getenv( - 'WALE_S3_ENDPOINT', - 'https+path://s3.eu-central-1.amazonaws.com:443', -) - -USE_AWS_INSTANCE_PROFILE = ( - getenv('USE_AWS_INSTANCE_PROFILE', 'false').lower() != 'false' -) AWS_ENDPOINT = getenv('AWS_ENDPOINT') @@ -184,38 +157,6 @@ def __call__(self, environ, start_response): return self.app(environ, start_response) -oauth = OAuth(app) - -auth = OAuthRemoteAppWithRefresh( - oauth, - 'auth', - request_token_url=None, - access_token_method='POST', - access_token_url=ACCESS_TOKEN_URL, - authorize_url=AUTHORIZE_URL, -) -oauth.remote_apps['auth'] = auth - - -def verify_token(token): - if not token: - return False - - r = requests.get(TOKENINFO_URL, headers={'Authorization': token}) - - return r.status_code == 200 - - -def authorize(f): - @wraps(f) - def wrapper(*args, **kwargs): - if AUTHORIZE_URL and 'auth_token' not in session: - return redirect(urljoin(APP_URL, '/login')) - return f(*args, **kwargs) - - return wrapper - - def ok(body={}, status=200): return ( Response( @@ -297,19 +238,16 @@ def health(): @app.route('/css/') -@authorize def send_css(path): return send_from_directory('static/', path), 200, STATIC_HEADERS @app.route('/js/') -@authorize def send_js(path): return send_from_directory('static/', path), 200, STATIC_HEADERS @app.route('/') -@authorize def index(): return render_template('index.html', google_analytics=GOOGLE_ANALYTICS) @@ -321,7 +259,7 @@ def index(): 'users_visible': True, 'databases_visible': True, 'resources_visible': RESOURCES_VISIBLE, - 'postgresql_versions': ['11', '12', '13', '14', '15'], + 'postgresql_versions': ['13', '14', '15', '16', '17'], 'dns_format_string': '{0}.{1}', 'pgui_link': '', 'static_network_whitelist': {}, @@ -345,7 +283,6 @@ def index(): @app.route('/config') -@authorize def get_config(): config = DEFAULT_UI_CONFIG.copy() config.update(OPERATOR_UI_CONFIG) @@ -407,17 +344,15 @@ def get_teams_for_user(user_name): @app.route('/teams') -@authorize def get_teams(): return ok( get_teams_for_user( - session.get('user_name', ''), + request.headers.get('X-Uid', ''), ) ) @app.route('/services//') -@authorize def get_service(namespace: str, cluster: str): if TARGET_NAMESPACE not in ['', '*', namespace]: @@ -433,7 +368,6 @@ def get_service(namespace: str, 
cluster: str): @app.route('/pooler//') -@authorize def get_list_poolers(namespace: str, cluster: str): if TARGET_NAMESPACE not in ['', '*', namespace]: @@ -449,7 +383,6 @@ def get_list_poolers(namespace: str, cluster: str): @app.route('/statefulsets//') -@authorize def get_list_clusters(namespace: str, cluster: str): if TARGET_NAMESPACE not in ['', '*', namespace]: @@ -465,7 +398,6 @@ def get_list_clusters(namespace: str, cluster: str): @app.route('/statefulsets///pods') -@authorize def get_list_members(namespace: str, cluster: str): if TARGET_NAMESPACE not in ['', '*', namespace]: @@ -485,7 +417,6 @@ def get_list_members(namespace: str, cluster: str): @app.route('/namespaces') -@authorize def get_namespaces(): if TARGET_NAMESPACE not in ['', '*']: @@ -503,7 +434,6 @@ def get_namespaces(): @app.route('/postgresqls') -@authorize def get_postgresqls(): postgresqls = [ { @@ -527,6 +457,7 @@ def get_postgresqls(): 'status': status, 'num_elb': spec.get('enableMasterLoadBalancer', 0) + spec.get('enableReplicaLoadBalancer', 0) + \ spec.get('enableMasterPoolerLoadBalancer', 0) + spec.get('enableReplicaPoolerLoadBalancer', 0), + 'maintenance_windows': spec.get('maintenanceWindows', []), } for cluster in these( read_postgresqls( @@ -602,7 +533,6 @@ def run(*args, **kwargs): @app.route('/postgresqls//', methods=['POST']) -@authorize @namespaced def update_postgresql(namespace: str, cluster: str): if READ_ONLY_MODE: @@ -614,8 +544,8 @@ def update_postgresql(namespace: str, cluster: str): postgresql = request.get_json(force=True) - teams = get_teams_for_user(session.get('user_name', '')) - logger.info(f'Changes to: {cluster} by {session.get("user_name", "local-user")}/{teams} {postgresql}') # noqa + teams = get_teams_for_user(request.headers.get('X-Uid', '')) + logger.info(f'Changes to: {cluster} by {request.headers.get("X-Uid", "local-user")}/{teams} {postgresql}') # noqa if SUPERUSER_TEAM and SUPERUSER_TEAM in teams: logger.info(f'Allowing edit due to membership in superuser team {SUPERUSER_TEAM}') # noqa @@ -629,6 +559,11 @@ def update_postgresql(namespace: str, cluster: str): return fail('allowedSourceRanges invalid') spec['allowedSourceRanges'] = postgresql['spec']['allowedSourceRanges'] + if 'maintenanceWindows' in postgresql['spec']: + if not isinstance(postgresql['spec']['maintenanceWindows'], list): + return fail('maintenanceWindows invalid') + spec['maintenanceWindows'] = postgresql['spec']['maintenanceWindows'] + if 'numberOfInstances' in postgresql['spec']: if not isinstance(postgresql['spec']['numberOfInstances'], int): return fail('numberOfInstances invalid') @@ -810,7 +745,6 @@ def update_postgresql(namespace: str, cluster: str): @app.route('/postgresqls//', methods=['GET']) -@authorize def get_postgresql(namespace: str, cluster: str): if TARGET_NAMESPACE not in ['', '*', namespace]: @@ -826,7 +760,6 @@ def get_postgresql(namespace: str, cluster: str): @app.route('/stored_clusters') -@authorize def get_stored_clusters(): return respond( read_stored_clusters( @@ -837,37 +770,30 @@ def get_stored_clusters(): @app.route('/stored_clusters/', methods=['GET']) -@authorize def get_versions(pg_cluster: str): return respond( read_versions( bucket=SPILO_S3_BACKUP_BUCKET, pg_cluster=pg_cluster, prefix=SPILO_S3_BACKUP_PREFIX, - s3_endpoint=WALE_S3_ENDPOINT, - use_aws_instance_profile=USE_AWS_INSTANCE_PROFILE, ), ) - @app.route('/stored_clusters//', methods=['GET']) -@authorize def get_basebackups(pg_cluster: str, uid: str): return respond( read_basebackups( bucket=SPILO_S3_BACKUP_BUCKET, 
pg_cluster=pg_cluster, prefix=SPILO_S3_BACKUP_PREFIX, - s3_endpoint=WALE_S3_ENDPOINT, uid=uid, - use_aws_instance_profile=USE_AWS_INSTANCE_PROFILE, + postgresql_versions=OPERATOR_UI_CONFIG.get('postgresql_versions', DEFAULT_UI_CONFIG['postgresql_versions']), ), ) @app.route('/create-cluster', methods=['POST']) -@authorize def create_new_cluster(): if READ_ONLY_MODE: @@ -885,8 +811,8 @@ def create_new_cluster(): if TARGET_NAMESPACE not in ['', '*', namespace]: return wrong_namespace() - teams = get_teams_for_user(session.get('user_name', '')) - logger.info(f'Create cluster by {session.get("user_name", "local-user")}/{teams} {postgresql}') # noqa + teams = get_teams_for_user(request.headers.get('X-Uid', '')) + logger.info(f'Create cluster by {request.headers.get("X-Uid", "local-user")}/{teams} {postgresql}') # noqa if SUPERUSER_TEAM and SUPERUSER_TEAM in teams: logger.info(f'Allowing create due to membership in superuser team {SUPERUSER_TEAM}') # noqa @@ -898,7 +824,6 @@ def create_new_cluster(): @app.route('/postgresqls//', methods=['DELETE']) -@authorize def delete_postgresql(namespace: str, cluster: str): if TARGET_NAMESPACE not in ['', '*', namespace]: return wrong_namespace() @@ -910,9 +835,9 @@ def delete_postgresql(namespace: str, cluster: str): if postgresql is None: return not_found() - teams = get_teams_for_user(session.get('user_name', '')) + teams = get_teams_for_user(request.headers.get('X-Uid', '')) - logger.info(f'Delete cluster: {cluster} by {session.get("user_name", "local-user")}/{teams}') # noqa + logger.info(f'Delete cluster: {cluster} by {request.headers.get("X-Uid", "local-user")}/{teams}') # noqa if SUPERUSER_TEAM and SUPERUSER_TEAM in teams: logger.info(f'Allowing delete due to membership in superuser team {SUPERUSER_TEAM}') # noqa @@ -936,78 +861,30 @@ def proxy_operator(url: str): @app.route('/operator/status') -@authorize def get_operator_status(): return proxy_operator('/status/') @app.route('/operator/workers//queue') -@authorize def get_operator_get_queue(worker: int): return proxy_operator(f'/workers/{worker}/queue') @app.route('/operator/workers//logs') -@authorize def get_operator_get_logs(worker: int): return proxy_operator(f'/workers/{worker}/logs') @app.route('/operator/clusters///logs') -@authorize def get_operator_get_logs_per_cluster(namespace: str, cluster: str): return proxy_operator(f'/clusters/{namespace}/{cluster}/logs/') -@app.route('/login') -def login(): - redirect = request.args.get('redirect', False) - if not redirect: - return render_template('login-deeplink.html') - - redirect_uri = urljoin(APP_URL, '/login/authorized') - return auth.authorize(callback=redirect_uri) - - -@app.route('/logout') -def logout(): - session.pop('auth_token', None) - return redirect(urljoin(APP_URL, '/')) - - @app.route('/favicon.png') def favicon(): return send_from_directory('static/', 'favicon-96x96.png'), 200 -@app.route('/login/authorized') -def authorized(): - resp = auth.authorized_response() - if resp is None: - return 'Access denied: reason=%s error=%s' % ( - request.args['error'], - request.args['error_description'] - ) - - if not isinstance(resp, dict): - return 'Invalid auth response' - - session['auth_token'] = (resp['access_token'], '') - - r = requests.get( - TOKENINFO_URL, - headers={ - 'Authorization': f'Bearer {session["auth_token"][0]}', - }, - ) - session['user_name'] = r.json().get('uid') - - logger.info(f'Login from: {session["user_name"]}') - - # return redirect(urljoin(APP_URL, '/')) - return render_template('login-resolve-deeplink.html') - - 
def shutdown(): # just wait some time to give Kubernetes time to update endpoints # this requires changing the readinessProbe's @@ -1083,28 +960,18 @@ def init_cluster(): help='Verbose logging', is_flag=True, ) -@option( - '--secret-key', - default='development', - envvar='SECRET_KEY', - help='Secret key for session cookies', -) @option( '--clusters', envvar='CLUSTERS', help=f'Comma separated list of Kubernetes API server URLs (default: {DEFAULT_CLUSTERS})', # noqa type=CommaSeparatedValues(), ) -def main(port, secret_key, debug, clusters: list): +def main(port, debug, clusters: list): global TARGET_NAMESPACE - basicConfig(stream=sys.stdout, level=(DEBUG if debug else INFO), format='%(asctime)s %(levelname)s: %(message)s',) - init_cluster() - logger.info(f'Access token URL: {ACCESS_TOKEN_URL}') logger.info(f'App URL: {APP_URL}') - logger.info(f'Authorize URL: {AUTHORIZE_URL}') logger.info(f'Operator API URL: {OPERATOR_API_URL}') logger.info(f'Operator cluster name label: {OPERATOR_CLUSTER_NAME_LABEL}') logger.info(f'Readonly mode: {"enabled" if READ_ONLY_MODE else "disabled"}') # noqa @@ -1113,9 +980,6 @@ def main(port, secret_key, debug, clusters: list): logger.info(f'Superuser team: {SUPERUSER_TEAM}') logger.info(f'Target namespace: {TARGET_NAMESPACE}') logger.info(f'Teamservice URL: {TEAM_SERVICE_URL}') - logger.info(f'Tokeninfo URL: {TOKENINFO_URL}') - logger.info(f'Use AWS instance_profile: {USE_AWS_INSTANCE_PROFILE}') - logger.info(f'WAL-E S3 endpoint: {WALE_S3_ENDPOINT}') logger.info(f'AWS S3 endpoint: {AWS_ENDPOINT}') if TARGET_NAMESPACE is None: @@ -1136,7 +1000,6 @@ def get_target_namespace(): logger.info(f'Target namespace set to: {TARGET_NAMESPACE or "*"}') app.debug = debug - app.secret_key = secret_key signal(SIGTERM, exit_gracefully) diff --git a/ui/operator_ui/oauth.py b/ui/operator_ui/oauth.py deleted file mode 100644 index 34c07fd4f..000000000 --- a/ui/operator_ui/oauth.py +++ /dev/null @@ -1,32 +0,0 @@ -import os - -from flask_oauthlib.client import OAuthRemoteApp - - -CREDENTIALS_DIR = os.getenv('CREDENTIALS_DIR', '') - - -class OAuthRemoteAppWithRefresh(OAuthRemoteApp): - '''Same as flask_oauthlib.client.OAuthRemoteApp, but always loads client credentials from file.''' - - def __init__(self, oauth, name, **kwargs): - # constructor expects some values, so make it happy.. 
- kwargs['consumer_key'] = 'not-needed-here' - kwargs['consumer_secret'] = 'not-needed-here' - OAuthRemoteApp.__init__(self, oauth, name, **kwargs) - - def refresh_credentials(self): - with open(os.path.join(CREDENTIALS_DIR, 'authcode-client-id')) as fd: - self._consumer_key = fd.read().strip() - with open(os.path.join(CREDENTIALS_DIR, 'authcode-client-secret')) as fd: - self._consumer_secret = fd.read().strip() - - @property - def consumer_key(self): - self.refresh_credentials() - return self._consumer_key - - @property - def consumer_secrect(self): - self.refresh_credentials() - return self._consumer_secret diff --git a/ui/operator_ui/spiloutils.py b/ui/operator_ui/spiloutils.py index 9bbc4e3ba..6a2f03bb2 100644 --- a/ui/operator_ui/spiloutils.py +++ b/ui/operator_ui/spiloutils.py @@ -2,17 +2,13 @@ from datetime import datetime, timezone from furl import furl from json import dumps, loads -from logging import getLogger from os import environ, getenv from requests import Session from urllib.parse import urljoin from uuid import UUID -from wal_e.cmd import configure_backup_cxt -from .utils import Attrs, defaulting, these - - -logger = getLogger(__name__) +from .utils import defaulting, these +from operator_ui.adapters.logger import logger session = Session() @@ -287,10 +283,8 @@ def read_stored_clusters(bucket, prefix, delimiter='/'): def read_versions( pg_cluster, bucket, - s3_endpoint, prefix, delimiter='/', - use_aws_instance_profile=False, ): return [ 'base' if uid == 'wal' else uid @@ -308,35 +302,72 @@ def read_versions( if uid == 'wal' or defaulting(lambda: UUID(uid)) ] -BACKUP_VERSION_PREFIXES = ['', '9.6/', '10/', '11/', '12/', '13/', '14/', '15/'] +def lsn_to_wal_segment_stop(finish_lsn, start_segment, wal_segment_size=16 * 1024 * 1024): + timeline = int(start_segment[:8], 16) + log_id = finish_lsn >> 32 + seg_id = (finish_lsn & 0xFFFFFFFF) // wal_segment_size + return f"{timeline:08X}{log_id:08X}{seg_id:08X}" + +def lsn_to_offset_hex(lsn, wal_segment_size=16 * 1024 * 1024): + return f"{lsn % wal_segment_size:08X}" def read_basebackups( pg_cluster, uid, bucket, - s3_endpoint, prefix, - delimiter='/', - use_aws_instance_profile=False, + postgresql_versions, ): - environ['WALE_S3_ENDPOINT'] = s3_endpoint suffix = '' if uid == 'base' else '/' + uid backups = [] - for vp in BACKUP_VERSION_PREFIXES: - - backups = backups + [ - { - key: value - for key, value in basebackup.__dict__.items() - if isinstance(value, str) or isinstance(value, int) - } - for basebackup in Attrs.call( - f=configure_backup_cxt, - aws_instance_profile=use_aws_instance_profile, - s3_prefix=f's3://{bucket}/{prefix}{pg_cluster}{suffix}/wal/{vp}', - )._backup_list(detail=True) - ] + for vp in postgresql_versions: + backup_prefix = f'{prefix}{pg_cluster}{suffix}/wal/{vp}/basebackups_005/' + logger.info(f"{bucket}/{backup_prefix}") + + paginator = client('s3').get_paginator('list_objects_v2') + pages = paginator.paginate(Bucket=bucket, Prefix=backup_prefix) + + for page in pages: + for obj in page.get("Contents", []): + key = obj["Key"] + if not key.endswith("backup_stop_sentinel.json"): + continue + + response = client('s3').get_object(Bucket=bucket, Key=key) + backup_info = loads(response["Body"].read().decode("utf-8")) + last_modified = response["LastModified"].astimezone(timezone.utc).isoformat() + + backup_name = key.split("/")[-1].replace("_backup_stop_sentinel.json", "") + start_seg, start_offset = backup_name.split("_")[1], backup_name.split("_")[-1] if "_" in backup_name else None + + if "LSN" in backup_info 
and "FinishLSN" in backup_info: + # WAL-G + lsn = backup_info["LSN"] + finish_lsn = backup_info["FinishLSN"] + backups.append({ + "expanded_size_bytes": backup_info.get("UncompressedSize"), + "last_modified": last_modified, + "name": backup_name, + "wal_segment_backup_start": start_seg, + "wal_segment_backup_stop": lsn_to_wal_segment_stop(finish_lsn, start_seg), + "wal_segment_offset_backup_start": lsn_to_offset_hex(lsn), + "wal_segment_offset_backup_stop": lsn_to_offset_hex(finish_lsn), + }) + elif "wal_segment_backup_stop" in backup_info: + # WAL-E + stop_seg = backup_info["wal_segment_backup_stop"] + stop_offset = backup_info["wal_segment_offset_backup_stop"] + + backups.append({ + "expanded_size_bytes": backup_info.get("expanded_size_bytes"), + "last_modified": last_modified, + "name": backup_name, + "wal_segment_backup_start": start_seg, + "wal_segment_backup_stop": stop_seg, + "wal_segment_offset_backup_start": start_offset, + "wal_segment_offset_backup_stop": stop_offset, + }) return backups diff --git a/ui/operator_ui/templates/index.html b/ui/operator_ui/templates/index.html index 7307c8a3a..60a31e190 100644 --- a/ui/operator_ui/templates/index.html +++ b/ui/operator_ui/templates/index.html @@ -3,7 +3,13 @@ PostgreSQL Operator UI - + diff --git a/ui/operator_ui/templates/login-deeplink.html b/ui/operator_ui/templates/login-deeplink.html deleted file mode 100644 index 875b8d055..000000000 --- a/ui/operator_ui/templates/login-deeplink.html +++ /dev/null @@ -1,13 +0,0 @@ - - - Storing client location ... - - - - - \ No newline at end of file diff --git a/ui/operator_ui/templates/login-resolve-deeplink.html b/ui/operator_ui/templates/login-resolve-deeplink.html deleted file mode 100644 index fac96b265..000000000 --- a/ui/operator_ui/templates/login-resolve-deeplink.html +++ /dev/null @@ -1,18 +0,0 @@ - - - Restoring client location ... - - - - - diff --git a/ui/requirements.txt b/ui/requirements.txt index ae517893a..783c0aac3 100644 --- a/ui/requirements.txt +++ b/ui/requirements.txt @@ -1,15 +1,14 @@ backoff==2.2.1 -boto3==1.26.51 +boto3==1.34.110 boto==2.49.0 -click==8.1.3 -Flask-OAuthlib==0.9.6 -Flask==2.3.2 +click==8.1.7 +Flask==3.0.3 furl==2.1.3 -gevent==22.10.2 -jq==1.4.0 +gevent==24.2.1 +jq==1.7.0 json_delta>=2.0.2 kubernetes==11.0.0 -requests==2.31.0 +python-json-logger==2.0.7 +requests==2.32.2 stups-tokens>=1.1.19 -wal_e==1.1.1 -werkzeug==2.3.3 +werkzeug==3.0.6 diff --git a/ui/run_local.sh b/ui/run_local.sh index c2918505a..37f8b1747 100755 --- a/ui/run_local.sh +++ b/ui/run_local.sh @@ -31,11 +31,11 @@ default_operator_ui_config='{ "limit_iops": 16000, "limit_throughput": 1000, "postgresql_versions": [ + "17", + "16", "15", "14", - "13", - "12", - "11" + "13" ], "static_network_whitelist": { "localhost": ["172.0.0.1/32"] diff --git a/ui/setup.py b/ui/setup.py index 95ddfe182..43a1fb67d 100644 --- a/ui/setup.py +++ b/ui/setup.py @@ -69,7 +69,7 @@ def readme(): 'License :: OSI Approved :: MIT', 'Operating System :: OS Independent', 'Programming Language :: Python', - 'Programming Language :: Python :: 3.5', + 'Programming Language :: Python :: 3.11', 'Topic :: System :: Clustering', 'Topic :: System :: Monitoring', ],