diff --git a/.github/workflows/lint.yaml b/.github/workflows/lint.yaml new file mode 100644 index 0000000000..40ee71c8e9 --- /dev/null +++ b/.github/workflows/lint.yaml @@ -0,0 +1,14 @@ +on: + pull_request: + branches: + - master + +jobs: + golangci-lint: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - uses: golangci/golangci-lint-action@v2 + with: + version: v1.32 + args: --timeout=5m diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml new file mode 100644 index 0000000000..ddb05d51cb --- /dev/null +++ b/.github/workflows/test.yaml @@ -0,0 +1,17 @@ +on: + pull_request: + branches: + - master + push: + branches: + - master + +jobs: + go-test: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - uses: actions/setup-go@v2 + with: + go-version: 1.x + - run: PGOROOT=$(pwd) go test ./... diff --git a/.gitignore b/.gitignore index 210f4ef69a..6542af05bc 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,5 @@ .DS_Store /vendor/ tools +licenses/* +!licenses/LICENSE.txt diff --git a/.golangci.yaml b/.golangci.yaml new file mode 100644 index 0000000000..937735ce02 --- /dev/null +++ b/.golangci.yaml @@ -0,0 +1,22 @@ +# https://golangci-lint.run/usage/configuration/ + +linters: + disable: + - gofumpt + - scopelint + enable: + - gosimple + - misspell + presets: + - bugs + - format + - unused + +linters-settings: + exhaustive: + default-signifies-exhaustive: true + +run: + skip-dirs: + - hack + - pkg/generated diff --git a/LICENSE.md b/LICENSE.md index 90fe0562e2..8ce5664373 100644 --- a/LICENSE.md +++ b/LICENSE.md @@ -176,7 +176,7 @@ END OF TERMS AND CONDITIONS - Copyright 2017 - 2020 Crunchy Data Solutions, Inc. + Copyright 2017 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/Makefile b/Makefile index df717f4a70..876512ef1d 100644 --- a/Makefile +++ b/Makefile @@ -1,14 +1,13 @@ # Default values if not already set -ANSIBLE_VERSION ?= 2.9.* PGOROOT ?= $(CURDIR) -PGO_BASEOS ?= centos7 +PGO_BASEOS ?= ubi8 PGO_IMAGE_PREFIX ?= crunchydata PGO_IMAGE_TAG ?= $(PGO_BASEOS)-$(PGO_VERSION) -PGO_VERSION ?= 4.5.0 -PGO_PG_VERSION ?= 12 -PGO_PG_FULLVERSION ?= 12.4 -PGO_BACKREST_VERSION ?= 2.29 +PGO_VERSION ?= 4.6.10 +PGO_PG_VERSION ?= 13 +PGO_PG_FULLVERSION ?= 13.10 +PGO_BACKREST_VERSION ?= 2.31 PACKAGER ?= yum RELTMPDIR=/tmp/release.$(PGO_VERSION) @@ -79,19 +78,16 @@ endif # To build a specific image, run 'make <image name>-image' (e.g.
'make pgo-apiserver-image') images = pgo-apiserver \ - pgo-backrest \ - pgo-backrest-repo \ pgo-event \ pgo-rmdata \ pgo-scheduler \ - pgo-sqlrunner \ pgo-client \ pgo-deployer \ crunchy-postgres-exporter \ postgres-operator .PHONY: all installrbac setup setupnamespaces cleannamespaces \ - deployoperator cli-docs clean push pull release + deployoperator cli-docs clean push pull release license #======= Main functions ======= @@ -115,12 +111,11 @@ deployoperator: #======= Binary builds ======= +build: build-postgres-operator build-pgo-apiserver build-pgo-client build-pgo-rmdata build-pgo-scheduler license + build-pgo-apiserver: $(GO_BUILD) -o bin/apiserver ./cmd/apiserver -build-pgo-backrest: - $(GO_BUILD) -o bin/pgo-backrest/pgo-backrest ./cmd/pgo-backrest - build-pgo-rmdata: $(GO_BUILD) -o bin/pgo-rmdata/pgo-rmdata ./cmd/pgo-rmdata @@ -165,7 +160,6 @@ $(PGOROOT)/build/%/Dockerfile: --build-arg PREFIX=$(PGO_IMAGE_PREFIX) \ --build-arg PGVERSION=$(PGO_PG_VERSION) \ --build-arg BACKREST_VERSION=$(PGO_BACKREST_VERSION) \ - --build-arg ANSIBLE_VERSION=$(ANSIBLE_VERSION) \ --build-arg DFSET=$(DFSET) \ --build-arg PACKAGER=$(PACKAGER) \ $(PGOROOT) @@ -182,7 +176,7 @@ endif pgo-base: pgo-base-$(IMGBUILDER) -pgo-base-build: $(PGOROOT)/build/pgo-base/Dockerfile +pgo-base-build: build $(PGOROOT)/build/pgo-base/Dockerfile $(IMGCMDSTEM) \ -f $(PGOROOT)/build/pgo-base/Dockerfile \ -t $(PGO_IMAGE_PREFIX)/pgo-base:$(PGO_IMAGE_TAG) \ @@ -205,6 +199,10 @@ pgo-base-docker: pgo-base-build #======== Utility ======= +check: + rm -rf licenses/*/ + PGOROOT=$(PGOROOT) go test ./... + cli-docs: rm docs/content/pgo-client/reference/*.md cd docs/content/pgo-client/reference && go run ../../../../cmd/pgo/generatedocs.go @@ -214,10 +212,10 @@ cli-docs: rm docs/content/pgo-client/reference/pgo.md clean: clean-deprecated + rm -rf licenses/*/ rm -f bin/apiserver rm -f bin/postgres-operator rm -f bin/pgo bin/pgo-mac bin/pgo.exe - rm -f bin/pgo-backrest/pgo-backrest rm -f bin/pgo-rmdata/pgo-rmdata rm -f bin/pgo-scheduler/pgo-scheduler [ -z "$$(ls hack/tools)" ] || rm hack/tools/* @@ -229,6 +227,9 @@ clean-deprecated: [ ! -n '$(GOBIN)' ] || rm -f $(GOBIN)/postgres-operator $(GOBIN)/apiserver $(GOBIN)/*pgo [ ! -d bin/postgres-operator ] || rm -r bin/postgres-operator +license: + ./bin/license_aggregator.sh + push: $(images:%=push-%) ; push-%: @@ -248,7 +249,6 @@ release: linuxpgo macpgo winpgo cp bin/pgo $(RELTMPDIR) cp bin/pgo-mac $(RELTMPDIR) cp bin/pgo.exe $(RELTMPDIR) - cp $(PGOROOT)/examples/pgo-bash-completion $(RELTMPDIR) tar czvf $(RELFILE) -C $(RELTMPDIR) . generate: diff --git a/README.md b/README.md index f94ec04a6c..fc8056939f 100644 --- a/README.md +++ b/README.md @@ -1,17 +1,22 @@ -

[README header image/title markup: alt text "Crunchy Data PostgreSQL Operator" and "Crunchy Data" replaced with "PGO: The Postgres Operator from Crunchy Data"]
[![Go Report Card](https://goreportcard.com/badge/github.com/CrunchyData/postgres-operator)](https://goreportcard.com/report/github.com/CrunchyData/postgres-operator) -# Run your own production-grade PostgreSQL-as-a-Service on Kubernetes! +# Run Cloud Native PostgreSQL on Kubernetes with PGO: The Postgres Operator from Crunchy Data! -The [Crunchy PostgreSQL Operator][documentation] automates and simplifies deploying and managing -open source PostgreSQL clusters on Kubernetes and other Kubernetes-enabled Platforms by providing -the essential features you need to keep your PostgreSQL clusters up and running, including: +[PGO][documentation], the [Postgres Operator][documentation] developed by +[Crunchy Data](https://crunchydata.com/) and included in [Crunchy PostgreSQL for Kubernetes](https://www.crunchydata.com/products/crunchy-postgresql-for-kubernetes/), automates and simplifies deploying and managing open source +PostgreSQL clusters on Kubernetes. -#### PostgreSQL Cluster [Provisioning][provisioning] +Whether you need to get a simple Postgres cluster up and running, need to deploy +a high availability, fault tolerant cluster in production, or are running your +own database-as-a-service, the PostgreSQL Operator provides the essential +features you need to keep your cloud native Postgres clusters healthy, including: + +#### Postgres Cluster [Provisioning][provisioning] [Create, Scale, & Delete PostgreSQL clusters with ease][provisioning], while fully customizing your Pods and PostgreSQL configuration! @@ -33,7 +38,7 @@ Set how long you want your backups retained for. Works great with very large dat #### TLS Secure communication between your applications and data servers by [enabling TLS for your PostgreSQL servers][pgo-task-tls], -including the ability to enforce that all of your connections to use TLS. +including the ability to enforce that all of your connections use TLS. #### [Monitoring][monitoring] @@ -61,9 +66,9 @@ Create new clusters from your existing clusters or backups with [`pgo create clu Use [pgBouncer][] for connection pooling -#### Node Affinity +#### Affinity and Tolerations -Have your PostgreSQL clusters deployed to [Kubernetes Nodes][k8s-nodes] of your preference +Have your PostgreSQL clusters deployed to [Kubernetes Nodes][k8s-nodes] of your preference with [node affinity][high-availability-node-affinity], or designate which nodes Kubernetes can schedule PostgreSQL instances to with Kubernetes [tolerations][high-availability-tolerations]. #### Scheduled Backups @@ -76,16 +81,22 @@ the S3 protocol.
The PostgreSQL Operator can backup, restore, and create new clu #### Multi-Namespace Support -You can control how the PostgreSQL Operator leverages [Kubernetes Namespaces][k8s-namespaces] with several different deployment models: +You can control how PGO, the Postgres Operator, leverages [Kubernetes Namespaces][k8s-namespaces] with several different deployment models: -- Deploy the PostgreSQL Operator and all PostgreSQL clusters to the same namespace -- Deploy the PostgreSQL Operator to one namespaces, and all PostgreSQL clusters to a different namespace -- Deploy the PostgreSQL Operator to one namespace, and have your PostgreSQL clusters managed across multiple namespaces -- Dynamically add and remove namespaces managed by the PostgreSQL Operator using the `pgo create namespace` and `pgo delete namespace` commands +- Deploy PGO and all PostgreSQL clusters to the same namespace +- Deploy PGO to one namespace, and all PostgreSQL clusters to a different +namespace +- Deploy PGO to one namespace, and have your PostgreSQL clusters managed across +multiple namespaces +- Dynamically add and remove namespaces managed by the PostgreSQL Operator using +the `pgo` client to run `pgo create namespace` and `pgo delete namespace` #### Full Customizability -The Crunchy PostgreSQL Operator makes it easy to get your own PostgreSQL-as-a-Service up and running on Kubernetes-enabled platforms, but we know that there are further customizations that you can make. As such, the Crunchy PostgreSQL Operator allows you to further customize your deployments, including: +The Postgres Operator (PGO) makes it easy to get Postgres up and running on +Kubernetes-enabled platforms, but we know that there are further customizations +that you can make. As such, PGO allows you to further customize your +deployments, including: - Selecting different storage classes for your primary, replica, and backup storage - Select your own container resources class for each PostgreSQL cluster deployment; differentiate between resources applied for primary and replica clusters!
@@ -94,16 +105,17 @@ The Crunchy PostgreSQL Operator makes it easy to get your own PostgreSQL-as-a-Se - Bring your own trusted certificate authority (CA) for use with the Operator API server - Override your PostgreSQL configuration for each cluster - [disaster-recovery]: https://access.crunchydata.com/documentation/postgres-operator/latest/architecture/disaster-recovery/ [disaster-recovery-s3]: https://access.crunchydata.com/documentation/postgres-operator/latest/architecture/disaster-recovery/#using-s3 [disaster-recovery-scheduling]: https://access.crunchydata.com/documentation/postgres-operator/latest/architecture/disaster-recovery/#scheduling-backups [high-availability]: https://access.crunchydata.com/documentation/postgres-operator/latest/architecture/high-availability/ +[high-availability-node-affinity]: https://access.crunchydata.com/documentation/postgres-operator/latest/architecture/high-availability/#node-affinity [high-availability-sync]: https://access.crunchydata.com/documentation/postgres-operator/latest/architecture/high-availability/#synchronous-replication-guarding-against-transactions-loss +[high-availability-tolerations]: https://access.crunchydata.com/documentation/postgres-operator/latest/architecture/high-availability/#tolerations [monitoring]: https://access.crunchydata.com/documentation/postgres-operator/latest/architecture/monitoring/ [multiple-cluster]: https://access.crunchydata.com/documentation/postgres-operator/latest/architecture/high-availability/multi-cluster-kubernetes/ [pgo-create-cluster]: https://access.crunchydata.com/documentation/postgres-operator/latest/pgo-client/reference/pgo_create_cluster/ -[pgo-task-tls]: https://access.crunchydata.com/documentation/postgres-operator/latest/pgo-client/common-tasks/#enable-tls +[pgo-task-tls]: https://access.crunchydata.com/documentation/postgres-operator/latest/tutorial/tls/ [provisioning]: https://access.crunchydata.com/documentation/postgres-operator/latest/architecture/provisioning/ [k8s-anti-affinity]: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#inter-pod-affinity-and-anti-affinity @@ -111,57 +123,69 @@ The Crunchy PostgreSQL Operator makes it easy to get your own PostgreSQL-as-a-Se [k8s-nodes]: https://kubernetes.io/docs/concepts/architecture/nodes/ [pgBackRest]: https://www.pgbackrest.org -[pgBouncer]: https://access.crunchydata.com/documentation/pgbouncer/ +[pgBouncer]: https://access.crunchydata.com/documentation/postgres-operator/latest/tutorial/pgbouncer/ [pgMonitor]: https://github.com/CrunchyData/pgmonitor ## Deployment Requirements -The PostgreSQL Operator is validated for deployment on Kubernetes, OpenShift, and VMware Enterprise PKS clusters. Some form of storage is required, NFS, hostPath, and Storage Classes are currently supported. +PGO, the Postgres Operator, is validated for deployment on Kubernetes, +OpenShift, GKE, Anthos, AKS, EKS, and VMware Tanzu clusters. PGO is cloud native +and storage agnostic, working with a wide variety of storage classes, hostPath, +and NFS. -The PostgreSQL Operator includes various components that get deployed to your -Kubernetes cluster as shown in the following diagram and detailed -in the Design section of the documentation for the version you are running. +PGO includes various components that get deployed to your Kubernetes cluster as +shown in the following diagram and detailed in the Design section of the +documentation for the version you are running. 
![Reference](https://access.crunchydata.com/documentation/postgres-operator/latest/Operator-Architecture.png) -The PostgreSQL Operator is developed and tested on CentOS and RHEL linux platforms but is known to run on other Linux variants. +PGO is developed and tested on CentOS and RHEL Linux platforms but is known to +run on other Linux variants. ### Supported Platforms -The Crunchy PostgreSQL Operator is tested on the following Platforms: +PGO, the Postgres Operator, is Kubernetes-native and maintains backwards +compatibility to Kubernetes 1.11 and is tested against the following +platforms: -- Kubernetes 1.13+ -- OpenShift 3.11+ +- Kubernetes 1.17+ +- OpenShift 4.4+ +- OpenShift 3.11 - Google Kubernetes Engine (GKE), including Anthos - Amazon EKS -- VMware Enterprise PKS 1.3+ - -### Storage +- Microsoft AKS +- VMware Tanzu -The Crunchy PostgreSQL Operator is tested with a variety of different types of Kubernetes storage and Storage Classes, including: +This list only includes the platforms that the Postgres Operator is specifically +tested on as part of the release process: PGO works on other Kubernetes +distributions as well. -- Google Compute Engine persistent volumes -- HostPath -- NFS -- Rook -- StorageOS +### Storage -and more. +PGO, the Postgres Operator, is tested with a variety of different types of +Kubernetes storage and Storage Classes, as well as hostPath and NFS. -We know there are a variety of different types of [Storage Classes](https://kubernetes.io/docs/concepts/storage/storage-classes/) available for Kubernetes and we do our best to test each one, but due to the breadth of this area we are unable to verify PostgreSQL Operator functionality in each one. With that said, the PostgreSQL Operator is designed to be storage class agnostic and has been demonstrated to work with additional Storage Classes. +We know there are a variety of different types of [Storage Classes](https://kubernetes.io/docs/concepts/storage/storage-classes/) +available for Kubernetes and we do our best to test each one, but due to the +breadth of this area we are unable to verify Postgres Operator functionality in +each one. With that said, the PostgreSQL Operator is designed to be storage +class agnostic and has been demonstrated to work with additional Storage +Classes. ## Installation -### PostgreSQL Operator Installation +### Postgres Operator (PGO) Installation -The PostgreSQL Operator provides a few different methods for installation based on your use case. +PGO provides a few different installation methods to get up and +running with cloud native Postgres. -Based on your storage settings in your Kubernetes environment, you may be able to start as quickly as: +Based on your storage settings in your Kubernetes environment, you may be able +to start as quickly as: ```shell kubectl create namespace pgo -kubectl apply -f https://raw.githubusercontent.com/CrunchyData/postgres-operator/v4.5.0/installers/kubectl/postgres-operator.yml +kubectl apply -f https://raw.githubusercontent.com/CrunchyData/postgres-operator/v4.6.10/installers/kubectl/postgres-operator.yml ``` Otherwise, we highly recommend following the instructions from our [Quickstart](https://access.crunchydata.com/documentation/postgres-operator/latest/quickstart/).
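Before moving on to the client, it can be worth confirming that the operator Deployment actually rolled out. A minimal sketch, assuming the default `pgo` namespace and `postgres-operator` Deployment name from the snippet above; the `name=postgres-operator` pod label is an assumption, so adjust the selector if your install differs:

```shell
# Wait for the operator Deployment to report Available, then list its pod(s).
kubectl -n pgo wait --for=condition=Available deployment/postgres-operator --timeout=300s
kubectl -n pgo get pods --selector=name=postgres-operator
```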
@@ -176,7 +200,7 @@ Installations methods include: ### `pgo` Client Installation -If you have the PostgreSQL Operator installed in your environment, and are interested in installation of the client interface, please start here: +If you have the Postgres Operator installed in your environment, and are interested in installation of the client interface, please start here: - [pgo Client Install](https://access.crunchydata.com/documentation/postgres-operator/latest/installation/pgo-client/) There is also a `pgo-client` container if you wish to deploy the client directly @@ -189,6 +213,7 @@ There is also a `pgo-client` container if you wish to deploy the client directly - [PostgreSQL](https://www.postgresql.org) - [PostgreSQL Contrib Modules](https://www.postgresql.org/docs/current/contrib.html) - [PL/Python + PL/Python 3](https://www.postgresql.org/docs/current/plpython.html) + - [PL/Perl](https://www.postgresql.org/docs/current/plperl.html) - [pgAudit](https://www.pgaudit.org/) - [pgAudit Analyze](https://github.com/pgaudit/pgaudit_analyze) - [pgnodemx](https://github.com/CrunchyData/pgnodemx) @@ -222,20 +247,24 @@ Additional containers that are not directly integrated with the PostgreSQL Opera For more information about which versions of the PostgreSQL Operator include which components, please visit the [compatibility](https://access.crunchydata.com/documentation/postgres-operator/latest/configuration/compatibility/) section of the documentation. -## Using the PostgreSQL Operator +## Using the PostgreSQL Operator (PGO) -If you are new to the PostgreSQL Operator, you can follow along the [tutorial](https://access.crunchydata.com/documentation/postgres-operator/latest/tutorial/) to learn how to install the PostgreSQL Operator and how to use many of its features! +If you are new to PGO, you can follow along with the [tutorial](https://access.crunchydata.com/documentation/postgres-operator/latest/tutorial/) +to learn how to install the PostgreSQL Operator and how to use many of its +features! - [PostgreSQL Operator Tutorial](https://access.crunchydata.com/documentation/postgres-operator/latest/tutorial/) -If you have the PostgreSQL and Client Interface installed in your environment and are interested in guidance on the use of the Crunchy PostgreSQL Operator, please start here: +If you have the PostgreSQL Operator and client interface installed in your environment +and are interested in guidance on the use of the Crunchy PostgreSQL Operator, +please start here: -- [PostgreSQL Operator Documentation](https://access.crunchydata.com/documentation/postgres-operator/) +- [PostgreSQL Operator (PGO) Documentation](https://access.crunchydata.com/documentation/postgres-operator/) - [`pgo` Client User Guide](https://access.crunchydata.com/documentation/postgres-operator/latest/pgo-client/) ## Contributing to the Project -Want to contribute to the PostgreSQL Operator project? Great! We've put together +Want to contribute to the PGO Project? Great! We've put together a set of contributing guidelines that you can review here: - [Contributing Guidelines](CONTRIBUTING.md) @@ -268,7 +297,7 @@ For other information, please visit the [Support](https://access.crunchydata.com ## Documentation For additional information regarding design, configuration and operation of the -PostgreSQL Operator, pleases see the [Official Project Documentation][documentation]. +PostgreSQL Operator (PGO), please see the [Official Project Documentation][documentation].
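For a feel of the workflow that documentation walks through, here is a hedged sketch of a first session with the `pgo` client; the cluster name `hippo` and the `pgo` namespace are example values, not requirements:

```shell
# Verify the client can reach the API server, then create a cluster and test connections to it.
pgo version -n pgo
pgo create cluster hippo -n pgo
pgo test hippo -n pgo
```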
If you are looking for the [nightly builds of the documentation](https://crunchydata.github.io/postgres-operator/latest/), you can view them at: @@ -282,7 +311,8 @@ Documentation for previous releases can be found at the [Crunchy Data Access Por ## Releases -When a PostgreSQL Operator general availability (GA) release occurs, the container images are distributed on the following platforms in order: +When a PGO general availability (GA) release occurs, the container images are +distributed on the following platforms in order: - [Crunchy Data Customer Portal](https://access.crunchydata.com/) - [Crunchy Data Developer Portal](https://www.crunchydata.com/developers) @@ -291,3 +321,5 @@ When a PostgreSQL Operator general availability (GA) release occurs, the contain The image rollout can occur over the course of several days. To stay up-to-date on when releases are made available in the [Crunchy Data Developer Portal](https://www.crunchydata.com/developers), please sign up for the [Crunchy Data Developer Program Newsletter](https://www.crunchydata.com/developers/newsletter) + +The PGO Postgres Operator project source code is available subject to the [Apache 2.0 license](LICENSE.md) with the PGO logo and branding assets covered by [our trademark guidelines](docs/static/logos/TRADEMARKS.md). diff --git a/bin/check-deps.sh b/bin/check-deps.sh index fd0d77ce24..5d9f117469 100755 --- a/bin/check-deps.sh +++ b/bin/check-deps.sh @@ -1,6 +1,6 @@ #!/bin/bash -e -# Copyright 2020 Crunchy Data Solutions, Inc. +# Copyright 2020 - 2023 Crunchy Data Solutions, Inc. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at diff --git a/bin/crunchy-postgres-exporter/common_lib.sh b/bin/crunchy-postgres-exporter/common_lib.sh index 283352062b..720acb4468 100755 --- a/bin/crunchy-postgres-exporter/common_lib.sh +++ b/bin/crunchy-postgres-exporter/common_lib.sh @@ -1,6 +1,6 @@ #!/bin/bash -# Copyright 2018 - 2020 Crunchy Data Solutions, Inc. +# Copyright 2018 - 2023 Crunchy Data Solutions, Inc. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at diff --git a/bin/crunchy-postgres-exporter/start.sh b/bin/crunchy-postgres-exporter/start.sh index f8e02e4094..58c8247d14 100755 --- a/bin/crunchy-postgres-exporter/start.sh +++ b/bin/crunchy-postgres-exporter/start.sh @@ -1,6 +1,6 @@ #!/bin/bash -# Copyright 2017 - 2020 Crunchy Data Solutions, Inc. +# Copyright 2017 - 2023 Crunchy Data Solutions, Inc. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at @@ -76,19 +76,7 @@ set_default_pg_exporter_env() { trap 'trap_sigterm' SIGINT SIGTERM set_default_postgres_exporter_env - -if [[ ! -v DATA_SOURCE_NAME ]] -then - set_default_pg_exporter_env - if [[ ! -z "${EXPORTER_PG_PARAMS}" ]] - then - EXPORTER_PG_PARAMS="?${EXPORTER_PG_PARAMS}" - fi - export DATA_SOURCE_NAME="postgresql://${EXPORTER_PG_USER}:${EXPORTER_PG_PASSWORD}\ -@${EXPORTER_PG_HOST}:${EXPORTER_PG_PORT}/${EXPORTER_PG_DATABASE}${EXPORTER_PG_PARAMS}" -fi - - +set_default_pg_exporter_env if [[ ! ${#default_exporter_env_vars[@]} -eq 0 ]] then @@ -99,16 +87,16 @@ fi # Check that postgres is accepting connections. echo_info "Waiting for PostgreSQL to be ready.." 
while true; do - ${PG_DIR?}/bin/pg_isready -d ${DATA_SOURCE_NAME} + ${PG_DIR?}/bin/pg_isready -q -h "${EXPORTER_PG_HOST}" -p "${EXPORTER_PG_PORT}" if [ $? -eq 0 ]; then break fi sleep 2 done -echo_info "Checking if PostgreSQL is accepting queries.." +echo_info "Checking if "${EXPORTER_PG_USER}" is created.." while true; do - ${PG_DIR?}/bin/psql "${DATA_SOURCE_NAME}" -c "SELECT now();" + PGPASSWORD="${EXPORTER_PG_PASSWORD}" ${PG_DIR?}/bin/psql -q -h "${EXPORTER_PG_HOST}" -p "${EXPORTER_PG_PORT}" -U "${EXPORTER_PG_USER}" -c "SELECT 1;" "${EXPORTER_PG_DATABASE}" if [ $? -eq 0 ]; then break fi @@ -135,7 +123,7 @@ else fi done - VERSION=$(${PG_DIR?}/bin/psql "${DATA_SOURCE_NAME}" -qtAX -c "SELECT current_setting('server_version_num')") + VERSION=$(PGPASSWORD="${EXPORTER_PG_PASSWORD}" ${PG_DIR?}/bin/psql -h "${EXPORTER_PG_HOST}" -p "${EXPORTER_PG_PORT}" -U "${EXPORTER_PG_USER}" -qtAX -c "SELECT current_setting('server_version_num')" "${EXPORTER_PG_DATABASE}") if (( ${VERSION?} >= 90500 )) && (( ${VERSION?} < 90600 )) then if [[ -f ${CONFIG_DIR?}/queries_pg95.yml ]] then cat ${CONFIG_DIR?}/queries_pg95.yml >> /tmp/queries.yml else echo_err "Custom Query file queries_pg95.yml does not exist (it should).." fi + if [[ -f ${CONFIG_DIR?}/queries_pg_stat_statements_pg95.yml ]] + then + cat ${CONFIG_DIR?}/queries_pg_stat_statements_pg95.yml >> /tmp/queries.yml + else + echo_warn "Custom Query file queries_pg_stat_statements_pg95.yml not loaded." + fi elif (( ${VERSION?} >= 90600 )) && (( ${VERSION?} < 100000 )) then if [[ -f ${CONFIG_DIR?}/queries_pg96.yml ]] then cat ${CONFIG_DIR?}/queries_pg96.yml >> /tmp/queries.yml else echo_err "Custom Query file queries_pg96.yml does not exist (it should).." fi + if [[ -f ${CONFIG_DIR?}/queries_pg_stat_statements_pg96.yml ]] + then + cat ${CONFIG_DIR?}/queries_pg_stat_statements_pg96.yml >> /tmp/queries.yml + else + echo_warn "Custom Query file queries_pg_stat_statements_pg96.yml not loaded." + fi elif (( ${VERSION?} >= 100000 )) && (( ${VERSION?} < 110000 )) then if [[ -f ${CONFIG_DIR?}/queries_pg10.yml ]] then cat ${CONFIG_DIR?}/queries_pg10.yml >> /tmp/queries.yml else echo_err "Custom Query file queries_pg10.yml does not exist (it should).." fi + if [[ -f ${CONFIG_DIR?}/queries_pg_stat_statements_pg10.yml ]] + then + cat ${CONFIG_DIR?}/queries_pg_stat_statements_pg10.yml >> /tmp/queries.yml + else + echo_warn "Custom Query file queries_pg_stat_statements_pg10.yml not loaded." + fi elif (( ${VERSION?} >= 110000 )) && (( ${VERSION?} < 120000 )) then if [[ -f ${CONFIG_DIR?}/queries_pg11.yml ]] then cat ${CONFIG_DIR?}/queries_pg11.yml >> /tmp/queries.yml else echo_err "Custom Query file queries_pg11.yml does not exist (it should).." fi + if [[ -f ${CONFIG_DIR?}/queries_pg_stat_statements_pg11.yml ]] + then + cat ${CONFIG_DIR?}/queries_pg_stat_statements_pg11.yml >> /tmp/queries.yml + else + echo_warn "Custom Query file queries_pg_stat_statements_pg11.yml not loaded." + fi elif (( ${VERSION?} >= 120000 )) && (( ${VERSION?} < 130000 )) then if [[ -f ${CONFIG_DIR?}/queries_pg12.yml ]] then cat ${CONFIG_DIR?}/queries_pg12.yml >> /tmp/queries.yml else echo_err "Custom Query file queries_pg12.yml does not exist (it should).." fi + if [[ -f ${CONFIG_DIR?}/queries_pg_stat_statements_pg12.yml ]] + then + cat ${CONFIG_DIR?}/queries_pg_stat_statements_pg12.yml >> /tmp/queries.yml + else + echo_warn "Custom Query file queries_pg_stat_statements_pg12.yml not loaded." + fi elif (( ${VERSION?} >= 130000 )) then if [[ -f ${CONFIG_DIR?}/queries_pg13.yml ]] then cat ${CONFIG_DIR?}/queries_pg13.yml >> /tmp/queries.yml else -echo_err "Custom Query file queries_pg12.yml does not exist (it should).."
+ echo_err "Custom Query file queries_pg13.yml does not exist (it should).." + fi + if [[ -f ${CONFIG_DIR?}/queries_pg_stat_statements_pg13.yml ]] + then + cat ${CONFIG_DIR?}/queries_pg_stat_statements_pg13.yml >> /tmp/queries.yml + else + echo_warn "Custom Query file queries_pg_stat_statements_pg13.yml not loaded." fi else echo_err "Unknown or unsupported version of PostgreSQL. Exiting.." @@ -190,12 +214,12 @@ else fi fi -sed -i "s/#PGBACKREST_INFO_THROTTLE_MINUTES#/${PGBACKREST_INFO_THROTTLE_MINUTES:-10}/g" /tmp/queries.yml +sed -i -e "s/#PGBACKREST_INFO_THROTTLE_MINUTES#/${PGBACKREST_INFO_THROTTLE_MINUTES:-10}/g" -e "s/#PG_STAT_STATEMENTS_LIMIT#/${PG_STAT_STATEMENTS_LIMIT:-20}/g" /tmp/queries.yml PG_OPTIONS="--extend.query-path=${QUERY_DIR?}/queries.yml --web.listen-address=:${POSTGRES_EXPORTER_PORT}" echo_info "Starting postgres-exporter.." -${PG_EXP_HOME?}/postgres_exporter ${PG_OPTIONS?} >>/dev/stdout 2>&1 & +DATA_SOURCE_URI="${EXPORTER_PG_HOST}:${EXPORTER_PG_PORT}/${EXPORTER_PG_DATABASE}?${EXPORTER_PG_PARAMS}" DATA_SOURCE_USER="${EXPORTER_PG_USER}" DATA_SOURCE_PASS="${EXPORTER_PG_PASSWORD}" ${PG_EXP_HOME?}/postgres_exporter ${PG_OPTIONS?} >>/dev/stdout 2>&1 & echo $! > $POSTGRES_EXPORTER_PIDFILE wait diff --git a/bin/get-deps.sh b/bin/get-deps.sh index edf4a08b81..fb85dd4b2c 100755 --- a/bin/get-deps.sh +++ b/bin/get-deps.sh @@ -1,6 +1,6 @@ #!/bin/bash -e -# Copyright 2017 - 2020 Crunchy Data Solutions, Inc. +# Copyright 2017 - 2023 Crunchy Data Solutions, Inc. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at diff --git a/bin/get-pgmonitor.sh b/bin/get-pgmonitor.sh index cfc178e77d..6d717bd20f 100755 --- a/bin/get-pgmonitor.sh +++ b/bin/get-pgmonitor.sh @@ -1,6 +1,6 @@ #!/bin/bash -e -# Copyright 2017 - 2020 Crunchy Data Solutions, Inc. +# Copyright 2017 - 2023 Crunchy Data Solutions, Inc. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at @@ -14,7 +14,7 @@ # limitations under the License. echo "Getting pgMonitor..." -PGMONITOR_COMMIT='v4.4-RC7' +PGMONITOR_COMMIT='4.4-1' # pgMonitor Setup if [[ -d ${PGOROOT?}/tools/pgmonitor ]] diff --git a/bin/license_aggregator.sh b/bin/license_aggregator.sh new file mode 100755 index 0000000000..6ba6f46cad --- /dev/null +++ b/bin/license_aggregator.sh @@ -0,0 +1,39 @@ +#!/bin/bash + +# Copyright 2021 - 2023 Crunchy Data Solutions, Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Inputs / outputs +SCAN_DIR=${GOPATH:-~/go}/pkg/mod +OUT_DIR=licenses + +# Fail on error +set -e + +# Clean up before we start our work +rm -rf $OUT_DIR/*/ + +# Get any file in the vendor directory with the word "license" in it. 
Note that we'll also keep its path +myLicenses=$(find $SCAN_DIR -type f | grep -i license) +for licensefile in $myLicenses +do + # make a new license directory matching the same vendor structure + licensedir=$(dirname $licensefile) + newlicensedir=$(echo $licensedir | sed "s:$SCAN_DIR:$OUT_DIR:" | sed 's:@[0-9a-zA-Z.\\-]*/:/:' | sed 's:@[0-9a-zA-Z.\\-]*::') + mkdir -p $newlicensedir + # And, copy over the license + cp -f $licensefile $newlicensedir +done + +sudo chmod -R 755 licenses +sudo chmod 0644 licenses/LICENSE.txt diff --git a/bin/pgo-backrest-repo-sync/pgo-backrest-repo-sync.sh b/bin/pgo-backrest-repo-sync/pgo-backrest-repo-sync.sh deleted file mode 100644 index 53e98e3a2e..0000000000 --- a/bin/pgo-backrest-repo-sync/pgo-backrest-repo-sync.sh +++ /dev/null @@ -1,95 +0,0 @@ -#!/bin/bash -x - -# Copyright 2019 - 2020 Crunchy Data Solutions, Inc. -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -function trap_sigterm() { - echo "Signal trap triggered, beginning shutdown.." - killall sshd -} - -trap 'trap_sigterm' SIGINT SIGTERM - -# First enable sshd prior to running rsync if using pgbackrest with a repository -# host -enable_sshd() { - SSHD_CONFIG=/sshd - - mkdir ~/.ssh/ - cp $SSHD_CONFIG/config ~/.ssh/ - cp $SSHD_CONFIG/id_ed25519 /tmp - chmod 400 /tmp/id_ed25519 ~/.ssh/config - - # start sshd which is used by pgbackrest for remote connections - /usr/sbin/sshd -D -f $SSHD_CONFIG/sshd_config & - - echo "sleep 5 secs to let sshd come up before running rsync command" - sleep 5 -} - -# Runs rync to sync from a specified source directory to a target directory -rsync_repo() { - echo "rsync pgbackrest from ${1} to ${2}" - # note, the "/" after the repo path is important, as we do not want to sync - # the top level directory - rsync -a --progress "${1}" "${2}" - echo "finished rsync" -} - -# Use the aws cli sync command to sync files from a source location to a target -# location. The this includes syncing files between who s3 locations, -# syncing a local directory to s3, or syncing from s3 to a local directory. -aws_sync_repo() { - export AWS_CA_BUNDLE="${PGBACKREST_REPO1_S3_CA_FILE}" - export AWS_ACCESS_KEY_ID="${PGBACKREST_REPO1_S3_KEY}" - export AWS_SECRET_ACCESS_KEY="${PGBACKREST_REPO1_S3_KEY_SECRET}" - export AWS_DEFAULT_REGION="${PGBACKREST_REPO1_S3_REGION}" - - echo "Executing aws s3 sync from source ${1} to target ${2}" - aws s3 sync "${1}" "${2}" - echo "Finished aws s3 sync" -} - -# If s3 is identifed as the data source, then the aws cli will be utilized to -# sync the repo to the target location in s3. If local storage is also enabled -# (along with s3) for the cluster, then also use the aws cli to sync the repo -# from s3 to the target volume locally. -# -# If the data source is local (the default if not specified at all), then first -# rsync the repo to the target directory locally. Then, if s3 storage is also -# enabled (along with local), use the aws cli to sync the local repo to the -# target s3 location. 
-if [[ "${BACKREST_STORAGE_SOURCE}" == "s3" ]] -then - aws_source="s3://${PGBACKREST_REPO1_S3_BUCKET}${PGBACKREST_REPO1_PATH}/" - aws_target="s3://${PGBACKREST_REPO1_S3_BUCKET}${NEW_PGBACKREST_REPO}/" - aws_sync_repo "${aws_source}" "${aws_target}" - if [[ "${PGHA_PGBACKREST_LOCAL_S3_STORAGE}" == "true" ]] - then - aws_source="s3://${PGBACKREST_REPO1_S3_BUCKET}${PGBACKREST_REPO1_PATH}/" - aws_target="${NEW_PGBACKREST_REPO}/" - aws_sync_repo "${aws_source}" "${aws_target}" - fi -else - enable_sshd # enable sshd for rsync - - rsync_source="${PGBACKREST_REPO1_HOST}:${PGBACKREST_REPO1_PATH}/" - rsync_target="$NEW_PGBACKREST_REPO" - rsync_repo "${rsync_source}" "${rsync_target}" - if [[ "${PGHA_PGBACKREST_LOCAL_S3_STORAGE}" == "true" ]] - then - aws_source="${NEW_PGBACKREST_REPO}/" - aws_target="s3://${PGBACKREST_REPO1_S3_BUCKET}${NEW_PGBACKREST_REPO}/" - aws_sync_repo "${aws_source}" "${aws_target}" - fi -fi diff --git a/bin/pgo-backrest-repo/archive-push-s3.sh b/bin/pgo-backrest-repo/archive-push-s3.sh deleted file mode 100755 index 2cafa76d90..0000000000 --- a/bin/pgo-backrest-repo/archive-push-s3.sh +++ /dev/null @@ -1,3 +0,0 @@ -#!/bin/bash - -pgbackrest "$@" diff --git a/bin/pgo-backrest-repo/pgo-backrest-repo.sh b/bin/pgo-backrest-repo/pgo-backrest-repo.sh deleted file mode 100755 index 25fdec5f69..0000000000 --- a/bin/pgo-backrest-repo/pgo-backrest-repo.sh +++ /dev/null @@ -1,81 +0,0 @@ -#!/bin/bash - -# Copyright 2019 - 2020 Crunchy Data Solutions, Inc. -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -function trap_sigterm() { - echo "Signal trap triggered, beginning shutdown.." - killall sshd -} - -trap 'trap_sigterm' SIGINT SIGTERM - -echo "Starting the pgBackRest repo" - -CONFIG=/sshd -REPO=/backrestrepo - -if [ ! -d $PGBACKREST_REPO1_PATH ]; then - echo "creating " $PGBACKREST_REPO1_PATH - mkdir -p $PGBACKREST_REPO1_PATH -fi - -# This is a workaround for changes introduced in pgBackRest v2.24. Specifically, a pg1-path -# setting must now be visible when another container executes a pgBackRest command via SSH. -# Since env vars, and therefore the PGBACKREST_DB_PATH setting, is not visible when another -# container executes a command via SSH, this adds the pg1-path setting to the pgBackRest config -# file instead, ensuring the setting is always available in the environment during SSH calls. -# Additionally, since the value for pg1-path setting in the repository container is irrelevant -# (i.e. the value specified by the container running the command via SSH is used instead), it is -# simply set to a dummy directory within the config file. -# If the URI style is set to 'path' instead of the default 'host' value, pgBackRest will -# connect to S3 by prependinging bucket names to URIs instead of the default 'bucket.endpoint' style -# Finally, if TLS verification is set to 'n', pgBackRest disables verification of the S3 server -# certificate. -mkdir -p /tmp/pg1path -if ! 
grep -Fxq "[${PGBACKREST_STANZA}]" "/etc/pgbackrest/pgbackrest.conf" 2> /dev/null -then - - printf "[%s]\npg1-path=/tmp/pg1path\n" "$PGBACKREST_STANZA" > /etc/pgbackrest/pgbackrest.conf - - # Additionally, if the PGBACKREST S3 variables are set, add them here - if [[ "${PGBACKREST_REPO1_S3_KEY}" != "" ]] - then - printf "repo1-s3-key=%s\n" "${PGBACKREST_REPO1_S3_KEY}" >> /etc/pgbackrest/pgbackrest.conf - fi - - if [[ "${PGBACKREST_REPO1_S3_KEY_SECRET}" != "" ]] - then - printf "repo1-s3-key-secret=%s\n" "${PGBACKREST_REPO1_S3_KEY_SECRET}" >> /etc/pgbackrest/pgbackrest.conf - fi - - if [[ "${PGBACKREST_REPO1_S3_URI_STYLE}" != "" ]] - then - printf "repo1-s3-uri-style=%s\n" "${PGBACKREST_REPO1_S3_URI_STYLE}" >> /etc/pgbackrest/pgbackrest.conf - fi - -fi - -mkdir -p ~/.ssh/ -cp $CONFIG/config ~/.ssh/ -#cp $CONFIG/authorized_keys ~/.ssh/ -cp $CONFIG/id_ed25519 /tmp -chmod 400 /tmp/id_ed25519 ~/.ssh/config - -# start sshd which is used by pgbackrest for remote connections -/usr/sbin/sshd -D -f $CONFIG/sshd_config & - -echo "The pgBackRest repo has been started" - -wait diff --git a/bin/pgo-backrest/.gitignore b/bin/pgo-backrest/.gitignore deleted file mode 100644 index 230c647366..0000000000 --- a/bin/pgo-backrest/.gitignore +++ /dev/null @@ -1 +0,0 @@ -pgo-backrest diff --git a/bin/pgo-backrest/README.txt b/bin/pgo-backrest/README.txt deleted file mode 100644 index 23f92ef4a4..0000000000 --- a/bin/pgo-backrest/README.txt +++ /dev/null @@ -1,3 +0,0 @@ -pgo-backrest binary goes in this directory and gets -copied into the pgo-backrest image, .gitignore is here -to keep the binary from making its way into github diff --git a/bin/pgo-backrest/pgo-backrest.sh b/bin/pgo-backrest/pgo-backrest.sh deleted file mode 100755 index fda20af57c..0000000000 --- a/bin/pgo-backrest/pgo-backrest.sh +++ /dev/null @@ -1,23 +0,0 @@ -#!/bin/sh - -# Copyright 2018 - 2020 Crunchy Data Solutions, Inc. -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -/opt/cpm/bin/pgo-backrest - -echo $UID "is the UID in the script" - -chown -R $UID:$UID $PGBACKREST_DB_PATH - -chmod -R o+rx $PGBACKREST_DB_PATH diff --git a/bin/pgo-event/pgo-event.sh b/bin/pgo-event/pgo-event.sh index cddcb2e708..b56b0a775b 100755 --- a/bin/pgo-event/pgo-event.sh +++ b/bin/pgo-event/pgo-event.sh @@ -1,6 +1,6 @@ #!/bin/bash -x -# Copyright 2019 - 2020 Crunchy Data Solutions, Inc. +# Copyright 2019 - 2023 Crunchy Data Solutions, Inc. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at diff --git a/bin/pgo-rmdata/start.sh b/bin/pgo-rmdata/start.sh index 95a4903289..7fca61c27d 100755 --- a/bin/pgo-rmdata/start.sh +++ b/bin/pgo-rmdata/start.sh @@ -1,6 +1,6 @@ #!/bin/bash -x -# Copyright 2018 - 2020 Crunchy Data Solutions, Inc. +# Copyright 2018 - 2023 Crunchy Data Solutions, Inc. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at diff --git a/bin/pgo-scheduler/start.sh b/bin/pgo-scheduler/start.sh index 4a32cf8bc3..96420fa9aa 100755 --- a/bin/pgo-scheduler/start.sh +++ b/bin/pgo-scheduler/start.sh @@ -1,6 +1,6 @@ #!/bin/bash -# Copyright 2019 - 2020 Crunchy Data Solutions, Inc. +# Copyright 2019 - 2023 Crunchy Data Solutions, Inc. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at diff --git a/bin/pgo-sqlrunner/start.sh b/bin/pgo-sqlrunner/start.sh index 0b2eb6d417..9cbe715ad0 100755 --- a/bin/pgo-sqlrunner/start.sh +++ b/bin/pgo-sqlrunner/start.sh @@ -1,6 +1,6 @@ #!/bin/bash -# Copyright 2019 - 2020 Crunchy Data Solutions, Inc. +# Copyright 2019 - 2023 Crunchy Data Solutions, Inc. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at diff --git a/bin/pre-pull-crunchy-containers.sh b/bin/pre-pull-crunchy-containers.sh index 5a7031f8e9..2fa49a08f2 100755 --- a/bin/pre-pull-crunchy-containers.sh +++ b/bin/pre-pull-crunchy-containers.sh @@ -1,6 +1,6 @@ #!/bin/bash -# Copyright 2018 - 2020 Crunchy Data Solutions, Inc. +# Copyright 2018 - 2023 Crunchy Data Solutions, Inc. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at diff --git a/bin/pull-ccp-from-gcr.sh b/bin/pull-ccp-from-gcr.sh index 0e6dc20aea..17ce4ae360 100755 --- a/bin/pull-ccp-from-gcr.sh +++ b/bin/pull-ccp-from-gcr.sh @@ -8,8 +8,6 @@ IMAGES=( crunchy-postgres-ha crunchy-pgbadger crunchy-pgbouncer - crunchy-pgdump - crunchy-pgrestore ) function echo_green() { diff --git a/bin/pull-from-gcr.sh b/bin/pull-from-gcr.sh index 25e4b267eb..d67feed3ca 100755 --- a/bin/pull-from-gcr.sh +++ b/bin/pull-from-gcr.sh @@ -1,6 +1,6 @@ #!/bin/bash -# Copyright 2018 - 2020 Crunchy Data Solutions, Inc. +# Copyright 2018 - 2023 Crunchy Data Solutions, Inc. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at @@ -19,13 +19,10 @@ REGISTRY='us.gcr.io/container-suite' VERSION=$PGO_IMAGE_TAG IMAGES=( pgo-event - pgo-backrest-repo pgo-scheduler - pgo-sqlrunner postgres-operator pgo-apiserver pgo-rmdata - pgo-backrest pgo-client pgo-deployer crunchy-postgres-exporter diff --git a/bin/push-ccp-to-gcr.sh b/bin/push-ccp-to-gcr.sh index 3b9de84ed0..f90441a85a 100755 --- a/bin/push-ccp-to-gcr.sh +++ b/bin/push-ccp-to-gcr.sh @@ -1,6 +1,6 @@ #!/bin/bash -# Copyright 2019 - 2020 Crunchy Data Solutions, Inc. +# Copyright 2019 - 2023 Crunchy Data Solutions, Inc. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at @@ -16,7 +16,7 @@ GCR_IMAGE_PREFIX=gcr.io/crunchy-dev-test CCP_IMAGE_PREFIX=crunchydata -CCP_IMAGE_TAG=centos7-12.4-4.5.0 +CCP_IMAGE_TAG=ubi8-13.10-4.6.10 IMAGES=( crunchy-prometheus diff --git a/bin/push-to-gcr.sh b/bin/push-to-gcr.sh index 3ef6a11199..0a50c56dc6 100755 --- a/bin/push-to-gcr.sh +++ b/bin/push-to-gcr.sh @@ -1,6 +1,6 @@ #!/bin/bash -# Copyright 2018 - 2020 Crunchy Data Solutions, Inc. +# Copyright 2018 - 2023 Crunchy Data Solutions, Inc. 
# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at @@ -17,13 +17,10 @@ GCR_IMAGE_PREFIX=gcr.io/crunchy-dev-test IMAGES=( pgo-event -pgo-backrest-repo pgo-scheduler -pgo-sqlrunner postgres-operator pgo-apiserver pgo-rmdata -pgo-backrest pgo-client pgo-deployer crunchy-postgres-exporter diff --git a/bin/uid_daemon.sh b/bin/uid_daemon.sh index 83d8aca5e2..6f32959b2f 100755 --- a/bin/uid_daemon.sh +++ b/bin/uid_daemon.sh @@ -1,6 +1,6 @@ #!/usr/bin/bash -# Copyright 2020 Crunchy Data Solutions, Inc. +# Copyright 2020 - 2023 Crunchy Data Solutions, Inc. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at diff --git a/bin/uid_pgbackrest.sh b/bin/uid_pgbackrest.sh deleted file mode 100755 index 3f9c9d1957..0000000000 --- a/bin/uid_pgbackrest.sh +++ /dev/null @@ -1,22 +0,0 @@ -#!/bin/bash - -if ! whoami &> /dev/null -then - if [[ -w /etc/passwd ]] - then - sed "/pgbackrest:x:2000:/d" /etc/passwd >> /tmp/uid.tmp - cp /tmp/uid.tmp /etc/passwd - rm -f /tmp/uid.tmp - echo "${USER_NAME:-pgbackrest}:x:$(id -u):0:${USER_NAME:-pgbackrest} user:${HOME}:/bin/bash" >> /etc/passwd - fi - - if [[ -w /etc/group ]] - then - sed "/pgbackrest:x:2000/d" /etc/group >> /tmp/gid.tmp - cp /tmp/gid.tmp /etc/group - rm -f /tmp/gid.tmp - echo "nfsnobody:x:65534:" >> /etc/group - echo "pgbackrest:x:$(id -g):pgbackrest" >> /etc/group - fi -fi -exec "$@" diff --git a/bin/upgrade-secret.sh b/bin/upgrade-secret.sh index ee93af1377..2c364389d9 100755 --- a/bin/upgrade-secret.sh +++ b/bin/upgrade-secret.sh @@ -1,6 +1,6 @@ #!/bin/bash -# Copyright 2018 - 2020 Crunchy Data Solutions, Inc. +# Copyright 2018 - 2023 Crunchy Data Solutions, Inc. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at diff --git a/build/pgo-apiserver/Dockerfile b/build/pgo-apiserver/Dockerfile index a2ec3c3b3a..505ea1b4a9 100644 --- a/build/pgo-apiserver/Dockerfile +++ b/build/pgo-apiserver/Dockerfile @@ -5,6 +5,10 @@ ARG PGVERSION ARG BACKREST_VERSION FROM ${PREFIX}/pgo-base:${BASEOS}-${BASEVER} +ARG BASEOS +ARG PACKAGER +ARG PGVERSION + LABEL name="pgo-apiserver" \ summary="Crunchy PostgreSQL Operator - Apiserver" \ description="Crunchy PostgreSQL Operator - Apiserver" diff --git a/build/pgo-backrest-repo/Dockerfile b/build/pgo-backrest-repo/Dockerfile deleted file mode 100644 index 0d0e1dd6b8..0000000000 --- a/build/pgo-backrest-repo/Dockerfile +++ /dev/null @@ -1,46 +0,0 @@ -ARG BASEOS -ARG BASEVER -ARG PREFIX -FROM ${PREFIX}/pgo-base:${BASEOS}-${BASEVER} - -ARG BACKREST_VERSION -ARG PACKAGER -ARG DFSET - -LABEL name="pgo-backrest-repo" \ - summary="Crunchy PostgreSQL Operator - pgBackRest Repository" \ - description="Crunchy PostgreSQL Operator - pgBackRest Repository" - -RUN ${PACKAGER} -y install \ - --setopt=skip_missing_names_on_install=False \ - crunchy-backrest-"${BACKREST_VERSION}" \ - hostname \ - openssh-clients \ - openssh-server \ - procps-ng \ - psmisc \ - rsync \ - && ${PACKAGER} -y clean all - -RUN groupadd pgbackrest -g 2000 && useradd pgbackrest -u 2000 -g 2000 -ADD bin/pgo-backrest-repo /usr/local/bin -RUN chmod +x /usr/local/bin/pgo-backrest-repo.sh /usr/local/bin/archive-push-s3.sh \ - && mkdir -p /opt/cpm/bin /etc/pgbackrest \ - && chown -R pgbackrest:pgbackrest /opt/cpm \ - && chown -R pgbackrest /etc/pgbackrest - -ADD bin/uid_pgbackrest.sh /opt/cpm/bin - -RUN chmod g=u /etc/passwd \ - && chmod g=u /etc/group \ - && chmod -R g=u /etc/pgbackrest \ - && rm -f /run/nologin - -RUN mkdir /.ssh && chown pgbackrest:pgbackrest /.ssh && chmod o+rwx /.ssh - -USER 2000 - -ENTRYPOINT ["/opt/cpm/bin/uid_pgbackrest.sh"] -VOLUME ["/sshd", "/backrestrepo" ] - -CMD ["pgo-backrest-repo.sh"] diff --git a/build/pgo-backrest/Dockerfile b/build/pgo-backrest/Dockerfile deleted file mode 100644 index 25adb20ee3..0000000000 --- a/build/pgo-backrest/Dockerfile +++ /dev/null @@ -1,31 +0,0 @@ -ARG BASEOS -ARG BASEVER -ARG PREFIX -FROM ${PREFIX}/pgo-base:${BASEOS}-${BASEVER} - -ARG PGVERSION -ARG BACKREST_VERSION -ARG PACKAGER -ARG DFSET - -LABEL name="pgo-backrest" \ - summary="Crunchy PostgreSQL Operator - pgBackRest" \ - description="pgBackRest image that is integrated for use with Crunchy Data's PostgreSQL Operator." 
- -RUN ${PACKAGER} -y install \ - --setopt=skip_missing_names_on_install=False \ - postgresql${PGVERSION}-server \ - crunchy-backrest-"${BACKREST_VERSION}" \ - && ${PACKAGER} -y clean all - -RUN mkdir -p /opt/cpm/bin /pgdata /backrestrepo && chown -R 26:26 /opt/cpm -ADD bin/pgo-backrest/ /opt/cpm/bin -ADD bin/uid_postgres.sh /opt/cpm/bin - -RUN chmod g=u /etc/passwd && \ - chmod g=u /etc/group - -USER 26 -ENTRYPOINT ["/opt/cpm/bin/uid_postgres.sh"] -VOLUME ["/pgdata","/backrestrepo"] -CMD ["/opt/cpm/bin/pgo-backrest"] diff --git a/build/pgo-base/Dockerfile b/build/pgo-base/Dockerfile index e9a80bea5c..d852c3855b 100644 --- a/build/pgo-base/Dockerfile +++ b/build/pgo-base/Dockerfile @@ -21,7 +21,6 @@ LABEL vendor="Crunchy Data" \ io.openshift.tags="postgresql,postgres,sql,nosql,crunchy" \ io.k8s.description="Trusted open source PostgreSQL-as-a-Service" -COPY redhat/licenses /licenses COPY redhat/atomic/help.1 /help.1 COPY redhat/atomic/help.md /help.md COPY licenses /licenses diff --git a/build/pgo-deployer/Dockerfile b/build/pgo-deployer/Dockerfile index c2000eaa87..83ae6930eb 100644 --- a/build/pgo-deployer/Dockerfile +++ b/build/pgo-deployer/Dockerfile @@ -4,7 +4,6 @@ ARG PREFIX FROM ${PREFIX}/pgo-base:${BASEOS}-${BASEVER} ARG BASEOS -ARG ANSIBLE_VERSION ARG PACKAGER ARG DFSET @@ -18,11 +17,10 @@ RUN if [ "$DFSET" = "centos" ] ; then \ ${PACKAGER} install -y epel-release \ && ${PACKAGER} -y install \ --setopt=skip_missing_names_on_install=False \ - kubectl \ - ansible-${ANSIBLE_VERSION} \ + kubectl-1.24* \ + ansible \ which \ gettext \ - openssl \ && ${PACKAGER} -y clean all ; \ fi @@ -33,10 +31,9 @@ RUN if [ "$BASEOS" = "rhel7" ] ; then \ --setopt=skip_missing_names_on_install=False \ --enablerepo='rhel-7-server-ose-4.4-rpms' \ openshift-clients \ - ansible-${ANSIBLE_VERSION} \ + ansible \ which \ gettext \ - openssl \ && ${PACKAGER} -y clean all --enablerepo='rhel-7-server-ose-4.4-rpms' ; \ fi @@ -47,10 +44,9 @@ RUN if [ "$BASEOS" = "ubi7" ] ; then \ --setopt=skip_missing_names_on_install=False \ --enablerepo='rhel-7-server-ose-4.4-rpms' \ openshift-clients \ - ansible-${ANSIBLE_VERSION} \ + ansible \ which \ gettext \ - openssl \ && ${PACKAGER} -y clean all --enablerepo='rhel-7-server-ose-4.4-rpms' ; \ fi @@ -61,15 +57,16 @@ RUN if [ "$BASEOS" = "ubi8" ] ; then \ --setopt=skip_missing_names_on_install=False \ --enablerepo='rhocp-4.5-for-rhel-8-x86_64-rpms' \ openshift-clients \ - ansible-${ANSIBLE_VERSION} \ + ansible \ which \ gettext \ - openssl \ + python39-jmespath \ && ${PACKAGER} -y clean all --enablerepo='rhocp-4.5-for-rhel-8-x86_64-rpms' ; \ fi COPY installers/ansible /ansible/postgres-operator COPY installers/metrics/ansible /ansible/metrics +ADD tools/pgmonitor /opt/crunchy/pgmonitor COPY installers/image/bin/pgo-deploy.sh /pgo-deploy.sh COPY bin/uid_daemon.sh /uid_daemon.sh diff --git a/build/pgo-scheduler/Dockerfile b/build/pgo-scheduler/Dockerfile index 49e3700e4e..ab1788a2ff 100644 --- a/build/pgo-scheduler/Dockerfile +++ b/build/pgo-scheduler/Dockerfile @@ -3,6 +3,7 @@ ARG BASEVER ARG PREFIX FROM ${PREFIX}/pgo-base:${BASEOS}-${BASEVER} +ARG BASEOS ARG PGVERSION ARG BACKREST_VERSION ARG PACKAGER @@ -30,6 +31,13 @@ RUN if [ "$DFSET" = "rhel" ] ; then \ && chown -R 2:2 /opt/cpm /pgo-config ; \ fi +RUN if [ "$BASEOS" = "ubi8" ]; then \ + ${PACKAGER} -y install \ + findutils \ + procps \ + && ${PACKAGER} -y clean all ; \ +fi + ADD bin/pgo-scheduler /opt/cpm/bin ADD installers/ansible/roles/pgo-operator/files/pgo-configs /default-pgo-config ADD conf/postgres-operator/pgo.yaml 
/default-pgo-config/pgo.yaml diff --git a/build/pgo-sqlrunner/Dockerfile b/build/pgo-sqlrunner/Dockerfile deleted file mode 100644 index 5b5dd2c45f..0000000000 --- a/build/pgo-sqlrunner/Dockerfile +++ /dev/null @@ -1,45 +0,0 @@ -ARG BASEOS -ARG BASEVER -ARG PREFIX -FROM ${PREFIX}/pgo-base:${BASEOS}-${BASEVER} - -ARG PGVERSION -ARG BACKREST_VERSION -ARG PACKAGER -ARG DFSET - -LABEL name="pgo-sqlrunner" \ - summary="Crunchy PostgreSQL Operator - SQL Runner" \ - description="Crunchy PostgreSQL Operator - SQL Runner" - -ENV PGROOT="/usr/pgsql-${PGVERSION}" - -RUN if [ "$DFSET" = "centos" ] ; then \ - ${PACKAGER} -y install epel-release \ - && ${PACKAGER} -y install \ - --setopt=skip_missing_names_on_install=False \ - gettext \ - hostname \ - nss_wrapper \ - procps-ng \ - postgresql${PGVERSION} \ - && ${PACKAGER} -y clean all ; \ -fi - -RUN if [ "$DFSET" = "rhel" ] ; then \ - ${PACKAGER} -y install \ - --setopt=skip_missing_names_on_install=False \ - postgresql${PGVERSION} \ - && ${PACKAGER} -y clean all ; \ -fi - -RUN mkdir -p /opt/cpm/bin /opt/cpm/conf /pgconf \ - && chown -R 26:26 /opt/cpm /pgconf - -ADD bin/pgo-sqlrunner /opt/cpm/bin - -VOLUME ["/pgconf"] - -USER 26 - -CMD ["/opt/cpm/bin/start.sh"] diff --git a/build/postgres-operator/Dockerfile b/build/postgres-operator/Dockerfile index d88621d73f..dd9895987e 100644 --- a/build/postgres-operator/Dockerfile +++ b/build/postgres-operator/Dockerfile @@ -28,6 +28,7 @@ RUN if [ "$DFSET" = "rhel" ] ; then \ fi ADD bin/postgres-operator /usr/local/bin +ADD installers/ansible/roles/pgo-operator/files/pgo-backrest-repo /default-pgo-backrest-repo ADD installers/ansible/roles/pgo-operator/files/pgo-configs /default-pgo-config ADD conf/postgres-operator/pgo.yaml /default-pgo-config/pgo.yaml diff --git a/cmd/apiserver/main.go b/cmd/apiserver/main.go index 2c3858bb63..58a9127be7 100644 --- a/cmd/apiserver/main.go +++ b/cmd/apiserver/main.go @@ -1,7 +1,7 @@ package main /* - Copyright 2017 - 2020 Crunchy Data Solutions, Inc. + Copyright 2017 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -34,8 +34,10 @@ import ( ) // Created as part of the apiserver.WriteTLSCert call -const serverCertPath = "/tmp/server.crt" -const serverKeyPath = "/tmp/server.key" +const ( + serverCertPath = "/tmp/server.crt" + serverKeyPath = "/tmp/server.key" +) func main() { // Environment-overridden variables @@ -147,8 +149,9 @@ func main() { svrCertFile.Close() } + // #nosec: G402 cfg := &tls.Config{ - //specify pgo-apiserver in the CN....then, add ServerName: "pgo-apiserver", + // specify pgo-apiserver in the CN....then, add ServerName: "pgo-apiserver", ServerName: "pgo-apiserver", ClientAuth: tls.VerifyClientCertIfGiven, InsecureSkipVerify: tlsNoVerify, diff --git a/cmd/pgo-backrest/main.go b/cmd/pgo-backrest/main.go deleted file mode 100644 index 3ea782ab35..0000000000 --- a/cmd/pgo-backrest/main.go +++ /dev/null @@ -1,154 +0,0 @@ -package main - -/* - Copyright 2018 - 2020 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -import ( - "os" - "strconv" - "strings" - - "github.com/crunchydata/postgres-operator/internal/kubeapi" - crv1 "github.com/crunchydata/postgres-operator/pkg/apis/crunchydata.com/v1" - log "github.com/sirupsen/logrus" -) - -const backrestCommand = "pgbackrest" - -const backrestBackupCommand = `backup` -const backrestInfoCommand = `info` -const backrestStanzaCreateCommand = `stanza-create` -const containername = "database" -const repoTypeFlagS3 = "--repo1-type=s3" -const noRepoS3VerifyTLS = "--no-repo1-s3-verify-tls" - -func main() { - log.Info("pgo-backrest starts") - - debugFlag := os.Getenv("CRUNCHY_DEBUG") - if debugFlag == "true" { - log.SetLevel(log.DebugLevel) - log.Debug("debug flag set to true") - } else { - log.Info("debug flag set to false") - } - - Namespace := os.Getenv("NAMESPACE") - log.Debugf("setting NAMESPACE to %s", Namespace) - if Namespace == "" { - log.Error("NAMESPACE env var not set") - os.Exit(2) - } - - Command := os.Getenv("COMMAND") - log.Debugf("setting COMMAND to %s", Command) - if Command == "" { - log.Error("COMMAND env var not set") - os.Exit(2) - } - - CommandOpts := os.Getenv("COMMAND_OPTS") - log.Debugf("setting COMMAND_OPTS to %s", CommandOpts) - - PodName := os.Getenv("PODNAME") - log.Debugf("setting PODNAME to %s", PodName) - if PodName == "" { - log.Error("PODNAME env var not set") - os.Exit(2) - } - - RepoType := os.Getenv("PGBACKREST_REPO_TYPE") - log.Debugf("setting REPO_TYPE to %s", RepoType) - - // determine the setting of PGHA_PGBACKREST_LOCAL_S3_STORAGE - // we will discard the error and treat the value as "false" if it is not - // explicitly set - LocalS3Storage, _ := strconv.ParseBool(os.Getenv("PGHA_PGBACKREST_LOCAL_S3_STORAGE")) - log.Debugf("setting PGHA_PGBACKREST_LOCAL_S3_STORAGE to %v", LocalS3Storage) - - // parse the environment variable and store the appropriate boolean value - // we will discard the error and treat the value as "false" if it is not - // explicitly set - S3VerifyTLS, _ := strconv.ParseBool(os.Getenv("PGHA_PGBACKREST_S3_VERIFY_TLS")) - log.Debugf("setting PGHA_PGBACKREST_S3_VERIFY_TLS to %v", S3VerifyTLS) - - client, err := kubeapi.NewClient() - if err != nil { - panic(err) - } - - bashcmd := make([]string, 1) - bashcmd[0] = "bash" - cmdStrs := make([]string, 0) - - switch Command { - case crv1.PgtaskBackrestStanzaCreate: - log.Info("backrest stanza-create command requested") - cmdStrs = append(cmdStrs, backrestCommand) - cmdStrs = append(cmdStrs, backrestStanzaCreateCommand) - cmdStrs = append(cmdStrs, CommandOpts) - case crv1.PgtaskBackrestInfo: - log.Info("backrest info command requested") - cmdStrs = append(cmdStrs, backrestCommand) - cmdStrs = append(cmdStrs, backrestInfoCommand) - cmdStrs = append(cmdStrs, CommandOpts) - case crv1.PgtaskBackrestBackup: - log.Info("backrest backup command requested") - cmdStrs = append(cmdStrs, backrestCommand) - cmdStrs = append(cmdStrs, backrestBackupCommand) - cmdStrs = append(cmdStrs, CommandOpts) - default: - log.Error("unsupported backup command specified " + Command) - os.Exit(2) - } - - if LocalS3Storage { - firstCmd := cmdStrs - cmdStrs = append(cmdStrs, "&&") - 
cmdStrs = append(cmdStrs, strings.Join(firstCmd, " ")) - cmdStrs = append(cmdStrs, repoTypeFlagS3) - // pass in the flag to disable TLS verification, if set - // otherwise, maintain default behavior and verify TLS - if !S3VerifyTLS { - cmdStrs = append(cmdStrs, noRepoS3VerifyTLS) - } - log.Info("backrest command will be executed for both local and s3 storage") - } else if RepoType == "s3" { - cmdStrs = append(cmdStrs, repoTypeFlagS3) - // pass in the flag to disable TLS verification, if set - // otherwise, maintain default behavior and verify TLS - if !S3VerifyTLS { - cmdStrs = append(cmdStrs, noRepoS3VerifyTLS) - } - log.Info("s3 flag enabled for backrest command") - } - - log.Infof("command to execute is [%s]", strings.Join(cmdStrs, " ")) - - log.Infof("command is %s ", strings.Join(cmdStrs, " ")) - reader := strings.NewReader(strings.Join(cmdStrs, " ")) - output, stderr, err := kubeapi.ExecToPodThroughAPI(client.Config, client, bashcmd, containername, PodName, Namespace, reader) - if err != nil { - log.Info("output=[" + output + "]") - log.Info("stderr=[" + stderr + "]") - log.Error(err) - os.Exit(2) - } - log.Info("output=[" + output + "]") - log.Info("stderr=[" + stderr + "]") - - log.Info("pgo-backrest ends") - -} diff --git a/cmd/pgo-rmdata/README.txt b/cmd/pgo-rmdata/README.txt deleted file mode 100644 index 3361973ff1..0000000000 --- a/cmd/pgo-rmdata/README.txt +++ /dev/null @@ -1,6 +0,0 @@ - -you can test this program outside of a container like so: - -cd $PGOROOT - -go run ./pgo-rmdata/pgo-rmdata.go -pg-cluster=mycluster -replica-name= -namespace=mynamespace -remove-data=true -remove-backup=true -is-replica=false -is-backup=false diff --git a/cmd/pgo-rmdata/main.go b/cmd/pgo-rmdata/main.go index 201b138130..881591e1e6 100644 --- a/cmd/pgo-rmdata/main.go +++ b/cmd/pgo-rmdata/main.go @@ -1,7 +1,7 @@ package main /* -Copyright 2019 - 2020 Crunchy Data +Copyright 2019 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -65,5 +65,4 @@ func main() { log.Infof("request is %s", request.String()) Delete(request) - } diff --git a/cmd/pgo-rmdata/process.go b/cmd/pgo-rmdata/process.go index 85a7e4ce4b..e3b70f4a19 100644 --- a/cmd/pgo-rmdata/process.go +++ b/cmd/pgo-rmdata/process.go @@ -1,7 +1,7 @@ package main /* -Copyright 2019 - 2020 Crunchy Data +Copyright 2019 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -43,22 +43,14 @@ const ( configConfigMapSuffix = "config" leaderConfigMapSuffix = "leader" failoverConfigMapSuffix = "failover" + syncConfigMapSuffix = "sync" ) func Delete(request Request) { ctx := context.TODO() log.Infof("rmdata.Process %v", request) - // if, check to see if this is a full cluster removal...i.e. 
"IsReplica" - // and "IsBackup" is set to false - // - // if this is a full cluster removal, first disable autofailover - if !(request.IsReplica || request.IsBackup) { - log.Debug("disabling autofailover for cluster removal") - util.ToggleAutoFailover(request.Clientset, false, request.ClusterPGHAScope, request.Namespace) - } - - //the case of 'pgo scaledown' + // the case of 'pgo scaledown' if request.IsReplica { log.Info("rmdata.Process scaledown replica use case") removeReplicaServices(request) @@ -66,21 +58,16 @@ func Delete(request Request) { if err != nil { log.Error(err) } - //delete the pgreplica CRD - if err := request.Clientset. - CrunchydataV1().Pgreplicas(request.Namespace). + // delete the pgreplica CRD + if err := request.Clientset.CrunchydataV1().Pgreplicas(request.Namespace). Delete(ctx, request.ReplicaName, metav1.DeleteOptions{}); err != nil { - // If the name of the replica being deleted matches the scope for the cluster, then - // we assume it was the original primary and the pgreplica deletion will fail with - // a not found error. In this case we allow the rmdata process to continue despite - // the error. This allows for the original primary to be scaled down once it is - // is no longer a primary, and has become a replica. - if !(request.ReplicaName == request.ClusterPGHAScope && kerror.IsNotFound(err)) { + // if the pgreplica is not found, assume we're scaling down the original primary and + // continue with removing the replica + if !kerror.IsNotFound(err) { log.Error(err) - return + } else { + log.Debug("pgreplica not found, assuming scale down of original primary") } - log.Debug("replica name matches PGHA scope, assuming scale down of original primary" + - "and therefore ignoring error attempting to delete nonexistent pgreplica") } err = removeReplica(request) @@ -92,13 +79,13 @@ func Delete(request Request) { removePVCs(pvcList, request) } - //scale down is its own use case so we leave when done + // scale down is its own use case so we leave when done return } if request.IsBackup { log.Info("rmdata.Process backup use case") - //the case of removing a backup using `pgo delete backup`, only applies to + // the case of removing a backup using `pgo delete backup`, only applies to // "backup-type=pgdump" removeBackupJobs(request) removeLogicalBackupPVCs(request) @@ -109,29 +96,35 @@ func Delete(request Request) { log.Info("rmdata.Process cluster use case") - // first, clear out any of the scheduled jobs that may occur, as this would be + // attempt to delete the pgcluster object if it has not already been deleted. + // quite possibly, we are here because one deleted the pgcluster object + // already, so this step is optional + if _, err := request.Clientset.CrunchydataV1().Pgclusters(request.Namespace).Get( + ctx, request.ClusterName, metav1.GetOptions{}); err == nil { + if err := request.Clientset.CrunchydataV1().Pgclusters(request.Namespace).Delete( + ctx, request.ClusterName, metav1.DeleteOptions{}); err != nil { + log.Error(err) + } + } + + // clear out any of the scheduled jobs that may occur, as this would be // executing asynchronously against any stale data removeSchedules(request) - //the user had done something like: - //pgo delete cluster mycluster --delete-data + // the user had done something like: + // pgo delete cluster mycluster --delete-data if request.RemoveData { removeUserSecrets(request) } - //handle the case of 'pgo delete cluster mycluster' + // remove the cluster Deployments removeCluster(request) - if err := request.Clientset. 
- CrunchydataV1().Pgclusters(request.Namespace). - Delete(ctx, request.ClusterName, metav1.DeleteOptions{}); err != nil { - log.Error(err) - } removeServices(request) removeAddons(request) removePgreplicas(request) removePgtasks(request) removeClusterConfigmaps(request) - //removeClusterJobs(request) + // removeClusterJobs(request) if request.RemoveData { if pvcList, err := getInstancePVCs(request); err != nil { log.Error(err) @@ -180,7 +173,7 @@ func removeBackrestRepo(request Request) { log.Error(err) } - //delete the service for the backrest repo + // delete the service for the backrest repo err = request.Clientset. CoreV1().Services(request.Namespace). Delete(ctx, deploymentName, metav1.DeleteOptions{}) @@ -244,6 +237,8 @@ func removeClusterConfigmaps(request Request) { // next, the name of the failover configmap, which is // "`clusterName`-failover" fmt.Sprintf("%s-%s", request.ClusterName, failoverConfigMapSuffix), + // next, if there is a synchronous replication configmap, clean that up + fmt.Sprintf("%s-%s", request.ClusterName, syncConfigMapSuffix), // finally, if there is a pgbouncer, remove the pgbouncer configmap util.GeneratePgBouncerConfigMapName(request.ClusterName), } @@ -269,12 +264,11 @@ func removeCluster(request Request) { selector := fmt.Sprintf("%s=%s,%s!=true", config.LABEL_PG_CLUSTER, request.ClusterName, config.LABEL_PGO_BACKREST_REPO) + // if there is an error here, return as we cannot iterate over the deployment + // list deployments, err := request.Clientset. AppsV1().Deployments(request.Namespace). List(ctx, metav1.ListOptions{LabelSelector: selector}) - - // if there is an error here, return as we cannot iterate over the deployment - // list if err != nil { log.Error(err) return @@ -324,7 +318,7 @@ func removeReplica(request Request) error { return err } - //wait for the deployment to go away fully + // wait for the deployment to go away fully var completed bool for i := 0; i < maximumTries; i++ { _, err = request.Clientset. @@ -346,7 +340,7 @@ func removeReplica(request Request) error { func removeUserSecrets(request Request) { ctx := context.TODO() - //get all that match pg-cluster=db + // get all that match pg-cluster=db selector := config.LABEL_PG_CLUSTER + "=" + request.ClusterName secrets, err := request.Clientset. @@ -365,12 +359,11 @@ func removeUserSecrets(request Request) { } } } - } func removeAddons(request Request) { ctx := context.TODO() - //remove pgbouncer + // remove pgbouncer pgbouncerDepName := request.ClusterName + "-pgbouncer" @@ -379,7 +372,7 @@ func removeAddons(request Request) { AppsV1().Deployments(request.Namespace). Delete(ctx, pgbouncerDepName, metav1.DeleteOptions{PropagationPolicy: &deletePropagation}) - //delete the service name=-pgbouncer + // delete the service name=-pgbouncer _ = request.Clientset. CoreV1().Services(request.Namespace). 
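The rmdata hunks above repeat one client-go idiom: list the resources carrying the cluster's pg-cluster label, then delete each match. A self-contained sketch of that idiom, assuming a client-go clientset (function and variable names are mine):

```go
package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// deleteClusterDeployments mirrors the list-then-delete pattern used by
// removeCluster and removeAddons in cmd/pgo-rmdata/process.go.
func deleteClusterDeployments(ctx context.Context, clientset kubernetes.Interface, namespace, clusterName string) error {
	selector := fmt.Sprintf("pg-cluster=%s", clusterName)

	// if the list fails there is nothing to iterate over, so return early
	deployments, err := clientset.AppsV1().Deployments(namespace).
		List(ctx, metav1.ListOptions{LabelSelector: selector})
	if err != nil {
		return err
	}

	// one reasonable propagation choice; the patch passes a deletePropagation
	// variable defined elsewhere in the file
	policy := metav1.DeletePropagationForeground
	for _, d := range deployments.Items {
		if err := clientset.AppsV1().Deployments(namespace).
			Delete(ctx, d.Name, metav1.DeleteOptions{PropagationPolicy: &policy}); err != nil {
			return err
		}
	}
	return nil
}
```

removeCluster additionally excludes the repository Deployment with pgo-backrest-repo!=true in its selector, but the mechanics are identical.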
@@ -389,7 +382,7 @@ func removeAddons(request Request) { func removeServices(request Request) { ctx := context.TODO() - //remove any service for this cluster + // remove any service for this cluster selector := config.LABEL_PG_CLUSTER + "=" + request.ClusterName @@ -409,13 +402,12 @@ func removeServices(request Request) { log.Error(err) } } - } func removePgreplicas(request Request) { ctx := context.TODO() - //get a list of pgreplicas for this cluster + // get a list of pgreplicas for this cluster replicaList, err := request.Clientset.CrunchydataV1().Pgreplicas(request.Namespace).List(ctx, metav1.ListOptions{ LabelSelector: config.LABEL_PG_CLUSTER + "=" + request.ClusterName, }) @@ -433,13 +425,12 @@ func removePgreplicas(request Request) { log.Warn(err) } } - } func removePgtasks(request Request) { ctx := context.TODO() - //get a list of pgtasks for this cluster + // get a list of pgtasks for this cluster taskList, err := request.Clientset. CrunchydataV1().Pgtasks(request.Namespace). List(ctx, metav1.ListOptions{LabelSelector: config.LABEL_PG_CLUSTER + "=" + request.ClusterName}) @@ -455,7 +446,6 @@ func removePgtasks(request Request) { log.Warn(err) } } - } // getInstancePVCs gets all the PVCs that are associated with PostgreSQL @@ -470,11 +460,10 @@ func getInstancePVCs(request Request) ([]string, error) { log.Debugf("instance pvcs overall selector: [%s]", selector) // get all of the PVCs to analyze (see the step below) + // if there is an error, return here and log the error in the calling function pvcs, err := request.Clientset. CoreV1().PersistentVolumeClaims(request.Namespace). List(ctx, metav1.ListOptions{LabelSelector: selector}) - - // if there is an error, return here and log the error in the calling function if err != nil { return pvcList, err } @@ -505,14 +494,14 @@ func getInstancePVCs(request Request) ([]string, error) { return pvcList, nil } -//get the pvc for this replica deployment +// get the pvc for this replica deployment func getReplicaPVC(request Request) ([]string, error) { ctx := context.TODO() pvcList := make([]string, 0) - //at this point, the naming convention is useful - //and ClusterName is the replica deployment name - //when isReplica=true + // at this point, the naming convention is useful + // and ClusterName is the replica deployment name + // when isReplica=true pvcList = append(pvcList, request.ReplicaName) // see if there are any tablespaces or WAL volumes assigned to this replica, @@ -524,11 +513,10 @@ func getReplicaPVC(request Request) ([]string, error) { selector := fmt.Sprintf("%s=%s", config.LABEL_PG_CLUSTER, request.ClusterName) // get all of the PVCs that are specific to this replica and remove them + // if there is an error, return here and log the error in the calling function pvcs, err := request.Clientset. CoreV1().PersistentVolumeClaims(request.Namespace). List(ctx, metav1.ListOptions{LabelSelector: selector}) - - // if there is an error, return here and log the error in the calling function if err != nil { return pvcList, err } @@ -556,7 +544,7 @@ func getReplicaPVC(request Request) ([]string, error) { return pvcList, nil } -func removePVCs(pvcList []string, request Request) error { +func removePVCs(pvcList []string, request Request) { ctx := context.TODO() for _, p := range pvcList { @@ -569,9 +557,6 @@ func removePVCs(pvcList []string, request Request) error { log.Error(err) } } - - return nil - } // removeBackupJobs removes any job associated with a backup. 
These include: @@ -600,7 +585,6 @@ func removeBackupJobs(request Request) { jobs, err := request.Clientset. BatchV1().Jobs(request.Namespace). List(ctx, metav1.ListOptions{LabelSelector: selector}) - if err != nil { log.Error(err) continue diff --git a/cmd/pgo-rmdata/types.go b/cmd/pgo-rmdata/types.go index 36a95778ff..d0664eb2c5 100644 --- a/cmd/pgo-rmdata/types.go +++ b/cmd/pgo-rmdata/types.go @@ -1,7 +1,7 @@ package main /* -Copyright 2019 - 2020 Crunchy Data +Copyright 2019 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at diff --git a/cmd/pgo-scheduler/main.go b/cmd/pgo-scheduler/main.go index 4de3ac6cfb..89cbbb65dc 100644 --- a/cmd/pgo-scheduler/main.go +++ b/cmd/pgo-scheduler/main.go @@ -1,7 +1,7 @@ package main /* - Copyright 2019 - 2020 Crunchy Data Solutions, Inc. + Copyright 2019 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -19,7 +19,6 @@ import ( "fmt" "os" "os/signal" - "strconv" "syscall" "time" @@ -40,16 +39,15 @@ import ( const ( schedulerLabel = "crunchy-scheduler=true" pgoNamespaceEnv = "PGO_OPERATOR_NAMESPACE" - timeoutEnv = "TIMEOUT" namespaceWorkerCount = 1 ) -var nsRefreshInterval = 10 * time.Minute -var installationName string -var pgoNamespace string -var timeout time.Duration -var seconds int -var clientset kubeapi.Interface +var ( + nsRefreshInterval = 10 * time.Minute + installationName string + pgoNamespace string + clientset kubeapi.Interface +) // NamespaceOperatingMode defines the namespace operating mode for the cluster, // e.g. "dynamic", "readonly" or "disabled". 
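For reference while reading the init() hunk just below, which deletes the scheduler's TIMEOUT machinery: the removed code was a standard read-an-env-var-with-default pattern, roughly equivalent to this (helper name is mine):

```go
package main

import (
	"os"
	"strconv"
	"time"

	log "github.com/sirupsen/logrus"
)

// secondsFromEnv reproduces the removed TIMEOUT logic: an unset variable
// means the default, an unparsable one is fatal.
func secondsFromEnv(name string, defaultSeconds int) time.Duration {
	raw := os.Getenv(name)
	if raw == "" {
		log.Infof("No %s set, defaulting to %d seconds", name, defaultSeconds)
		return time.Duration(defaultSeconds) * time.Second
	}

	seconds, err := strconv.Atoi(raw)
	if err != nil {
		log.Fatalf("Failed to convert %s env to seconds: %s", name, err)
	}
	return time.Duration(seconds) * time.Second
}
```

The wholesale deletion suggests that, after this patch, nothing in the package consumes the resulting timeout value any longer.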
See type NamespaceOperatingMode @@ -61,7 +59,7 @@ func init() { log.SetLevel(log.InfoLevel) debugFlag := os.Getenv("CRUNCHY_DEBUG") - //add logging configuration + // add logging configuration crunchylog.CrunchyLogger(crunchylog.SetParameters()) if debugFlag == "true" { log.SetLevel(log.DebugLevel) @@ -82,20 +80,6 @@ func init() { log.WithFields(log.Fields{}).Fatalf("Failed to get PGO_OPERATOR_NAMESPACE environment: %s", pgoNamespaceEnv) } - secondsEnv := os.Getenv(timeoutEnv) - seconds = 300 - if secondsEnv == "" { - log.WithFields(log.Fields{}).Info("No timeout set, defaulting to 300 seconds") - } else { - seconds, err = strconv.Atoi(secondsEnv) - if err != nil { - log.WithFields(log.Fields{}).Fatalf("Failed to convert timeout env to seconds: %s", err) - } - } - - log.WithFields(log.Fields{}).Infof("Setting timeout to: %d", seconds) - timeout = time.Second * time.Duration(seconds) - clientset, err = kubeapi.NewClient() if err != nil { log.WithFields(log.Fields{}).Fatalf("Failed to connect to kubernetes: %s", err) @@ -116,7 +100,7 @@ func init() { func main() { log.Info("Starting Crunchy Scheduler") - //give time for pgo-event to start up + // give time for pgo-event to start up time.Sleep(time.Duration(5) * time.Second) scheduler := sched.New(schedulerLabel, pgoNamespace, clientset) @@ -150,7 +134,7 @@ func main() { log.WithFields(log.Fields{}).Fatalf("Failed to create controller manager: %s", err) os.Exit(2) } - controllerManager.RunAll() + _ = controllerManager.RunAll() // if the namespace operating mode is not disabled, then create and start a namespace // controller @@ -211,7 +195,6 @@ func setNamespaceOperatingMode(clientset kubernetes.Interface) error { // createAndStartNamespaceController creates a namespace controller and then starts it func createAndStartNamespaceController(kubeClientset kubernetes.Interface, controllerManager controller.Manager, stopCh <-chan struct{}) error { - nsKubeInformerFactory := kubeinformers.NewSharedInformerFactoryWithOptions(kubeClientset, nsRefreshInterval, kubeinformers.WithTweakListOptions(func(options *metav1.ListOptions) { diff --git a/cmd/pgo-scheduler/scheduler/configmapcontroller.go b/cmd/pgo-scheduler/scheduler/configmapcontroller.go index 41372f96b5..30a72db2d0 100644 --- a/cmd/pgo-scheduler/scheduler/configmapcontroller.go +++ b/cmd/pgo-scheduler/scheduler/configmapcontroller.go @@ -1,7 +1,7 @@ package scheduler /* -Copyright 2020 Crunchy Data Solutions, Inc. +Copyright 2020 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -62,7 +62,6 @@ func (c *Controller) onDelete(obj interface{}) { // AddConfigMapEventHandler adds the pgcluster event handler to the pgcluster informer func (c *Controller) AddConfigMapEventHandler() { - c.Informer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: c.onAdd, DeleteFunc: c.onDelete, diff --git a/cmd/pgo-scheduler/scheduler/controllermanager.go b/cmd/pgo-scheduler/scheduler/controllermanager.go index 41c09ef8ef..84fb34fcc8 100644 --- a/cmd/pgo-scheduler/scheduler/controllermanager.go +++ b/cmd/pgo-scheduler/scheduler/controllermanager.go @@ -1,7 +1,7 @@ package scheduler /* -Copyright 2020 Crunchy Data Solutions, Inc. +Copyright 2020 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at @@ -57,7 +57,6 @@ type controllerGroup struct { // namespace included in the 'namespaces' parameter. func NewControllerManager(namespaces []string, scheduler *Scheduler, installationName string, namespaceOperatingMode ns.NamespaceOperatingMode) (*ControllerManager, error) { - controllerManager := ControllerManager{ controllers: make(map[string]*controllerGroup), installationName: installationName, @@ -87,7 +86,6 @@ func NewControllerManager(namespaces []string, scheduler *Scheduler, installatio // informers for this resource. Each controller group also receives its own clients, which can then // be utilized by the controller within the controller group. func (c *ControllerManager) AddGroup(namespace string) error { - c.mgrMutex.Lock() defer c.mgrMutex.Unlock() @@ -103,7 +101,6 @@ func (c *ControllerManager) AddGroup(namespace string) error { // AddAndRunGroup is a convenience function that adds a controller group for the // namespace specified, and then immediately runs the controllers in that group. func (c *ControllerManager) AddAndRunGroup(namespace string) error { - if c.controllers[namespace] != nil { // first try to clean if one is not already in progress if err := c.clean(namespace); err != nil { @@ -137,7 +134,6 @@ func (c *ControllerManager) AddAndRunGroup(namespace string) error { // RemoveAll removes all controller groups managed by the controller manager, first stopping all // controllers within each controller group managed by the controller manager. func (c *ControllerManager) RemoveAll() { - c.mgrMutex.Lock() defer c.mgrMutex.Unlock() @@ -151,7 +147,6 @@ func (c *ControllerManager) RemoveAll() { // RemoveGroup removes the controller group for the namespace specified, first stopping all // controllers within that group func (c *ControllerManager) RemoveGroup(namespace string) { - c.mgrMutex.Lock() defer c.mgrMutex.Unlock() @@ -160,7 +155,6 @@ func (c *ControllerManager) RemoveGroup(namespace string) { // RunAll runs all controllers across all controller groups managed by the controller manager. func (c *ControllerManager) RunAll() error { - c.mgrMutex.Lock() defer c.mgrMutex.Unlock() @@ -177,7 +171,6 @@ func (c *ControllerManager) RunAll() error { // RunGroup runs the controllers within the controller group for the namespace specified. func (c *ControllerManager) RunGroup(namespace string) error { - c.mgrMutex.Lock() defer c.mgrMutex.Unlock() @@ -198,7 +191,6 @@ func (c *ControllerManager) RunGroup(namespace string) error { // addControllerGroup adds a new controller group for the namespace specified func (c *ControllerManager) addControllerGroup(namespace string) error { - if _, ok := c.controllers[namespace]; ok { log.Debugf("Controller Manager: a controller for namespace %s already exists", namespace) return controller.ErrControllerGroupExists @@ -241,7 +233,6 @@ func (c *ControllerManager) addControllerGroup(namespace string) error { // clean removes and controller groups that no longer correspond to a valid namespace within // the Kubernetes cluster, e.g. in the event that a namespace has been deleted. func (c *ControllerManager) clean(namespace string) error { - if !c.sem.TryAcquire(1) { return fmt.Errorf("controller group clean already in progress, namespace %s will not "+ "clean", namespace) @@ -278,7 +269,6 @@ func (c *ControllerManager) clean(namespace string) error { // hasListerPrivs verifies the Operator has the privileges required to start the controllers // for the namespace specified. 
func (c *ControllerManager) hasListerPrivs(namespace string) bool { - controllerGroup := c.controllers[namespace] var err error @@ -301,7 +291,6 @@ func (c *ControllerManager) hasListerPrivs(namespace string) bool { // runControllerGroup is responsible running the controllers for the controller group corresponding // to the namespace provided func (c *ControllerManager) runControllerGroup(namespace string) error { - controllerGroup := c.controllers[namespace] hasListerPrivs := c.hasListerPrivs(namespace) @@ -335,7 +324,6 @@ func (c *ControllerManager) runControllerGroup(namespace string) error { // queues associated with the controllers inside of the controller group are first shutdown // prior to removing the controller group. func (c *ControllerManager) removeControllerGroup(namespace string) { - if _, ok := c.controllers[namespace]; !ok { log.Debugf("Controller Manager: no controller group to remove for ns %s", namespace) return @@ -351,7 +339,6 @@ func (c *ControllerManager) removeControllerGroup(namespace string) { // done by calling the ShutdownWorker function associated with the controller. If the controller // does not have a ShutdownWorker function then no action is taken. func (c *ControllerManager) stopControllerGroup(namespace string) { - if _, ok := c.controllers[namespace]; !ok { log.Debugf("Controller Manager: unable to stop controller group for namespace %s because "+ "a controller group for this namespace does not exist", namespace) diff --git a/cmd/pgo-scheduler/scheduler/pgbackrest.go b/cmd/pgo-scheduler/scheduler/pgbackrest.go index eba3048da8..77b390aa55 100644 --- a/cmd/pgo-scheduler/scheduler/pgbackrest.go +++ b/cmd/pgo-scheduler/scheduler/pgbackrest.go @@ -1,7 +1,7 @@ package scheduler /* - Copyright 2019 - 2020 Crunchy Data Solutions, Inc. + Copyright 2019 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
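One detail worth calling out in the controllermanager.go cleanup above: clean() serializes itself with a weighted semaphore, failing fast via TryAcquire rather than queueing a second pass behind the first. A minimal sketch of that guard, assuming golang.org/x/sync/semaphore (consistent with the c.sem.TryAcquire(1) call in the hunk):

```go
package main

import (
	"errors"

	"golang.org/x/sync/semaphore"
)

// cleaner serializes clean-up passes the way ControllerManager.clean does.
type cleaner struct {
	sem *semaphore.Weighted
}

func newCleaner() *cleaner {
	// weight 1 makes the semaphore behave like a non-blocking mutex
	return &cleaner{sem: semaphore.NewWeighted(1)}
}

func (c *cleaner) clean() error {
	// TryAcquire returns false immediately if a clean is already running
	if !c.sem.TryAcquire(1) {
		return errors.New("clean already in progress")
	}
	defer c.sem.Release(1)

	// ... remove controller groups whose namespaces no longer exist ...
	return nil
}
```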
You may obtain a copy of the License at @@ -22,8 +22,10 @@ import ( "github.com/crunchydata/postgres-operator/internal/config" log "github.com/sirupsen/logrus" + v1 "k8s.io/api/core/v1" kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/util/wait" ) @@ -62,7 +64,8 @@ func (b BackRestBackupJob) Run() { "container": b.container, "backupType": b.backupType, "cluster": b.cluster, - "storageType": b.storageType}) + "storageType": b.storageType, + }) contextLogger.Info("Running pgBackRest backup") @@ -76,11 +79,11 @@ func (b BackRestBackupJob) Run() { taskName := fmt.Sprintf("%s-%s-sch-backup", b.cluster, b.backupType) - //if the cluster is found, check for an annotation indicating it has not been upgraded - //if the annotation does not exist, then it is a new cluster and proceed as usual - //if the annotation is set to "true", the cluster has already been upgraded and can proceed but - //if the annotation is set to "false", this cluster will need to be upgraded before proceeding - //log the issue, then return + // if the cluster is found, check for an annotation indicating it has not been upgraded + // if the annotation does not exist, then it is a new cluster and proceed as usual + // if the annotation is set to "true", the cluster has already been upgraded and can proceed but + // if the annotation is set to "false", this cluster will need to be upgraded before proceeding + // log the issue, then return if cluster.Annotations[config.ANNOTATION_IS_UPGRADED] == config.ANNOTATIONS_FALSE { contextLogger.WithFields(log.Fields{ "task": taskName, @@ -115,17 +118,20 @@ func (b BackRestBackupJob) Run() { return } - selector := fmt.Sprintf("%s=%s,pgo-backrest-repo=true", config.LABEL_PG_CLUSTER, b.cluster) - pods, err := clientset.CoreV1().Pods(b.namespace).List(ctx, metav1.ListOptions{LabelSelector: selector}) + selector := fmt.Sprintf("%s=%s,%s", config.LABEL_PG_CLUSTER, b.cluster, config.LABEL_PGO_BACKREST_REPO) + options := metav1.ListOptions{ + FieldSelector: fields.OneTermEqualSelector("status.phase", string(v1.PodRunning)).String(), + LabelSelector: selector, + } + + pods, err := clientset.CoreV1().Pods(b.namespace).List(ctx, options) if err != nil { contextLogger.WithFields(log.Fields{ "selector": selector, "error": err, }).Error("error getting pods from selector") return - } - - if len(pods.Items) != 1 { + } else if len(pods.Items) != 1 { contextLogger.WithFields(log.Fields{ "selector": selector, "error": err, @@ -142,7 +148,7 @@ func (b BackRestBackupJob) Run() { backupOptions: fmt.Sprintf("--type=%s %s", b.backupType, b.options), stanza: b.stanza, storageType: b.storageType, - imagePrefix: cluster.Spec.PGOImagePrefix, + imagePrefix: cluster.Spec.CCPImagePrefix, } _, err = clientset.CrunchydataV1().Pgtasks(b.namespace). diff --git a/cmd/pgo-scheduler/scheduler/policy.go b/cmd/pgo-scheduler/scheduler/policy.go index bb81969951..c170a222db 100644 --- a/cmd/pgo-scheduler/scheduler/policy.go +++ b/cmd/pgo-scheduler/scheduler/policy.go @@ -1,7 +1,7 @@ package scheduler /* - Copyright 2019 - 2020 Crunchy Data Solutions, Inc. + Copyright 2019 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
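The pgbackrest.go change above tightens how a scheduled backup finds its repository pod: besides the label match, a field selector now restricts the list to pods in the Running phase, so a pending or terminating repo pod no longer slips past the len(pods.Items) != 1 check. The selector construction in isolation (label literals taken from the replaced line; the patch itself builds them from config constants):

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/fields"
)

// runningRepoPodOptions builds ListOptions matching exactly the pods a
// scheduled backup can exec into: labeled for the cluster's repo and Running.
func runningRepoPodOptions(cluster string) metav1.ListOptions {
	return metav1.ListOptions{
		// only a Running pod can serve the pgBackRest exec
		FieldSelector: fields.OneTermEqualSelector("status.phase", string(v1.PodRunning)).String(),
		LabelSelector: fmt.Sprintf("pg-cluster=%s,pgo-backrest-repo=true", cluster),
	}
}
```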
You may obtain a copy of the License at @@ -60,7 +60,8 @@ func (p PolicyJob) Run() { contextLogger := log.WithFields(log.Fields{ "namespace": p.namespace, "policy": p.policy, - "cluster": p.cluster}) + "cluster": p.cluster, + }) contextLogger.Info("Running Policy schedule") @@ -98,7 +99,7 @@ func (p PolicyJob) Run() { data := make(map[string]string) data[filename] = string(policy.Spec.SQL) - var labels = map[string]string{ + labels := map[string]string{ "pg-cluster": p.cluster, } labels["pg-cluster"] = p.cluster @@ -134,19 +135,21 @@ func (p PolicyJob) Run() { policyJob := PolicyTemplate{ JobName: name, ClusterName: p.cluster, - PGOImagePrefix: util.GetValueOrDefault(cluster.Spec.CCPImagePrefix, p.ccpImagePrefix), - PGOImageTag: p.ccpImageTag, + CCPImagePrefix: util.GetValueOrDefault(cluster.Spec.CCPImagePrefix, p.ccpImagePrefix), + CCPImageTag: p.ccpImageTag, PGHost: p.cluster, PGPort: cluster.Spec.Port, PGDatabase: p.database, PGSQLConfigMap: name, PGUserSecret: p.secret, + Tolerations: util.GetTolerations(cluster.Spec.Tolerations), } var doc bytes.Buffer if err := config.PolicyJobTemplate.Execute(&doc, policyJob); err != nil { contextLogger.WithFields(log.Fields{ - "error": err}).Error("Failed to render job template") + "error": err, + }).Error("Failed to render job template") return } @@ -177,7 +180,7 @@ func (p PolicyJob) Run() { } // set the container image to an override value, if one exists - operator.SetContainerImageOverride(config.CONTAINER_IMAGE_PGO_SQL_RUNNER, + operator.SetContainerImageOverride(config.CONTAINER_IMAGE_CRUNCHY_POSTGRES_HA, &newJob.Spec.Template.Spec.Containers[0]) _, err = clientset.BatchV1().Jobs(p.namespace).Create(ctx, newJob, metav1.CreateOptions{}) diff --git a/cmd/pgo-scheduler/scheduler/scheduler.go b/cmd/pgo-scheduler/scheduler/scheduler.go index a0f9b3dc46..0887985554 100644 --- a/cmd/pgo-scheduler/scheduler/scheduler.go +++ b/cmd/pgo-scheduler/scheduler/scheduler.go @@ -1,7 +1,7 @@ package scheduler /* - Copyright 2019 - 2020 Crunchy Data Solutions, Inc. + Copyright 2019 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at @@ -32,8 +32,8 @@ import ( func New(label, namespace string, client kubeapi.Interface) *Scheduler { clientset = client cronClient := cv3.New() - cronClient.AddFunc("* * * * *", phony) - cronClient.AddFunc("* * * * *", heartbeat) + _, _ = cronClient.AddFunc("* * * * *", phony) + _, _ = cronClient.AddFunc("* * * * *", heartbeat) return &Scheduler{ namespace: namespace, @@ -56,17 +56,17 @@ func (s *Scheduler) AddSchedule(config *v1.ConfigMap) error { var schedule ScheduleTemplate for _, data := range config.Data { if err := json.Unmarshal([]byte(data), &schedule); err != nil { - return fmt.Errorf("Failed unmarhsaling configMap: %s", err) + return fmt.Errorf("Failed unmarshaling configMap: %w", err) } } if err := validate(schedule); err != nil { - return fmt.Errorf("Failed to validate schedule: %s", err) + return fmt.Errorf("Failed to validate schedule: %w", err) } id, err := s.schedule(schedule) if err != nil { - return fmt.Errorf("Failed to schedule configmap: %s", err) + return fmt.Errorf("Failed to schedule configmap: %w", err) } log.WithFields(log.Fields{ @@ -117,7 +117,8 @@ func phony() { // heartbeat modifies a sentinel file used as part of the liveness test // for the scheduler func heartbeat() { + // #nosec: G303 - err := ioutil.WriteFile("/tmp/scheduler.hb", []byte(time.Now().String()), 0644) + err := ioutil.WriteFile("/tmp/scheduler.hb", []byte(time.Now().String()), 0o600) if err != nil { log.Errorln("error writing heartbeat file: ", err) } diff --git a/cmd/pgo-scheduler/scheduler/tasks.go b/cmd/pgo-scheduler/scheduler/tasks.go index a2c715d3be..30560aff8b 100644 --- a/cmd/pgo-scheduler/scheduler/tasks.go +++ b/cmd/pgo-scheduler/scheduler/tasks.go @@ -1,7 +1,7 @@ package scheduler /* - Copyright 2019 - 2020 Crunchy Data Solutions, Inc. + Copyright 2019 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at diff --git a/cmd/pgo-scheduler/scheduler/types.go b/cmd/pgo-scheduler/scheduler/types.go index 3838e4d994..67fad2f8c2 100644 --- a/cmd/pgo-scheduler/scheduler/types.go +++ b/cmd/pgo-scheduler/scheduler/types.go @@ -1,7 +1,7 @@ package scheduler /* - Copyright 2019 - 2020 Crunchy Data Solutions, Inc. + Copyright 2019 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -63,11 +63,12 @@ type Policy struct { type PolicyTemplate struct { JobName string ClusterName string - PGOImagePrefix string - PGOImageTag string + CCPImagePrefix string + CCPImageTag string PGHost string PGPort string PGDatabase string PGUserSecret string PGSQLConfigMap string + Tolerations string } diff --git a/cmd/pgo-scheduler/scheduler/validate.go b/cmd/pgo-scheduler/scheduler/validate.go index 35bddb7e78..8af1ee8b09 100644 --- a/cmd/pgo-scheduler/scheduler/validate.go +++ b/cmd/pgo-scheduler/scheduler/validate.go @@ -1,7 +1,7 @@ package scheduler /* - Copyright 2019 - 2020 Crunchy Data Solutions, Inc. + Copyright 2019 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at diff --git a/cmd/pgo-scheduler/scheduler/validate_test.go b/cmd/pgo-scheduler/scheduler/validate_test.go index a7401a00d7..176bcf3e22 100644 --- a/cmd/pgo-scheduler/scheduler/validate_test.go +++ b/cmd/pgo-scheduler/scheduler/validate_test.go @@ -1,7 +1,7 @@ package scheduler /* - Copyright 2019 - 2020 Crunchy Data Solutions, Inc. + Copyright 2019 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at diff --git a/cmd/pgo/api/backrest.go b/cmd/pgo/api/backrest.go index 13e2ff702b..ad8a8d4505 100644 --- a/cmd/pgo/api/backrest.go +++ b/cmd/pgo/api/backrest.go @@ -1,7 +1,7 @@ package api /* - Copyright 2018 - 2020 Crunchy Data Solutions, Inc. + Copyright 2018 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -17,6 +17,7 @@ package api import ( "bytes" + "context" "encoding/json" "fmt" "net/http" @@ -25,8 +26,51 @@ import ( log "github.com/sirupsen/logrus" ) -func ShowBackrest(httpclient *http.Client, arg, selector string, SessionCredentials *msgs.BasicAuthCredentials, ns string) (msgs.ShowBackrestResponse, error) { +// DeleteBackup makes an API requests to delete a pgBackRest backup +func DeleteBackup(httpclient *http.Client, SessionCredentials *msgs.BasicAuthCredentials, request msgs.DeleteBackrestBackupRequest) (msgs.DeleteBackrestBackupResponse, error) { + var response msgs.DeleteBackrestBackupResponse + + // explicitly set the client version here + request.ClientVersion = msgs.PGO_VERSION + + log.Debugf("DeleteBackup called [%+v]", request) + + ctx := context.TODO() + jsonValue, _ := json.Marshal(request) + url := SessionCredentials.APIServerURL + "/backrest" + + action := "DELETE" + req, err := http.NewRequestWithContext(ctx, action, url, bytes.NewBuffer(jsonValue)) + if err != nil { + return response, err + } + + req.Header.Set("Content-Type", "application/json") + req.SetBasicAuth(SessionCredentials.Username, SessionCredentials.Password) + + resp, err := httpclient.Do(req) + if err != nil { + return response, err + } + + defer resp.Body.Close() + + log.Debugf("%+v", resp) + + if err := StatusCheck(resp); err != nil { + return response, err + } + + if err := json.NewDecoder(resp.Body).Decode(&response); err != nil { + fmt.Print("Error: ") + fmt.Println(err) + return response, err + } + + return response, nil +} +func ShowBackrest(httpclient *http.Client, arg, selector string, SessionCredentials *msgs.BasicAuthCredentials, ns string) (msgs.ShowBackrestResponse, error) { var response msgs.ShowBackrestResponse url := SessionCredentials.APIServerURL + "/backrest/" + arg + "?version=" + msgs.PGO_VERSION + "&selector=" + selector + "&namespace=" + ns @@ -58,11 +102,9 @@ func ShowBackrest(httpclient *http.Client, arg, selector string, SessionCredenti } return response, err - } func CreateBackrestBackup(httpclient *http.Client, SessionCredentials *msgs.BasicAuthCredentials, request *msgs.CreateBackrestBackupRequest) (msgs.CreateBackrestBackupResponse, error) { - var response msgs.CreateBackrestBackupResponse jsonValue, _ := json.Marshal(request) diff --git a/cmd/pgo/api/cat.go b/cmd/pgo/api/cat.go index 00d17c7fb6..9e159844e5 100644 --- a/cmd/pgo/api/cat.go +++ b/cmd/pgo/api/cat.go @@ -1,7 +1,7 @@ package api /* - Copyright 2019 - 2020 Crunchy Data 
Solutions, Inc. + Copyright 2019 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -18,13 +18,13 @@ package api import ( "bytes" "encoding/json" + "net/http" + msgs "github.com/crunchydata/postgres-operator/pkg/apiservermsgs" log "github.com/sirupsen/logrus" - "net/http" ) func Cat(httpclient *http.Client, SessionCredentials *msgs.BasicAuthCredentials, request *msgs.CatRequest) (msgs.CatResponse, error) { - var response msgs.CatResponse jsonValue, _ := json.Marshal(request) diff --git a/cmd/pgo/api/cluster.go b/cmd/pgo/api/cluster.go index 74407c0dbc..817d5a884e 100644 --- a/cmd/pgo/api/cluster.go +++ b/cmd/pgo/api/cluster.go @@ -1,7 +1,7 @@ package api /* - Copyright 2018 - 2020 Crunchy Data Solutions, Inc. + Copyright 2018 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -17,6 +17,7 @@ package api import ( "bytes" + "context" "encoding/json" "fmt" "net/http" @@ -33,15 +34,15 @@ const ( ) func ShowCluster(httpclient *http.Client, SessionCredentials *msgs.BasicAuthCredentials, request *msgs.ShowClusterRequest) (msgs.ShowClusterResponse, error) { - var response msgs.ShowClusterResponse + ctx := context.TODO() jsonValue, _ := json.Marshal(request) url := fmt.Sprintf(showClusterURL, SessionCredentials.APIServerURL) log.Debugf("showCluster called...[%s]", url) action := "POST" - req, err := http.NewRequest(action, url, bytes.NewBuffer(jsonValue)) + req, err := http.NewRequestWithContext(ctx, action, url, bytes.NewBuffer(jsonValue)) if err != nil { return response, err } @@ -68,20 +69,19 @@ func ShowCluster(httpclient *http.Client, SessionCredentials *msgs.BasicAuthCred } return response, err - } func DeleteCluster(httpclient *http.Client, request *msgs.DeleteClusterRequest, SessionCredentials *msgs.BasicAuthCredentials) (msgs.DeleteClusterResponse, error) { - var response msgs.DeleteClusterResponse + ctx := context.TODO() jsonValue, _ := json.Marshal(request) url := fmt.Sprintf(deleteClusterURL, SessionCredentials.APIServerURL) log.Debugf("delete cluster called %s", url) action := "POST" - req, err := http.NewRequest(action, url, bytes.NewBuffer(jsonValue)) + req, err := http.NewRequestWithContext(ctx, action, url, bytes.NewBuffer(jsonValue)) if err != nil { response.Status.Code = msgs.Error return response, err @@ -110,19 +110,18 @@ func DeleteCluster(httpclient *http.Client, request *msgs.DeleteClusterRequest, } return response, err - } func CreateCluster(httpclient *http.Client, SessionCredentials *msgs.BasicAuthCredentials, request *msgs.CreateClusterRequest) (msgs.CreateClusterResponse, error) { - var response msgs.CreateClusterResponse + ctx := context.TODO() jsonValue, _ := json.Marshal(request) url := fmt.Sprintf(createClusterURL, SessionCredentials.APIServerURL) log.Debugf("createCluster called...[%s]", url) action := "POST" - req, err := http.NewRequest(action, url, bytes.NewBuffer(jsonValue)) + req, err := http.NewRequestWithContext(ctx, action, url, bytes.NewBuffer(jsonValue)) if err != nil { return response, err } @@ -152,15 +151,14 @@ func CreateCluster(httpclient *http.Client, SessionCredentials *msgs.BasicAuthCr } func UpdateCluster(httpclient *http.Client, request *msgs.UpdateClusterRequest, SessionCredentials *msgs.BasicAuthCredentials) (msgs.UpdateClusterResponse, 
error) { - //func UpdateCluster(httpclient *http.Client, arg, selector string, SessionCredentials *msgs.BasicAuthCredentials, autofailFlag, ns string) (msgs.UpdateClusterResponse, error) { - var response msgs.UpdateClusterResponse jsonValue, _ := json.Marshal(request) + ctx := context.TODO() url := fmt.Sprintf(updateClusterURL, SessionCredentials.APIServerURL) log.Debugf("update cluster called %s", url) - req, err := http.NewRequest("POST", url, bytes.NewBuffer(jsonValue)) + req, err := http.NewRequestWithContext(ctx, "POST", url, bytes.NewBuffer(jsonValue)) if err != nil { response.Status.Code = msgs.Error return response, err @@ -189,5 +187,4 @@ func UpdateCluster(httpclient *http.Client, request *msgs.UpdateClusterRequest, } return response, err - } diff --git a/cmd/pgo/api/common.go b/cmd/pgo/api/common.go index b541272e94..a17299c37d 100644 --- a/cmd/pgo/api/common.go +++ b/cmd/pgo/api/common.go @@ -1,7 +1,7 @@ package api /* - Copyright 2018 - 2020 Crunchy Data Solutions, Inc. + Copyright 2018 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at diff --git a/cmd/pgo/api/config.go b/cmd/pgo/api/config.go index 90848edcfd..1915c3b0d3 100644 --- a/cmd/pgo/api/config.go +++ b/cmd/pgo/api/config.go @@ -1,7 +1,7 @@ package api /* - Copyright 2018 - 2020 Crunchy Data Solutions, Inc. + Copyright 2018 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -16,21 +16,23 @@ package api */ import ( + "context" "encoding/json" "fmt" + "net/http" + msgs "github.com/crunchydata/postgres-operator/pkg/apiservermsgs" log "github.com/sirupsen/logrus" - "net/http" ) func ShowConfig(httpclient *http.Client, SessionCredentials *msgs.BasicAuthCredentials, ns string) (msgs.ShowConfigResponse, error) { - var response msgs.ShowConfigResponse + ctx := context.TODO() url := SessionCredentials.APIServerURL + "/config?version=" + msgs.PGO_VERSION + "&namespace=" + ns log.Debug(url) - req, err := http.NewRequest("GET", url, nil) + req, err := http.NewRequestWithContext(ctx, "GET", url, nil) if err != nil { return response, err } @@ -59,5 +61,4 @@ func ShowConfig(httpclient *http.Client, SessionCredentials *msgs.BasicAuthCrede } return response, err - } diff --git a/cmd/pgo/api/df.go b/cmd/pgo/api/df.go index fa993051aa..0851118d9b 100644 --- a/cmd/pgo/api/df.go +++ b/cmd/pgo/api/df.go @@ -1,7 +1,7 @@ package api /* - Copyright 2018 - 2020 Crunchy Data Solutions, Inc. + Copyright 2018 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at @@ -17,6 +17,7 @@ package api import ( "bytes" + "context" "encoding/json" "fmt" "net/http" @@ -33,12 +34,12 @@ func ShowDf(httpclient *http.Client, SessionCredentials *msgs.BasicAuthCredentia log.Debugf("ShowDf called [%+v]", request) + ctx := context.TODO() jsonValue, _ := json.Marshal(request) url := SessionCredentials.APIServerURL + "/df" action := "POST" - req, err := http.NewRequest(action, url, bytes.NewBuffer(jsonValue)) - + req, err := http.NewRequestWithContext(ctx, action, url, bytes.NewBuffer(jsonValue)) if err != nil { return response, err } @@ -47,7 +48,6 @@ func ShowDf(httpclient *http.Client, SessionCredentials *msgs.BasicAuthCredentia req.SetBasicAuth(SessionCredentials.Username, SessionCredentials.Password) resp, err := httpclient.Do(req) - if err != nil { return response, err } diff --git a/cmd/pgo/api/failover.go b/cmd/pgo/api/failover.go index 4ebbab9471..41bac507e1 100644 --- a/cmd/pgo/api/failover.go +++ b/cmd/pgo/api/failover.go @@ -1,7 +1,7 @@ package api /* - Copyright 2018 - 2020 Crunchy Data Solutions, Inc. + Copyright 2018 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -17,24 +17,26 @@ package api import ( "bytes" + "context" "encoding/json" "fmt" + "net/http" + msgs "github.com/crunchydata/postgres-operator/pkg/apiservermsgs" log "github.com/sirupsen/logrus" - "net/http" ) func CreateFailover(httpclient *http.Client, SessionCredentials *msgs.BasicAuthCredentials, request *msgs.CreateFailoverRequest) (msgs.CreateFailoverResponse, error) { - var response msgs.CreateFailoverResponse + ctx := context.TODO() jsonValue, _ := json.Marshal(request) url := SessionCredentials.APIServerURL + "/failover" log.Debugf("create failover called [%s]", url) action := "POST" - req, err := http.NewRequest(action, url, bytes.NewBuffer(jsonValue)) + req, err := http.NewRequestWithContext(ctx, action, url, bytes.NewBuffer(jsonValue)) if err != nil { return response, err } @@ -63,15 +65,15 @@ func CreateFailover(httpclient *http.Client, SessionCredentials *msgs.BasicAuthC } func QueryFailover(httpclient *http.Client, arg string, SessionCredentials *msgs.BasicAuthCredentials, ns string) (msgs.QueryFailoverResponse, error) { - var response msgs.QueryFailoverResponse + ctx := context.TODO() url := SessionCredentials.APIServerURL + "/failover/" + arg + "?version=" + msgs.PGO_VERSION + "&namespace=" + ns log.Debugf("query failover called [%s]", url) action := "GET" - req, err := http.NewRequest(action, url, nil) + req, err := http.NewRequestWithContext(ctx, action, url, nil) if err != nil { return response, err } @@ -97,5 +99,4 @@ func QueryFailover(httpclient *http.Client, arg string, SessionCredentials *msgs } return response, err - } diff --git a/cmd/pgo/api/label.go b/cmd/pgo/api/label.go index e083f998a8..303710c7bf 100644 --- a/cmd/pgo/api/label.go +++ b/cmd/pgo/api/label.go @@ -1,7 +1,7 @@ package api /* - Copyright 2018 - 2020 Crunchy Data Solutions, Inc. + Copyright 2018 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at @@ -18,13 +18,13 @@ package api import ( "bytes" "encoding/json" + "net/http" + msgs "github.com/crunchydata/postgres-operator/pkg/apiservermsgs" log "github.com/sirupsen/logrus" - "net/http" ) func LabelClusters(httpclient *http.Client, SessionCredentials *msgs.BasicAuthCredentials, request *msgs.LabelRequest) (msgs.LabelResponse, error) { - var response msgs.LabelResponse url := SessionCredentials.APIServerURL + "/label" log.Debugf("label called...[%s]", url) @@ -61,7 +61,6 @@ func LabelClusters(httpclient *http.Client, SessionCredentials *msgs.BasicAuthCr } func DeleteLabel(httpclient *http.Client, SessionCredentials *msgs.BasicAuthCredentials, request *msgs.DeleteLabelRequest) (msgs.LabelResponse, error) { - var response msgs.LabelResponse url := SessionCredentials.APIServerURL + "/labeldelete" log.Debugf("delete label called...[%s]", url) diff --git a/cmd/pgo/api/namespace.go b/cmd/pgo/api/namespace.go index 96f10ba8d7..96dc49617a 100644 --- a/cmd/pgo/api/namespace.go +++ b/cmd/pgo/api/namespace.go @@ -1,7 +1,7 @@ package api /* - Copyright 2019 - 2020 Crunchy Data Solutions, Inc. + Copyright 2019 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -17,23 +17,25 @@ package api import ( "bytes" + "context" "encoding/json" "fmt" + "net/http" + msgs "github.com/crunchydata/postgres-operator/pkg/apiservermsgs" log "github.com/sirupsen/logrus" - "net/http" ) func ShowNamespace(httpclient *http.Client, SessionCredentials *msgs.BasicAuthCredentials, request *msgs.ShowNamespaceRequest) (msgs.ShowNamespaceResponse, error) { - var resp msgs.ShowNamespaceResponse resp.Status.Code = msgs.Ok + ctx := context.TODO() jsonValue, _ := json.Marshal(request) url := SessionCredentials.APIServerURL + "/namespace" log.Debugf("ShowNamespace called...[%s]", url) - req, err := http.NewRequest("POST", url, bytes.NewBuffer(jsonValue)) + req, err := http.NewRequestWithContext(ctx, "POST", url, bytes.NewBuffer(jsonValue)) if err != nil { resp.Status.Code = msgs.Error return resp, err @@ -61,19 +63,18 @@ func ShowNamespace(httpclient *http.Client, SessionCredentials *msgs.BasicAuthCr } return resp, err - } func CreateNamespace(httpclient *http.Client, SessionCredentials *msgs.BasicAuthCredentials, request *msgs.CreateNamespaceRequest) (msgs.CreateNamespaceResponse, error) { - var resp msgs.CreateNamespaceResponse resp.Status.Code = msgs.Ok + ctx := context.TODO() jsonValue, _ := json.Marshal(request) url := SessionCredentials.APIServerURL + "/namespacecreate" log.Debugf("CreateNamespace called...[%s]", url) - req, err := http.NewRequest("POST", url, bytes.NewBuffer(jsonValue)) + req, err := http.NewRequestWithContext(ctx, "POST", url, bytes.NewBuffer(jsonValue)) if err != nil { resp.Status.Code = msgs.Error return resp, err @@ -107,15 +108,15 @@ func CreateNamespace(httpclient *http.Client, SessionCredentials *msgs.BasicAuth } func DeleteNamespace(httpclient *http.Client, request *msgs.DeleteNamespaceRequest, SessionCredentials *msgs.BasicAuthCredentials) (msgs.DeleteNamespaceResponse, error) { - var response msgs.DeleteNamespaceResponse url := SessionCredentials.APIServerURL + "/namespacedelete" log.Debugf("DeleteNamespace called [%s]", url) + ctx := context.TODO() jsonValue, _ := json.Marshal(request) - req, err := http.NewRequest("POST", url, bytes.NewBuffer(jsonValue)) + req, err := http.NewRequestWithContext(ctx, 
"POST", url, bytes.NewBuffer(jsonValue)) if err != nil { response.Status.Code = msgs.Error return response, err @@ -142,18 +143,18 @@ func DeleteNamespace(httpclient *http.Client, request *msgs.DeleteNamespaceReque } return response, err - } -func UpdateNamespace(httpclient *http.Client, request *msgs.UpdateNamespaceRequest, SessionCredentials *msgs.BasicAuthCredentials) (msgs.UpdateNamespaceResponse, error) { +func UpdateNamespace(httpclient *http.Client, request *msgs.UpdateNamespaceRequest, SessionCredentials *msgs.BasicAuthCredentials) (msgs.UpdateNamespaceResponse, error) { var response msgs.UpdateNamespaceResponse url := SessionCredentials.APIServerURL + "/namespaceupdate" log.Debugf("UpdateNamespace called [%s]", url) + ctx := context.TODO() jsonValue, _ := json.Marshal(request) - req, err := http.NewRequest("POST", url, bytes.NewBuffer(jsonValue)) + req, err := http.NewRequestWithContext(ctx, "POST", url, bytes.NewBuffer(jsonValue)) if err != nil { response.Status.Code = msgs.Error return response, err @@ -180,5 +181,4 @@ func UpdateNamespace(httpclient *http.Client, request *msgs.UpdateNamespaceReque } return response, err - } diff --git a/cmd/pgo/api/pgadmin.go b/cmd/pgo/api/pgadmin.go index 0d410355cd..279420a612 100644 --- a/cmd/pgo/api/pgadmin.go +++ b/cmd/pgo/api/pgadmin.go @@ -1,7 +1,7 @@ package api /* - Copyright 2018 - 2020 Crunchy Data Solutions, Inc. + Copyright 2018 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -17,6 +17,7 @@ package api import ( "bytes" + "context" "encoding/json" "io/ioutil" "net/http" @@ -29,12 +30,13 @@ import ( func CreatePgAdmin(httpclient *http.Client, SessionCredentials *msgs.BasicAuthCredentials, request *msgs.CreatePgAdminRequest) (msgs.CreatePgAdminResponse, error) { var response msgs.CreatePgAdminResponse + ctx := context.TODO() jsonValue, _ := json.Marshal(request) url := SessionCredentials.APIServerURL + "/pgadmin" log.Debugf("createPgAdmin called...[%s]", url) action := "POST" - req, err := http.NewRequest(action, url, bytes.NewBuffer(jsonValue)) + req, err := http.NewRequestWithContext(ctx, action, url, bytes.NewBuffer(jsonValue)) if err != nil { return response, err } @@ -68,12 +70,13 @@ func CreatePgAdmin(httpclient *http.Client, SessionCredentials *msgs.BasicAuthCr func DeletePgAdmin(httpclient *http.Client, SessionCredentials *msgs.BasicAuthCredentials, request *msgs.DeletePgAdminRequest) (msgs.DeletePgAdminResponse, error) { var response msgs.DeletePgAdminResponse + ctx := context.TODO() jsonValue, _ := json.Marshal(request) url := SessionCredentials.APIServerURL + "/pgadmin" log.Debugf("deletePgAdmin called...[%s]", url) action := "DELETE" - req, err := http.NewRequest(action, url, bytes.NewBuffer(jsonValue)) + req, err := http.NewRequestWithContext(ctx, action, url, bytes.NewBuffer(jsonValue)) if err != nil { return response, err } @@ -117,13 +120,13 @@ func ShowPgAdmin(httpclient *http.Client, SessionCredentials *msgs.BasicAuthCred log.Debugf("ShowPgAdmin called [%+v]", request) // put the request into JSON format and format the URL and HTTP verb + ctx := context.TODO() jsonValue, _ := json.Marshal(request) url := SessionCredentials.APIServerURL + "/pgadmin/show" action := "POST" // prepare the request! 
- httpRequest, err := http.NewRequest(action, url, bytes.NewBuffer(jsonValue)) - + httpRequest, err := http.NewRequestWithContext(ctx, action, url, bytes.NewBuffer(jsonValue)) // if there is an error preparing the request, return here if err != nil { return msgs.ShowPgAdminResponse{}, err diff --git a/cmd/pgo/api/pgbouncer.go b/cmd/pgo/api/pgbouncer.go index efee86ca53..c227956241 100644 --- a/cmd/pgo/api/pgbouncer.go +++ b/cmd/pgo/api/pgbouncer.go @@ -1,7 +1,7 @@ package api /* - Copyright 2018 - 2020 Crunchy Data Solutions, Inc. + Copyright 2018 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -17,22 +17,24 @@ package api import ( "bytes" + "context" "encoding/json" + "net/http" + msgs "github.com/crunchydata/postgres-operator/pkg/apiservermsgs" log "github.com/sirupsen/logrus" - "net/http" ) func CreatePgbouncer(httpclient *http.Client, SessionCredentials *msgs.BasicAuthCredentials, request *msgs.CreatePgbouncerRequest) (msgs.CreatePgbouncerResponse, error) { - var response msgs.CreatePgbouncerResponse + ctx := context.TODO() jsonValue, _ := json.Marshal(request) url := SessionCredentials.APIServerURL + "/pgbouncer" log.Debugf("createPgbouncer called...[%s]", url) action := "POST" - req, err := http.NewRequest(action, url, bytes.NewBuffer(jsonValue)) + req, err := http.NewRequestWithContext(ctx, action, url, bytes.NewBuffer(jsonValue)) if err != nil { return response, err } @@ -61,15 +63,15 @@ func CreatePgbouncer(httpclient *http.Client, SessionCredentials *msgs.BasicAuth } func DeletePgbouncer(httpclient *http.Client, SessionCredentials *msgs.BasicAuthCredentials, request *msgs.DeletePgbouncerRequest) (msgs.DeletePgbouncerResponse, error) { - var response msgs.DeletePgbouncerResponse + ctx := context.TODO() jsonValue, _ := json.Marshal(request) url := SessionCredentials.APIServerURL + "/pgbouncer" log.Debugf("deletePgbouncer called...[%s]", url) action := "DELETE" - req, err := http.NewRequest(action, url, bytes.NewBuffer(jsonValue)) + req, err := http.NewRequestWithContext(ctx, action, url, bytes.NewBuffer(jsonValue)) if err != nil { return response, err } @@ -108,13 +110,13 @@ func ShowPgBouncer(httpclient *http.Client, SessionCredentials *msgs.BasicAuthCr log.Debugf("ShowPgBouncer called [%+v]", request) // put the request into JSON format and format the URL and HTTP verb + ctx := context.TODO() jsonValue, _ := json.Marshal(request) url := SessionCredentials.APIServerURL + "/pgbouncer/show" action := "POST" // prepare the request! - httpRequest, err := http.NewRequest(action, url, bytes.NewBuffer(jsonValue)) - + httpRequest, err := http.NewRequestWithContext(ctx, action, url, bytes.NewBuffer(jsonValue)) // if there is an error preparing the request, return here if err != nil { return msgs.ShowPgBouncerResponse{}, err @@ -127,7 +129,6 @@ func ShowPgBouncer(httpclient *http.Client, SessionCredentials *msgs.BasicAuthCr // make the request! 
if there is an error making the request, return httpResponse, err := httpclient.Do(httpRequest) - if err != nil { return msgs.ShowPgBouncerResponse{}, err } @@ -162,13 +163,13 @@ func UpdatePgBouncer(httpclient *http.Client, SessionCredentials *msgs.BasicAuth log.Debugf("UpdatePgBouncer called [%+v]", request) // put the request into JSON format and format the URL and HTTP verb + ctx := context.TODO() jsonValue, _ := json.Marshal(request) url := SessionCredentials.APIServerURL + "/pgbouncer" action := "PUT" // prepare the request! - httpRequest, err := http.NewRequest(action, url, bytes.NewBuffer(jsonValue)) - + httpRequest, err := http.NewRequestWithContext(ctx, action, url, bytes.NewBuffer(jsonValue)) // if there is an error preparing the request, return here if err != nil { return msgs.UpdatePgBouncerResponse{}, err @@ -181,7 +182,6 @@ func UpdatePgBouncer(httpclient *http.Client, SessionCredentials *msgs.BasicAuth // make the request! if there is an error making the request, return httpResponse, err := httpclient.Do(httpRequest) - if err != nil { return msgs.UpdatePgBouncerResponse{}, err } diff --git a/cmd/pgo/api/pgdump.go b/cmd/pgo/api/pgdump.go index 3bc0804c7b..18ae1fa903 100644 --- a/cmd/pgo/api/pgdump.go +++ b/cmd/pgo/api/pgdump.go @@ -1,7 +1,7 @@ package api /* - Copyright 2019 - 2020 Crunchy Data Solutions, Inc. + Copyright 2019 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -17,6 +17,7 @@ package api import ( "bytes" + "context" "encoding/json" "fmt" "net/http" @@ -26,14 +27,14 @@ import ( ) func ShowpgDump(httpclient *http.Client, arg, selector string, SessionCredentials *msgs.BasicAuthCredentials, ns string) (msgs.ShowBackupResponse, error) { - var response msgs.ShowBackupResponse url := SessionCredentials.APIServerURL + "/pgdump/" + arg + "?version=" + msgs.PGO_VERSION + "&selector=" + selector + "&namespace=" + ns log.Debugf("show pgdump called [%s]", url) + ctx := context.TODO() action := "GET" - req, err := http.NewRequest(action, url, nil) + req, err := http.NewRequestWithContext(ctx, action, url, nil) if err != nil { return response, err } @@ -58,13 +59,12 @@ func ShowpgDump(httpclient *http.Client, arg, selector string, SessionCredential } return response, err - } func CreatepgDumpBackup(httpclient *http.Client, SessionCredentials *msgs.BasicAuthCredentials, request *msgs.CreatepgDumpBackupRequest) (msgs.CreatepgDumpBackupResponse, error) { - var response msgs.CreatepgDumpBackupResponse + ctx := context.TODO() jsonValue, _ := json.Marshal(request) url := SessionCredentials.APIServerURL + "/pgdumpbackup" @@ -72,7 +72,7 @@ func CreatepgDumpBackup(httpclient *http.Client, SessionCredentials *msgs.BasicA log.Debugf("create pgdump backup called [%s]", url) action := "POST" - req, err := http.NewRequest(action, url, bytes.NewBuffer(jsonValue)) + req, err := http.NewRequestWithContext(ctx, action, url, bytes.NewBuffer(jsonValue)) if err != nil { return response, err } diff --git a/cmd/pgo/api/pgorole.go b/cmd/pgo/api/pgorole.go index 804f0c1eb2..4154145ecd 100644 --- a/cmd/pgo/api/pgorole.go +++ b/cmd/pgo/api/pgorole.go @@ -1,7 +1,7 @@ package api /* - Copyright 2019 - 2020 Crunchy Data Solutions, Inc. + Copyright 2019 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
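Zooming out from the per-file noise: nearly every cmd/pgo/api change in this patch is the same mechanical migration from http.NewRequest to http.NewRequestWithContext with a context.TODO(). Extracted once, the resulting request shape looks roughly like this (the helper and its error handling are illustrative; the real client uses StatusCheck and per-endpoint response types):

```go
package main

import (
	"bytes"
	"context"
	"encoding/json"
	"fmt"
	"net/http"
)

// postJSON shows the request shape used across cmd/pgo/api after this patch:
// a context-aware request carrying basic auth and a JSON body.
func postJSON(client *http.Client, url, user, pass string, payload interface{}, out interface{}) error {
	ctx := context.TODO() // the patch plumbs context.TODO() everywhere for now

	body, err := json.Marshal(payload)
	if err != nil {
		return err
	}

	req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, bytes.NewBuffer(body))
	if err != nil {
		return err
	}
	req.Header.Set("Content-Type", "application/json")
	req.SetBasicAuth(user, pass)

	resp, err := client.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("unexpected status: %s", resp.Status)
	}
	return json.NewDecoder(resp.Body).Decode(out)
}
```

The immediate win is modest, since context.TODO() never expires, but every call site is now ready to carry a real deadline or cancellation signal without further churn.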
You may obtain a copy of the License at @@ -17,22 +17,24 @@ package api import ( "bytes" + "context" "encoding/json" "fmt" + "net/http" + msgs "github.com/crunchydata/postgres-operator/pkg/apiservermsgs" log "github.com/sirupsen/logrus" - "net/http" ) func ShowPgorole(httpclient *http.Client, SessionCredentials *msgs.BasicAuthCredentials, request *msgs.ShowPgoroleRequest) (msgs.ShowPgoroleResponse, error) { - var response msgs.ShowPgoroleResponse + ctx := context.TODO() jsonValue, _ := json.Marshal(request) url := SessionCredentials.APIServerURL + "/pgoroleshow" log.Debugf("ShowPgorole called...[%s]", url) - req, err := http.NewRequest("POST", url, bytes.NewBuffer(jsonValue)) + req, err := http.NewRequestWithContext(ctx, "POST", url, bytes.NewBuffer(jsonValue)) if err != nil { response.Status.Code = msgs.Error return response, err @@ -58,18 +60,18 @@ func ShowPgorole(httpclient *http.Client, SessionCredentials *msgs.BasicAuthCred } return response, err - } -func CreatePgorole(httpclient *http.Client, SessionCredentials *msgs.BasicAuthCredentials, request *msgs.CreatePgoroleRequest) (msgs.CreatePgoroleResponse, error) { +func CreatePgorole(httpclient *http.Client, SessionCredentials *msgs.BasicAuthCredentials, request *msgs.CreatePgoroleRequest) (msgs.CreatePgoroleResponse, error) { var resp msgs.CreatePgoroleResponse resp.Status.Code = msgs.Ok + ctx := context.TODO() jsonValue, _ := json.Marshal(request) url := SessionCredentials.APIServerURL + "/pgorolecreate" log.Debugf("CreatePgorole called...[%s]", url) - req, err := http.NewRequest("POST", url, bytes.NewBuffer(jsonValue)) + req, err := http.NewRequestWithContext(ctx, "POST", url, bytes.NewBuffer(jsonValue)) if err != nil { resp.Status.Code = msgs.Error return resp, err @@ -103,15 +105,15 @@ func CreatePgorole(httpclient *http.Client, SessionCredentials *msgs.BasicAuthCr } func DeletePgorole(httpclient *http.Client, request *msgs.DeletePgoroleRequest, SessionCredentials *msgs.BasicAuthCredentials) (msgs.DeletePgoroleResponse, error) { - var response msgs.DeletePgoroleResponse url := SessionCredentials.APIServerURL + "/pgoroledelete" log.Debugf("DeletePgorole called [%s]", url) + ctx := context.TODO() jsonValue, _ := json.Marshal(request) - req, err := http.NewRequest("POST", url, bytes.NewBuffer(jsonValue)) + req, err := http.NewRequestWithContext(ctx, "POST", url, bytes.NewBuffer(jsonValue)) if err != nil { response.Status.Code = msgs.Error return response, err @@ -138,18 +140,17 @@ func DeletePgorole(httpclient *http.Client, request *msgs.DeletePgoroleRequest, } return response, err - } func UpdatePgorole(httpclient *http.Client, SessionCredentials *msgs.BasicAuthCredentials, request *msgs.UpdatePgoroleRequest) (msgs.UpdatePgoroleResponse, error) { - var response msgs.UpdatePgoroleResponse + ctx := context.TODO() jsonValue, _ := json.Marshal(request) url := SessionCredentials.APIServerURL + "/pgoroleupdate" log.Debugf("UpdatePgorole called...[%s]", url) - req, err := http.NewRequest("POST", url, bytes.NewBuffer(jsonValue)) + req, err := http.NewRequestWithContext(ctx, "POST", url, bytes.NewBuffer(jsonValue)) if err != nil { return response, err } diff --git a/cmd/pgo/api/pgouser.go b/cmd/pgo/api/pgouser.go index e0026d20ca..82b0bd9ffa 100644 --- a/cmd/pgo/api/pgouser.go +++ b/cmd/pgo/api/pgouser.go @@ -1,7 +1,7 @@ package api /* - Copyright 2019 - 2020 Crunchy Data Solutions, Inc. + Copyright 2019 - 2023 Crunchy Data Solutions, Inc. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -17,22 +17,24 @@ package api import ( "bytes" + "context" "encoding/json" "fmt" + "net/http" + msgs "github.com/crunchydata/postgres-operator/pkg/apiservermsgs" log "github.com/sirupsen/logrus" - "net/http" ) func ShowPgouser(httpclient *http.Client, SessionCredentials *msgs.BasicAuthCredentials, request *msgs.ShowPgouserRequest) (msgs.ShowPgouserResponse, error) { - var response msgs.ShowPgouserResponse + ctx := context.TODO() jsonValue, _ := json.Marshal(request) url := SessionCredentials.APIServerURL + "/pgousershow" log.Debugf("ShowPgouser called...[%s]", url) - req, err := http.NewRequest("POST", url, bytes.NewBuffer(jsonValue)) + req, err := http.NewRequestWithContext(ctx, "POST", url, bytes.NewBuffer(jsonValue)) if err != nil { response.Status.Code = msgs.Error return response, err @@ -58,18 +60,18 @@ func ShowPgouser(httpclient *http.Client, SessionCredentials *msgs.BasicAuthCred } return response, err - } -func CreatePgouser(httpclient *http.Client, SessionCredentials *msgs.BasicAuthCredentials, request *msgs.CreatePgouserRequest) (msgs.CreatePgouserResponse, error) { +func CreatePgouser(httpclient *http.Client, SessionCredentials *msgs.BasicAuthCredentials, request *msgs.CreatePgouserRequest) (msgs.CreatePgouserResponse, error) { var resp msgs.CreatePgouserResponse resp.Status.Code = msgs.Ok + ctx := context.TODO() jsonValue, _ := json.Marshal(request) url := SessionCredentials.APIServerURL + "/pgousercreate" log.Debugf("CreatePgouser called...[%s]", url) - req, err := http.NewRequest("POST", url, bytes.NewBuffer(jsonValue)) + req, err := http.NewRequestWithContext(ctx, "POST", url, bytes.NewBuffer(jsonValue)) if err != nil { resp.Status.Code = msgs.Error return resp, err @@ -103,15 +105,15 @@ func CreatePgouser(httpclient *http.Client, SessionCredentials *msgs.BasicAuthCr } func DeletePgouser(httpclient *http.Client, request *msgs.DeletePgouserRequest, SessionCredentials *msgs.BasicAuthCredentials) (msgs.DeletePgouserResponse, error) { - var response msgs.DeletePgouserResponse url := SessionCredentials.APIServerURL + "/pgouserdelete" log.Debugf("DeletePgouser called [%s]", url) + ctx := context.TODO() jsonValue, _ := json.Marshal(request) - req, err := http.NewRequest("POST", url, bytes.NewBuffer(jsonValue)) + req, err := http.NewRequestWithContext(ctx, "POST", url, bytes.NewBuffer(jsonValue)) if err != nil { response.Status.Code = msgs.Error return response, err @@ -138,18 +140,17 @@ func DeletePgouser(httpclient *http.Client, request *msgs.DeletePgouserRequest, } return response, err - } func UpdatePgouser(httpclient *http.Client, SessionCredentials *msgs.BasicAuthCredentials, request *msgs.UpdatePgouserRequest) (msgs.UpdatePgouserResponse, error) { - var response msgs.UpdatePgouserResponse + ctx := context.TODO() jsonValue, _ := json.Marshal(request) url := SessionCredentials.APIServerURL + "/pgouserupdate" log.Debugf("UpdatePgouser called...[%s]", url) - req, err := http.NewRequest("POST", url, bytes.NewBuffer(jsonValue)) + req, err := http.NewRequestWithContext(ctx, "POST", url, bytes.NewBuffer(jsonValue)) if err != nil { return response, err } diff --git a/cmd/pgo/api/policy.go b/cmd/pgo/api/policy.go index b7e9cf5d6f..54ae74060e 100644 --- a/cmd/pgo/api/policy.go +++ b/cmd/pgo/api/policy.go @@ -1,7 +1,7 @@ package api /* - Copyright 2018 - 2020 Crunchy Data Solutions, Inc. 
+ Copyright 2018 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -17,23 +17,25 @@ package api import ( "bytes" + "context" "encoding/json" "fmt" + "net/http" + msgs "github.com/crunchydata/postgres-operator/pkg/apiservermsgs" log "github.com/sirupsen/logrus" - "net/http" ) func ShowPolicy(httpclient *http.Client, SessionCredentials *msgs.BasicAuthCredentials, request *msgs.ShowPolicyRequest) (msgs.ShowPolicyResponse, error) { - var response msgs.ShowPolicyResponse + ctx := context.TODO() jsonValue, _ := json.Marshal(request) url := SessionCredentials.APIServerURL + "/showpolicies" log.Debugf("showPolicy called...[%s]", url) action := "POST" - req, err := http.NewRequest(action, url, bytes.NewBuffer(jsonValue)) + req, err := http.NewRequestWithContext(ctx, action, url, bytes.NewBuffer(jsonValue)) if err != nil { response.Status.Code = msgs.Error return response, err @@ -59,19 +61,19 @@ func ShowPolicy(httpclient *http.Client, SessionCredentials *msgs.BasicAuthCrede } return response, err - } -func CreatePolicy(httpclient *http.Client, SessionCredentials *msgs.BasicAuthCredentials, request *msgs.CreatePolicyRequest) (msgs.CreatePolicyResponse, error) { +func CreatePolicy(httpclient *http.Client, SessionCredentials *msgs.BasicAuthCredentials, request *msgs.CreatePolicyRequest) (msgs.CreatePolicyResponse, error) { var resp msgs.CreatePolicyResponse resp.Status.Code = msgs.Ok + ctx := context.TODO() jsonValue, _ := json.Marshal(request) url := SessionCredentials.APIServerURL + "/policies" log.Debugf("createPolicy called...[%s]", url) action := "POST" - req, err := http.NewRequest(action, url, bytes.NewBuffer(jsonValue)) + req, err := http.NewRequestWithContext(ctx, action, url, bytes.NewBuffer(jsonValue)) if err != nil { resp.Status.Code = msgs.Error return resp, err @@ -105,16 +107,16 @@ func CreatePolicy(httpclient *http.Client, SessionCredentials *msgs.BasicAuthCre } func DeletePolicy(httpclient *http.Client, request *msgs.DeletePolicyRequest, SessionCredentials *msgs.BasicAuthCredentials) (msgs.DeletePolicyResponse, error) { - var response msgs.DeletePolicyResponse url := SessionCredentials.APIServerURL + "/policiesdelete" log.Debugf("delete policy called [%s]", url) + ctx := context.TODO() action := "POST" jsonValue, _ := json.Marshal(request) - req, err := http.NewRequest(action, url, bytes.NewBuffer(jsonValue)) + req, err := http.NewRequestWithContext(ctx, action, url, bytes.NewBuffer(jsonValue)) if err != nil { response.Status.Code = msgs.Error return response, err @@ -141,19 +143,18 @@ func DeletePolicy(httpclient *http.Client, request *msgs.DeletePolicyRequest, Se } return response, err - } func ApplyPolicy(httpclient *http.Client, SessionCredentials *msgs.BasicAuthCredentials, request *msgs.ApplyPolicyRequest) (msgs.ApplyPolicyResponse, error) { - var response msgs.ApplyPolicyResponse + ctx := context.TODO() jsonValue, _ := json.Marshal(request) url := SessionCredentials.APIServerURL + "/policies/apply" log.Debugf("applyPolicy called...[%s]", url) action := "POST" - req, err := http.NewRequest(action, url, bytes.NewBuffer(jsonValue)) + req, err := http.NewRequestWithContext(ctx, action, url, bytes.NewBuffer(jsonValue)) if err != nil { return response, err } diff --git a/cmd/pgo/api/pvc.go b/cmd/pgo/api/pvc.go index f4fac4ceb4..6b385efd96 100644 --- a/cmd/pgo/api/pvc.go +++ b/cmd/pgo/api/pvc.go @@ -1,7 +1,7 @@ package api /* - 
Copyright 2018 - 2020 Crunchy Data Solutions, Inc. + Copyright 2018 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -17,17 +17,19 @@ package api import ( "bytes" + "context" "encoding/json" "fmt" + "net/http" + msgs "github.com/crunchydata/postgres-operator/pkg/apiservermsgs" log "github.com/sirupsen/logrus" - "net/http" ) func ShowPVC(httpclient *http.Client, request *msgs.ShowPVCRequest, SessionCredentials *msgs.BasicAuthCredentials) (msgs.ShowPVCResponse, error) { - var response msgs.ShowPVCResponse + ctx := context.TODO() url := SessionCredentials.APIServerURL + "/showpvc" log.Debugf("ShowPVC called...[%s]", url) @@ -35,7 +37,7 @@ func ShowPVC(httpclient *http.Client, request *msgs.ShowPVCRequest, SessionCrede log.Debugf("ShowPVC called...[%s]", url) action := "POST" - req, err := http.NewRequest(action, url, bytes.NewBuffer(jsonValue)) + req, err := http.NewRequestWithContext(ctx, action, url, bytes.NewBuffer(jsonValue)) if err != nil { return response, err } @@ -61,5 +63,4 @@ func ShowPVC(httpclient *http.Client, request *msgs.ShowPVCRequest, SessionCrede } return response, err - } diff --git a/cmd/pgo/api/reload.go b/cmd/pgo/api/reload.go index 9235cc1ea9..81368841ce 100644 --- a/cmd/pgo/api/reload.go +++ b/cmd/pgo/api/reload.go @@ -1,7 +1,7 @@ package api /* - Copyright 2018 - 2020 Crunchy Data Solutions, Inc. + Copyright 2018 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -18,13 +18,13 @@ package api import ( "bytes" "encoding/json" + "net/http" + msgs "github.com/crunchydata/postgres-operator/pkg/apiservermsgs" log "github.com/sirupsen/logrus" - "net/http" ) func Reload(httpclient *http.Client, SessionCredentials *msgs.BasicAuthCredentials, request *msgs.ReloadRequest) (msgs.ReloadResponse, error) { - var response msgs.ReloadResponse jsonValue, _ := json.Marshal(request) diff --git a/cmd/pgo/api/restart.go b/cmd/pgo/api/restart.go index 13dc205972..69f1ddb781 100644 --- a/cmd/pgo/api/restart.go +++ b/cmd/pgo/api/restart.go @@ -1,7 +1,7 @@ package api /* - Copyright 2018 - 2020 Crunchy Data Solutions, Inc. + Copyright 2018 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -17,6 +17,7 @@ package api import ( "bytes" + "context" "encoding/json" "fmt" "net/http" @@ -29,12 +30,12 @@ import ( // a PG cluster or one or more instances within it. func Restart(httpclient *http.Client, SessionCredentials *msgs.BasicAuthCredentials, request *msgs.RestartRequest) (msgs.RestartResponse, error) { - var response msgs.RestartResponse + ctx := context.TODO() jsonValue, _ := json.Marshal(request) url := fmt.Sprintf("%s/%s", SessionCredentials.APIServerURL, "restart") - req, err := http.NewRequest(http.MethodPost, url, bytes.NewBuffer(jsonValue)) + req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, bytes.NewBuffer(jsonValue)) if err != nil { return response, err } @@ -69,11 +70,11 @@ func Restart(httpclient *http.Client, SessionCredentials *msgs.BasicAuthCredenti // cluster specified. 
func QueryRestart(httpclient *http.Client, clusterName string, SessionCredentials *msgs.BasicAuthCredentials, namespace string) (msgs.QueryRestartResponse, error) { - var response msgs.QueryRestartResponse + ctx := context.TODO() url := fmt.Sprintf("%s/%s/%s", SessionCredentials.APIServerURL, "restart", clusterName) - req, err := http.NewRequest(http.MethodGet, url, nil) + req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil) if err != nil { return response, err } diff --git a/cmd/pgo/api/restore.go b/cmd/pgo/api/restore.go index e22cea904b..67c9dfc073 100644 --- a/cmd/pgo/api/restore.go +++ b/cmd/pgo/api/restore.go @@ -1,7 +1,7 @@ package api /* - Copyright 2018 - 2020 Crunchy Data Solutions, Inc. + Copyright 2018 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -18,13 +18,13 @@ package api import ( "bytes" "encoding/json" + "net/http" + msgs "github.com/crunchydata/postgres-operator/pkg/apiservermsgs" log "github.com/sirupsen/logrus" - "net/http" ) func Restore(httpclient *http.Client, SessionCredentials *msgs.BasicAuthCredentials, request *msgs.RestoreRequest) (msgs.RestoreResponse, error) { - var response msgs.RestoreResponse jsonValue, _ := json.Marshal(request) diff --git a/cmd/pgo/api/restoreDump.go b/cmd/pgo/api/restoreDump.go index bd911c1b75..8cff5f35f1 100644 --- a/cmd/pgo/api/restoreDump.go +++ b/cmd/pgo/api/restoreDump.go @@ -1,7 +1,7 @@ package api /* - Copyright 2019 - 2020 Crunchy Data Solutions, Inc. + Copyright 2019 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -18,13 +18,13 @@ package api import ( "bytes" "encoding/json" + "net/http" + msgs "github.com/crunchydata/postgres-operator/pkg/apiservermsgs" log "github.com/sirupsen/logrus" - "net/http" ) func RestoreDump(httpclient *http.Client, SessionCredentials *msgs.BasicAuthCredentials, request *msgs.PgRestoreRequest) (msgs.RestoreResponse, error) { - var response msgs.RestoreResponse jsonValue, _ := json.Marshal(request) diff --git a/cmd/pgo/api/scale.go b/cmd/pgo/api/scale.go index 6defb09127..8cdd8a04cc 100644 --- a/cmd/pgo/api/scale.go +++ b/cmd/pgo/api/scale.go @@ -1,7 +1,7 @@ package api /* - Copyright 2018 - 2020 Crunchy Data Solutions, Inc. + Copyright 2018 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at @@ -16,60 +16,52 @@ package api */ import ( + "bytes" + "context" "encoding/json" "fmt" "net/http" - "strconv" msgs "github.com/crunchydata/postgres-operator/pkg/apiservermsgs" log "github.com/sirupsen/logrus" ) -func ScaleCluster(httpclient *http.Client, arg string, ReplicaCount int, - StorageConfig, NodeLabel, CCPImageTag, ServiceType string, - SessionCredentials *msgs.BasicAuthCredentials, ns string) (msgs.ClusterScaleResponse, error) { +func ScaleCluster(httpclient *http.Client, SessionCredentials *msgs.BasicAuthCredentials, + request msgs.ClusterScaleRequest) (msgs.ClusterScaleResponse, error) { + response := msgs.ClusterScaleResponse{} + ctx := context.TODO() + request.ClientVersion = msgs.PGO_VERSION - var response msgs.ClusterScaleResponse + url := fmt.Sprintf("%s/clusters/scale/%s", SessionCredentials.APIServerURL, request.Name) + jsonValue, _ := json.Marshal(request) - url := fmt.Sprintf("%s/clusters/scale/%s", SessionCredentials.APIServerURL, arg) - log.Debug(url) - - action := "GET" - req, err := http.NewRequest(action, url, nil) + req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, bytes.NewBuffer(jsonValue)) if err != nil { return response, err } - q := req.URL.Query() - q.Add("replica-count", strconv.Itoa(ReplicaCount)) - q.Add("storage-config", StorageConfig) - q.Add("node-label", NodeLabel) - q.Add("version", msgs.PGO_VERSION) - q.Add("ccp-image-tag", CCPImageTag) - q.Add("service-type", ServiceType) - q.Add("namespace", ns) - req.URL.RawQuery = q.Encode() - + req.Header.Set("Content-Type", "application/json") req.SetBasicAuth(SessionCredentials.Username, SessionCredentials.Password) resp, err := httpclient.Do(req) if err != nil { return response, err } + defer resp.Body.Close() + log.Debugf("%v", resp) - err = StatusCheck(resp) - if err != nil { + + if err := StatusCheck(resp); err != nil { return response, err } if err := json.NewDecoder(resp.Body).Decode(&response); err != nil { - log.Printf("%v\n", resp.Body) + log.Debugf("%+v", resp.Body) fmt.Println("Error: ", err) log.Println(err) return response, err } return response, err - } diff --git a/cmd/pgo/api/scaledown.go b/cmd/pgo/api/scaledown.go index 1cc6691b72..8035e3c7d9 100644 --- a/cmd/pgo/api/scaledown.go +++ b/cmd/pgo/api/scaledown.go @@ -1,7 +1,7 @@ package api /* - Copyright 2018 - 2020 Crunchy Data Solutions, Inc. + Copyright 2018 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
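// ScaleCluster changes shape in the hunk above: options that previously
// traveled as GET query parameters (replica-count, storage-config, node-label,
// and so on) now ride in a JSON-encoded msgs.ClusterScaleRequest sent via
// POST. Only the Name and ClientVersion fields are visible in this hunk, so
// the other field names in this sketch (Namespace, ReplicaCount) are
// assumptions for illustration:
//
//	request := msgs.ClusterScaleRequest{
//		Name:         "mycluster",
//		Namespace:    "pgo", // assumed field name
//		ReplicaCount: 2,     // assumed field name
//	}
//	// ClientVersion is filled in by ScaleCluster itself before the POST.
//	response, err := api.ScaleCluster(httpclient, &SessionCredentials, request)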
You may obtain a copy of the License at @@ -16,6 +16,7 @@ package api */ import ( + "context" "encoding/json" "fmt" "net/http" @@ -29,13 +30,13 @@ import ( func ScaleDownCluster(httpclient *http.Client, clusterName, ScaleDownTarget string, DeleteData bool, SessionCredentials *msgs.BasicAuthCredentials, ns string) (msgs.ScaleDownResponse, error) { - var response msgs.ScaleDownResponse url := fmt.Sprintf("%s/scaledown/%s", SessionCredentials.APIServerURL, clusterName) log.Debug(url) + ctx := context.TODO() action := "GET" - req, err := http.NewRequest(action, url, nil) + req, err := http.NewRequestWithContext(ctx, action, url, nil) if err != nil { return response, err } @@ -67,19 +68,18 @@ func ScaleDownCluster(httpclient *http.Client, clusterName, ScaleDownTarget stri } return response, err - } func ScaleQuery(httpclient *http.Client, arg string, SessionCredentials *msgs.BasicAuthCredentials, ns string) (msgs.ScaleQueryResponse, error) { - var response msgs.ScaleQueryResponse url := SessionCredentials.APIServerURL + "/scale/" + arg + "?version=" + msgs.PGO_VERSION + "&namespace=" + ns log.Debug(url) + ctx := context.TODO() action := "GET" - req, err := http.NewRequest(action, url, nil) + req, err := http.NewRequestWithContext(ctx, action, url, nil) if err != nil { return response, err } @@ -105,5 +105,4 @@ func ScaleQuery(httpclient *http.Client, arg string, SessionCredentials *msgs.Ba } return response, err - } diff --git a/cmd/pgo/api/schedule.go b/cmd/pgo/api/schedule.go index 4007e77e1b..ec86e094be 100644 --- a/cmd/pgo/api/schedule.go +++ b/cmd/pgo/api/schedule.go @@ -1,7 +1,7 @@ package api /* - Copyright 2018 - 2020 Crunchy Data Solutions, Inc. + Copyright 2018 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at @@ -17,6 +17,7 @@ package api import ( "bytes" + "context" "encoding/json" "fmt" "net/http" @@ -34,12 +35,13 @@ const ( func CreateSchedule(client *http.Client, SessionCredentials *msgs.BasicAuthCredentials, r *msgs.CreateScheduleRequest) (msgs.CreateScheduleResponse, error) { var response msgs.CreateScheduleResponse + ctx := context.TODO() jsonValue, _ := json.Marshal(r) url := fmt.Sprintf(createScheduleURL, SessionCredentials.APIServerURL) log.Debugf("create schedule called [%s]", url) - req, err := http.NewRequest("POST", url, bytes.NewBuffer(jsonValue)) + req, err := http.NewRequestWithContext(ctx, "POST", url, bytes.NewBuffer(jsonValue)) if err != nil { return response, err } @@ -69,12 +71,13 @@ func CreateSchedule(client *http.Client, SessionCredentials *msgs.BasicAuthCrede func DeleteSchedule(client *http.Client, SessionCredentials *msgs.BasicAuthCredentials, r *msgs.DeleteScheduleRequest) (msgs.DeleteScheduleResponse, error) { var response msgs.DeleteScheduleResponse + ctx := context.TODO() jsonValue, _ := json.Marshal(r) url := fmt.Sprintf(deleteScheduleURL, SessionCredentials.APIServerURL) log.Debugf("delete schedule called [%s]", url) - req, err := http.NewRequest("POST", url, bytes.NewBuffer(jsonValue)) + req, err := http.NewRequestWithContext(ctx, "POST", url, bytes.NewBuffer(jsonValue)) if err != nil { return response, err } @@ -105,11 +108,12 @@ func DeleteSchedule(client *http.Client, SessionCredentials *msgs.BasicAuthCrede func ShowSchedule(client *http.Client, SessionCredentials *msgs.BasicAuthCredentials, r *msgs.ShowScheduleRequest) (msgs.ShowScheduleResponse, error) { var response msgs.ShowScheduleResponse + ctx := context.TODO() jsonValue, _ := json.Marshal(r) url := fmt.Sprintf(showScheduleURL, SessionCredentials.APIServerURL) log.Debugf("show schedule called [%s]", url) - req, err := http.NewRequest("POST", url, bytes.NewBuffer(jsonValue)) + req, err := http.NewRequestWithContext(ctx, "POST", url, bytes.NewBuffer(jsonValue)) if err != nil { return response, err } diff --git a/cmd/pgo/api/status.go b/cmd/pgo/api/status.go index ad70bd2f96..b266bcfc64 100644 --- a/cmd/pgo/api/status.go +++ b/cmd/pgo/api/status.go @@ -1,7 +1,7 @@ package api /* - Copyright 2018 - 2020 Crunchy Data Solutions, Inc. + Copyright 2018 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -16,21 +16,23 @@ package api */ import ( + "context" "encoding/json" "fmt" + "net/http" + msgs "github.com/crunchydata/postgres-operator/pkg/apiservermsgs" log "github.com/sirupsen/logrus" - "net/http" ) func ShowStatus(httpclient *http.Client, SessionCredentials *msgs.BasicAuthCredentials, ns string) (msgs.StatusResponse, error) { - var response msgs.StatusResponse url := SessionCredentials.APIServerURL + "/status?version=" + msgs.PGO_VERSION + "&namespace=" + ns log.Debug(url) + ctx := context.TODO() action := "GET" - req, err := http.NewRequest(action, url, nil) + req, err := http.NewRequestWithContext(ctx, action, url, nil) if err != nil { return response, err } @@ -55,5 +57,4 @@ func ShowStatus(httpclient *http.Client, SessionCredentials *msgs.BasicAuthCrede } return response, err - } diff --git a/cmd/pgo/api/test.go b/cmd/pgo/api/test.go index 887d67b056..dd42619a33 100644 --- a/cmd/pgo/api/test.go +++ b/cmd/pgo/api/test.go @@ -1,7 +1,7 @@ package api /* - Copyright 2018 - 2020 Crunchy Data Solutions, Inc. 
+ Copyright 2018 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -17,23 +17,25 @@ package api import ( "bytes" + "context" "encoding/json" "fmt" + "net/http" + msgs "github.com/crunchydata/postgres-operator/pkg/apiservermsgs" log "github.com/sirupsen/logrus" - "net/http" ) func ShowTest(httpclient *http.Client, SessionCredentials *msgs.BasicAuthCredentials, request *msgs.ClusterTestRequest) (msgs.ClusterTestResponse, error) { - var response msgs.ClusterTestResponse + ctx := context.TODO() jsonValue, _ := json.Marshal(request) url := SessionCredentials.APIServerURL + "/testclusters" log.Debug(url) action := "POST" - req, err := http.NewRequest(action, url, bytes.NewBuffer(jsonValue)) + req, err := http.NewRequestWithContext(ctx, action, url, bytes.NewBuffer(jsonValue)) if err != nil { return response, err } @@ -58,5 +60,4 @@ func ShowTest(httpclient *http.Client, SessionCredentials *msgs.BasicAuthCredent } return response, err - } diff --git a/cmd/pgo/api/upgrade.go b/cmd/pgo/api/upgrade.go index 6079a29023..8b03c09311 100644 --- a/cmd/pgo/api/upgrade.go +++ b/cmd/pgo/api/upgrade.go @@ -1,7 +1,7 @@ package api /* - Copyright 2018 - 2020 Crunchy Data Solutions, Inc. + Copyright 2018 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -17,6 +17,7 @@ package api import ( "bytes" + "context" "encoding/json" "net/http" @@ -25,15 +26,15 @@ import ( ) func CreateUpgrade(httpclient *http.Client, SessionCredentials *msgs.BasicAuthCredentials, request *msgs.CreateUpgradeRequest) (msgs.CreateUpgradeResponse, error) { - var response msgs.CreateUpgradeResponse + ctx := context.TODO() jsonValue, _ := json.Marshal(request) url := SessionCredentials.APIServerURL + "/upgrades" log.Debugf("CreateUpgrade called...[%s]", url) action := "POST" - req, err := http.NewRequest(action, url, bytes.NewBuffer(jsonValue)) + req, err := http.NewRequestWithContext(ctx, action, url, bytes.NewBuffer(jsonValue)) if err != nil { return response, err } diff --git a/cmd/pgo/api/user.go b/cmd/pgo/api/user.go index 38424ab17b..3b848f3028 100644 --- a/cmd/pgo/api/user.go +++ b/cmd/pgo/api/user.go @@ -1,7 +1,7 @@ package api /* - Copyright 2018 - 2020 Crunchy Data Solutions, Inc. + Copyright 2018 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at @@ -17,25 +17,27 @@ package api import ( "bytes" + "context" "encoding/json" "fmt" + "net/http" + msgs "github.com/crunchydata/postgres-operator/pkg/apiservermsgs" log "github.com/sirupsen/logrus" - "net/http" ) func ShowUser(httpclient *http.Client, SessionCredentials *msgs.BasicAuthCredentials, request *msgs.ShowUserRequest) (msgs.ShowUserResponse, error) { - var response msgs.ShowUserResponse response.Status.Code = msgs.Ok request.ClientVersion = msgs.PGO_VERSION + ctx := context.TODO() jsonValue, _ := json.Marshal(request) url := SessionCredentials.APIServerURL + "/usershow" log.Debugf("ShowUser called...[%s]", url) - req, err := http.NewRequest("POST", url, bytes.NewBuffer(jsonValue)) + req, err := http.NewRequestWithContext(ctx, "POST", url, bytes.NewBuffer(jsonValue)) if err != nil { response.Status.Code = msgs.Error return response, err @@ -62,20 +64,20 @@ func ShowUser(httpclient *http.Client, SessionCredentials *msgs.BasicAuthCredent } return response, err - } -func CreateUser(httpclient *http.Client, SessionCredentials *msgs.BasicAuthCredentials, request *msgs.CreateUserRequest) (msgs.CreateUserResponse, error) { +func CreateUser(httpclient *http.Client, SessionCredentials *msgs.BasicAuthCredentials, request *msgs.CreateUserRequest) (msgs.CreateUserResponse, error) { var response msgs.CreateUserResponse request.ClientVersion = msgs.PGO_VERSION + ctx := context.TODO() jsonValue, _ := json.Marshal(request) url := SessionCredentials.APIServerURL + "/usercreate" log.Debugf("createUsers called...[%s]", url) action := "POST" - req, err := http.NewRequest(action, url, bytes.NewBuffer(jsonValue)) + req, err := http.NewRequestWithContext(ctx, action, url, bytes.NewBuffer(jsonValue)) if err != nil { return response, err } @@ -104,17 +106,17 @@ func CreateUser(httpclient *http.Client, SessionCredentials *msgs.BasicAuthCrede } func DeleteUser(httpclient *http.Client, SessionCredentials *msgs.BasicAuthCredentials, request *msgs.DeleteUserRequest) (msgs.DeleteUserResponse, error) { - var response msgs.DeleteUserResponse request.ClientVersion = msgs.PGO_VERSION + ctx := context.TODO() jsonValue, _ := json.Marshal(request) url := SessionCredentials.APIServerURL + "/userdelete" log.Debugf("deleteUser called...[%s]", url) action := "POST" - req, err := http.NewRequest(action, url, bytes.NewBuffer(jsonValue)) + req, err := http.NewRequestWithContext(ctx, action, url, bytes.NewBuffer(jsonValue)) if err != nil { return response, err } @@ -140,20 +142,19 @@ func DeleteUser(httpclient *http.Client, SessionCredentials *msgs.BasicAuthCrede } return response, err - } func UpdateUser(httpclient *http.Client, SessionCredentials *msgs.BasicAuthCredentials, request *msgs.UpdateUserRequest) (msgs.UpdateUserResponse, error) { - var response msgs.UpdateUserResponse request.ClientVersion = msgs.PGO_VERSION + ctx := context.TODO() jsonValue, _ := json.Marshal(request) url := SessionCredentials.APIServerURL + "/userupdate" log.Debugf("UpdateUser called...[%s]", url) - req, err := http.NewRequest("POST", url, bytes.NewBuffer(jsonValue)) + req, err := http.NewRequestWithContext(ctx, "POST", url, bytes.NewBuffer(jsonValue)) if err != nil { return response, err } diff --git a/cmd/pgo/api/version.go b/cmd/pgo/api/version.go index 9ca743add1..f5a5c22b68 100644 --- a/cmd/pgo/api/version.go +++ b/cmd/pgo/api/version.go @@ -1,7 +1,7 @@ package api /* - Copyright 2018 - 2020 Crunchy Data Solutions, Inc. + Copyright 2018 - 2023 Crunchy Data Solutions, Inc. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -16,23 +16,25 @@ package api */ import ( + "context" "encoding/json" "fmt" + "net/http" + msgs "github.com/crunchydata/postgres-operator/pkg/apiservermsgs" log "github.com/sirupsen/logrus" - "net/http" ) func ShowVersion(httpclient *http.Client, SessionCredentials *msgs.BasicAuthCredentials) (msgs.VersionResponse, error) { - var response msgs.VersionResponse log.Debug("ShowVersion called ") + ctx := context.TODO() url := SessionCredentials.APIServerURL + "/version" log.Debug(url) - req, err := http.NewRequest("GET", url, nil) + req, err := http.NewRequestWithContext(ctx, "GET", url, nil) if err != nil { return response, err } @@ -61,5 +63,4 @@ func ShowVersion(httpclient *http.Client, SessionCredentials *msgs.BasicAuthCred } return response, err - } diff --git a/cmd/pgo/api/workflow.go b/cmd/pgo/api/workflow.go index 3289329aa1..da1997e365 100644 --- a/cmd/pgo/api/workflow.go +++ b/cmd/pgo/api/workflow.go @@ -1,7 +1,7 @@ package api /* - Copyright 2018 - 2020 Crunchy Data Solutions, Inc. + Copyright 2018 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -16,22 +16,24 @@ package api */ import ( + "context" "encoding/json" "fmt" + "net/http" + msgs "github.com/crunchydata/postgres-operator/pkg/apiservermsgs" log "github.com/sirupsen/logrus" - "net/http" ) func ShowWorkflow(httpclient *http.Client, workflowID string, SessionCredentials *msgs.BasicAuthCredentials, ns string) (msgs.ShowWorkflowResponse, error) { - var response msgs.ShowWorkflowResponse url := SessionCredentials.APIServerURL + "/workflow/" + workflowID + "?version=" + msgs.PGO_VERSION + "&namespace=" + ns log.Debugf("ShowWorkflow called...[%s]", url) + ctx := context.TODO() action := "GET" - req, err := http.NewRequest(action, url, nil) + req, err := http.NewRequestWithContext(ctx, action, url, nil) if err != nil { return response, err } @@ -56,5 +58,4 @@ func ShowWorkflow(httpclient *http.Client, workflowID string, SessionCredentials } return response, err - } diff --git a/cmd/pgo/cmd/auth.go b/cmd/pgo/cmd/auth.go index 322e5f5e9c..341c8ff237 100644 --- a/cmd/pgo/cmd/auth.go +++ b/cmd/pgo/cmd/auth.go @@ -1,7 +1,7 @@ package cmd /* - Copyright 2017 - 2020 Crunchy Data Solutions, Inc. + Copyright 2017 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at @@ -109,7 +109,7 @@ func getCredentialsFromFile() msgs.BasicAuthCredentials { fullPath := dir + "/" + ".pgouser" var creds msgs.BasicAuthCredentials - //look in env var for pgouser file + // look in env var for pgouser file pgoUser := os.Getenv(pgoUserFileEnvVar) if pgoUser != "" { fullPath = pgoUser @@ -125,7 +125,7 @@ func getCredentialsFromFile() msgs.BasicAuthCredentials { found = true } - //look in home directory for .pgouser file + // look in home directory for .pgouser file if !found { log.Debugf("looking in %s for credentials", fullPath) dat, err := ioutil.ReadFile(fullPath) @@ -140,7 +140,7 @@ func getCredentialsFromFile() msgs.BasicAuthCredentials { } } - //look in etc for pgouser file + // look in etc for pgouser file if !found { fullPath = "/etc/pgo/pgouser" dat, err := ioutil.ReadFile(fullPath) @@ -210,7 +210,7 @@ func GetTLSTransport() (*http.Transport, error) { caCertPool = x509.NewCertPool() } else { if pool, err := x509.SystemCertPool(); err != nil { - return nil, fmt.Errorf("while loading System CA pool - %s", err) + return nil, fmt.Errorf("while loading System CA pool - %w", err) } else { caCertPool = pool } @@ -227,12 +227,12 @@ func GetTLSTransport() (*http.Transport, error) { // Open trust file and extend trust pool if trustFile, err := os.Open(caCertPath); err != nil { - newErr := fmt.Errorf("unable to load TLS trust from %s - [%v]", caCertPath, err) + newErr := fmt.Errorf("unable to load TLS trust from %s - %w", caCertPath, err) return nil, newErr } else { err = tlsutil.ExtendTrust(caCertPool, trustFile) if err != nil { - newErr := fmt.Errorf("error reading %s - %v", caCertPath, err) + newErr := fmt.Errorf("error reading %s - %w", caCertPath, err) return nil, newErr } trustFile.Close() @@ -258,10 +258,11 @@ func GetTLSTransport() (*http.Transport, error) { certPair, err := tls.LoadX509KeyPair(clientCertPath, clientKeyPath) if err != nil { - return nil, fmt.Errorf("client certificate/key loading: %s", err) + return nil, fmt.Errorf("client certificate/key loading: %w", err) } // create a Transport object for use by the HTTP client + // #nosec: G402 return &http.Transport{ TLSClientConfig: &tls.Config{ RootCAs: caCertPool, diff --git a/cmd/pgo/cmd/backrest.go b/cmd/pgo/cmd/backrest.go index 46d843206e..04a2c29c95 100644 --- a/cmd/pgo/cmd/backrest.go +++ b/cmd/pgo/cmd/backrest.go @@ -2,7 +2,7 @@ package cmd /* - Copyright 2018 - 2020 Crunchy Data Solutions, Inc. + Copyright 2018 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -57,7 +57,6 @@ func createBackrestBackup(args []string, ns string) { fmt.Println("No clusters found.") return } - } // showBackrest .... @@ -84,8 +83,8 @@ func showBackrest(args []string, ns string) { log.Debugf("response = %v", response) log.Debugf("len of items = %d", len(response.Items)) - for _, backup := range response.Items { - printBackrest(&backup) + for i := range response.Items { + printBackrest(&response.Items[i]) } } } diff --git a/cmd/pgo/cmd/backup.go b/cmd/pgo/cmd/backup.go index 61225cc3ea..23306c7544 100644 --- a/cmd/pgo/cmd/backup.go +++ b/cmd/pgo/cmd/backup.go @@ -2,7 +2,7 @@ package cmd /* - Copyright 2017 - 2020 Crunchy Data Solutions, Inc. + Copyright 2017 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
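// Besides the comment and linter cleanups, auth.go moves its fmt.Errorf verbs
// from %s/%v to %w, which wraps the underlying error so callers can inspect it
// with the standard errors package. A self-contained sketch (standard library
// only; the sentinel os.ErrNotExist is just for demonstration):
//
//	package main
//
//	import (
//		"errors"
//		"fmt"
//		"os"
//	)
//
//	func main() {
//		wrapped := fmt.Errorf("while loading System CA pool - %w", os.ErrNotExist)
//		fmt.Println(errors.Is(wrapped, os.ErrNotExist)) // true; with %s or %v this would be false
//	}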
You may obtain a copy of the License at @@ -18,8 +18,12 @@ package cmd import ( "fmt" + "os" + "github.com/crunchydata/postgres-operator/cmd/pgo/api" "github.com/crunchydata/postgres-operator/internal/config" + msgs "github.com/crunchydata/postgres-operator/pkg/apiservermsgs" + log "github.com/sirupsen/logrus" "github.com/spf13/cobra" ) @@ -74,7 +78,6 @@ var backupCmd = &cobra.Command{ } } - }, } @@ -88,11 +91,33 @@ func init() { backupCmd.Flags().StringVarP(&PVCName, "pvc-name", "", "", "The PVC name to use for the backup instead of the default.") backupCmd.Flags().StringVarP(&PGDumpDB, "database", "d", "postgres", "The name of the database pgdump will backup.") backupCmd.Flags().StringVar(&backupType, "backup-type", "pgbackrest", "The backup type to perform. Default is pgbackrest. Valid backup types are pgbackrest and pgdump.") - backupCmd.Flags().StringVarP(&BackrestStorageType, "pgbackrest-storage-type", "", "", "The type of storage to use when scheduling pgBackRest backups. Either \"local\", \"s3\" or both, comma separated. (default \"local\")") - + backupCmd.Flags().StringVarP(&BackrestStorageType, "pgbackrest-storage-type", "", "", "The type of storage to use when scheduling pgBackRest backups. Either \"posix\", \"s3\" or both, comma separated. (default \"posix\")") } // deleteBackup .... -func deleteBackup(args []string, ns string) { - log.Debugf("deleteBackup called %v", args) +func deleteBackup(namespace, clusterName string) { + request := msgs.DeleteBackrestBackupRequest{ + ClusterName: clusterName, + Namespace: namespace, + Target: Target, + } + + // make the request + response, err := api.DeleteBackup(httpclient, &SessionCredentials, request) + + // if everything is OK, exit early + if err == nil && response.Status.Code == msgs.Ok { + return + } + + // if there is an error, or the response code is not ok, print the error and + // exit + if err != nil { + fmt.Println("Error: " + err.Error()) + } else if response.Status.Code == msgs.Error { + fmt.Println("Error: " + response.Status.Msg) + } + + // since we can only have errors at this point, exit with error + os.Exit(1) } diff --git a/cmd/pgo/cmd/cat.go b/cmd/pgo/cmd/cat.go index 08b3a93109..a7a78deb77 100644 --- a/cmd/pgo/cmd/cat.go +++ b/cmd/pgo/cmd/cat.go @@ -2,7 +2,7 @@ package cmd /* - Copyright 2019 - 2020 Crunchy Data Solutions, Inc. + Copyright 2019 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -42,7 +42,6 @@ var catCmd = &cobra.Command{ } else { cat(args, Namespace) } - }, } @@ -58,7 +57,6 @@ func cat(args []string, ns string) { request.Args = args request.Namespace = ns response, err := api.Cat(httpclient, &SessionCredentials, request) - if err != nil { fmt.Println("Error: " + err.Error()) os.Exit(2) @@ -77,5 +75,4 @@ func cat(args []string, ns string) { fmt.Println("No clusters found.") return } - } diff --git a/cmd/pgo/cmd/cluster.go b/cmd/pgo/cmd/cluster.go index 12179d8fcf..106c0eb818 100644 --- a/cmd/pgo/cmd/cluster.go +++ b/cmd/pgo/cmd/cluster.go @@ -1,7 +1,7 @@ package cmd /* - Copyright 2017 - 2020 Crunchy Data Solutions, Inc. + Copyright 2017 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
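// The flag help in backup.go above also renames the pgBackRest storage type
// "local" to "posix". An illustrative invocation under the new naming (the
// cluster name is hypothetical):
//
//	pgo backup mycluster --backup-type=pgbackrest --pgbackrest-storage-type=posix,s3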
You may obtain a copy of the License at @@ -21,13 +21,14 @@ import ( "os" "strings" + log "github.com/sirupsen/logrus" "github.com/spf13/cobra" + v1 "k8s.io/api/core/v1" "github.com/crunchydata/postgres-operator/cmd/pgo/api" "github.com/crunchydata/postgres-operator/cmd/pgo/util" crv1 "github.com/crunchydata/postgres-operator/pkg/apis/crunchydata.com/v1" msgs "github.com/crunchydata/postgres-operator/pkg/apiservermsgs" - log "github.com/sirupsen/logrus" ) // below are the tablespace parameters and the expected values of each @@ -75,7 +76,6 @@ func deleteCluster(args []string, ns string) { for _, arg := range args { r.Clustername = arg response, err := api.DeleteCluster(httpclient, &r, &SessionCredentials) - if err != nil { fmt.Println("Error: " + err.Error()) os.Exit(2) @@ -90,12 +90,10 @@ func deleteCluster(args []string, ns string) { } } - } // showCluster ... func showCluster(args []string, ns string) { - log.Debugf("showCluster called %v", args) if OutputFormat != "" { @@ -149,12 +147,11 @@ func showCluster(args []string, ns string) { return } - for _, clusterDetail := range response.Results { - printCluster(&clusterDetail) + for i := range response.Results { + printCluster(&response.Results[i]) } } - } // printCluster @@ -173,7 +170,7 @@ func printCluster(detail *msgs.ShowClusterDetail) { podStr := fmt.Sprintf("%spod : %s (%s) on %s (%s) %s", TreeBranch, pod.Name, string(pod.Phase), pod.NodeName, pod.ReadyStatus, podType) fmt.Println(podStr) for _, pvc := range pod.PVC { - fmt.Println(fmt.Sprintf("%spvc: %s (%s)", TreeBranch+TreeBranch, pvc.Name, pvc.Capacity)) + fmt.Printf("%spvc: %s (%s)\n", TreeBranch+TreeBranch, pvc.Name, pvc.Capacity) } } @@ -237,7 +234,6 @@ func printCluster(detail *msgs.ShowClusterDetail) { fmt.Printf("%s=%s ", k, v) } fmt.Println("") - } func printPolicies(d *msgs.ShowClusterDeployment) { @@ -264,13 +260,14 @@ func createCluster(args []string, ns string, createClusterCmd *cobra.Command) { r.Name = args[0] r.Namespace = ns r.ReplicaCount = ClusterReplicaCount + r.NodeAffinityType = getNodeAffinityType(NodeLabel, NodeAffinityType) r.NodeLabel = NodeLabel r.PasswordLength = PasswordLength r.PasswordSuperuser = PasswordSuperuser r.PasswordReplication = PasswordReplication r.Password = Password r.SecretFrom = SecretFrom - r.UserLabels = UserLabels + r.UserLabels = getLabels(UserLabels) r.Policies = PoliciesFlag r.CCPImageTag = CCPImageTag r.CCPImage = CCPImage @@ -282,7 +279,8 @@ func createCluster(args []string, ns string, createClusterCmd *cobra.Command) { r.ExporterMemoryRequest = ExporterMemoryRequest r.ExporterMemoryLimit = ExporterMemoryLimit r.BadgerFlag = BadgerFlag - r.ServiceType = ServiceType + r.ServiceType = v1.ServiceType(ServiceType) + r.PgBouncerServiceType = v1.ServiceType(ServiceTypePgBouncer) r.AutofailFlag = !DisableAutofailFlag r.PgbouncerFlag = PgbouncerFlag r.BackrestStorageConfig = BackrestStorageConfig @@ -327,6 +325,7 @@ func createCluster(args []string, ns string, createClusterCmd *cobra.Command) { r.PgBouncerMemoryRequest = PgBouncerMemoryRequest r.PgBouncerMemoryLimit = PgBouncerMemoryLimit r.PgBouncerReplicas = PgBouncerReplicas + r.PgBouncerTLSSecret = PgBouncerTLSSecret // determine if the user wants to create tablespaces as part of this request, // and if so, set the values r.Tablespaces = getTablespaces(Tablespaces) @@ -337,6 +336,8 @@ func createCluster(args []string, ns string, createClusterCmd *cobra.Command) { // set any annotations r.Annotations = getClusterAnnotations(Annotations, AnnotationsPostgres, AnnotationsBackrest, 
	AnnotationsPgBouncer)
+	// set any tolerations
+	r.Tolerations = getClusterTolerations(Tolerations, false)
	// only set SyncReplication in the request if actually provided via the CLI
	if createClusterCmd.Flag("sync-replication").Changed {
@@ -547,6 +548,110 @@ func getTablespaces(tablespaceParams []string) []msgs.ClusterTablespaceDetail {
	return tablespaces
}
+// getClusterTolerations determines if there are any Pod tolerations to set
+// and converts from the defined string form to the standard Toleration object
+//
+// The strings should follow these formats:
+//
+// Operator - rule:Effect
+//
+// Exists:
+// - key
+// - key:Effect
+//
+// Equals:
+// - key=value
+// - key=value:Effect
+//
+// If the remove flag is set to true, check for a trailing "-" at the end of
+// each item, as this will be a remove list. Otherwise, only consider
+// tolerations that are not being removed.
+func getClusterTolerations(tolerationList []string, remove bool) []v1.Toleration {
+	tolerations := make([]v1.Toleration, 0)
+
+	// if no tolerations, exit early
+	if len(tolerationList) == 0 {
+		return tolerations
+	}
+
+	// begin the joys of parsing
+	for _, t := range tolerationList {
+		toleration := v1.Toleration{}
+		ruleEffect := strings.Split(t, ":")
+
+		// an item splits into a rule and an optional effect; more than one
+		// ":" is an error
+		if len(ruleEffect) < 1 || len(ruleEffect) > 2 {
+			fmt.Printf("invalid format for toleration: %q\n", t)
+			os.Exit(1)
+		}
+
+		// for ease of reading
+		rule, effectStr := ruleEffect[0], ""
+		// effect string is only set if ruleEffect is of length 2
+		if len(ruleEffect) == 2 {
+			effectStr = ruleEffect[1]
+		}
+
+		// determine if the effect is for removal or not, as we will continue the
+		// loop based on that.
+		//
+		// In other words, skip processing the value if either:
+		// - This *is* removal mode AND the value *does not* have the removal suffix "-"
+		// - This *is not* removal mode AND the value *does* have the removal suffix "-"
+		if (remove && !strings.HasSuffix(effectStr, "-") && !strings.HasSuffix(rule, "-")) ||
+			(!remove && (strings.HasSuffix(effectStr, "-") || strings.HasSuffix(rule, "-"))) {
+			continue
+		}
+
+		// no matter what, we can trim any trailing "-" off of the string, and cast
+		// it as a TaintEffect
+		rule = strings.TrimSuffix(rule, "-")
+		effect := v1.TaintEffect(strings.TrimSuffix(effectStr, "-"))
+
+		// see if the effect is a valid effect
+		if !isValidTaintEffect(effect) {
+			fmt.Printf("invalid taint effect for toleration: %q\n", effect)
+			os.Exit(1)
+		}
+
+		toleration.Effect = effect
+
+		// determine if the rule is an Exists or Equals operation
+		keyValue := strings.Split(rule, "=")
+
+		if len(keyValue) < 1 || len(keyValue) > 2 {
+			fmt.Printf("invalid rule for toleration: %q\n", rule)
+			os.Exit(1)
+		}
+
+		// no matter what, we have a key
+		toleration.Key = keyValue[0]
+
+		// the following determines the operation to use for the toleration and
+		// whether we should assign a value
+		if len(keyValue) == 1 {
+			toleration.Operator = v1.TolerationOpExists
+		} else {
+			toleration.Operator = v1.TolerationOpEqual
+			toleration.Value = keyValue[1]
+		}
+
+		// and append to the list of tolerations
+		tolerations = append(tolerations, toleration)
+	}
+
+	return tolerations
+}
+
+// isValidTaintEffect returns true if the effect passed in is a valid
+// TaintEffect, otherwise false
+func isValidTaintEffect(taintEffect v1.TaintEffect) bool {
+	return (taintEffect == v1.TaintEffectNoSchedule ||
+		taintEffect == v1.TaintEffectPreferNoSchedule ||
+		taintEffect == v1.TaintEffectNoExecute ||
+		taintEffect
== "") +} + // isTablespaceParam returns true if the parameter in question is acceptable for // using with a tablespace. func isTablespaceParam(param string) bool { @@ -596,7 +701,9 @@ func updateCluster(args []string, ns string) { r.ExporterCPULimit = ExporterCPULimit r.ExporterMemoryRequest = ExporterMemoryRequest r.ExporterMemoryLimit = ExporterMemoryLimit + r.ExporterRotatePassword = ExporterRotatePassword r.Clustername = args + r.ServiceType = v1.ServiceType(ServiceType) r.Startup = Startup r.Shutdown = Shutdown // set the container resource requests @@ -610,6 +717,8 @@ func updateCluster(args []string, ns string) { // set any annotations r.Annotations = getClusterAnnotations(Annotations, AnnotationsPostgres, AnnotationsBackrest, AnnotationsPgBouncer) + r.Tolerations = getClusterTolerations(Tolerations, false) + r.TolerationsDelete = getClusterTolerations(Tolerations, true) // check to see if EnableStandby or DisableStandby is set. If so, // set a value for Standby @@ -627,6 +736,20 @@ func updateCluster(args []string, ns string) { r.Autofail = msgs.UpdateClusterAutofailDisable } + // check to see if the metrics sidecar needs to be enabled or disabled + if EnableMetrics { + r.Metrics = msgs.UpdateClusterMetricsEnable + } else if DisableMetrics { + r.Metrics = msgs.UpdateClusterMetricsDisable + } + + // check to see if the pgBadger sidecar needs to be enabled or disabled + if EnablePGBadger { + r.PGBadger = msgs.UpdateClusterPGBadgerEnable + } else if DisablePGBadger { + r.PGBadger = msgs.UpdateClusterPGBadgerDisable + } + // if the user provided resources for CPU or Memory, validate them to ensure // they are valid Kubernetes values if err := util.ValidateQuantity(r.CPURequest, "cpu"); err != nil { @@ -690,7 +813,6 @@ func updateCluster(args []string, ns string) { } response, err := api.UpdateCluster(httpclient, &r, &SessionCredentials) - if err != nil { fmt.Println("Error: " + err.Error()) os.Exit(2) @@ -703,5 +825,4 @@ func updateCluster(args []string, ns string) { } else { fmt.Println("Error: " + response.Status.Msg) } - } diff --git a/cmd/pgo/cmd/common.go b/cmd/pgo/cmd/common.go index f1e8f84e70..c0ec21439c 100644 --- a/cmd/pgo/cmd/common.go +++ b/cmd/pgo/cmd/common.go @@ -1,7 +1,7 @@ package cmd /* - Copyright 2017 - 2020 Crunchy Data Solutions, Inc. + Copyright 2017 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -18,7 +18,12 @@ package cmd import ( "encoding/json" "fmt" + "os" "reflect" + "strings" + + operatorutil "github.com/crunchydata/postgres-operator/internal/util" + crv1 "github.com/crunchydata/postgres-operator/pkg/apis/crunchydata.com/v1" ) // unitType is used to group together the unit types @@ -82,6 +87,33 @@ func getHeaderLength(value interface{}, fieldName string) int { return len(field.String()) } +// getLabels determines if the provided labels are in the correct format, +// and if so, will return them in the appropriate map. +// +// If not, we will abort. 
+func getLabels(labels []string) map[string]string { + clusterLabels := map[string]string{} + + for _, label := range labels { + parts := strings.Split(label, "=") + + if len(parts) != 2 { + fmt.Printf("invalid label: found %q, should be \"key=value\"\n", label) + os.Exit(1) + } + + clusterLabels[parts[0]] = parts[1] + } + + // perform a validation that can save us a round trip to the server + if err := operatorutil.ValidateLabels(clusterLabels); err != nil { + fmt.Println(err.Error()) + os.Exit(1) + } + + return clusterLabels +} + // getMaxLength returns the maxLength of the strings of a particular value in // the struct. Increases the max length by 1 to include a buffer func getMaxLength(results []interface{}, title, fieldName string) int { @@ -98,6 +130,36 @@ func getMaxLength(results []interface{}, title, fieldName string) int { return maxLength + 1 } +// getNodeAffinityType takes a string value of "NodeAffinityType" and converts +// it to the proper enumeration +func getNodeAffinityType(nodeLabel, nodeAffinityType string) crv1.NodeAffinityType { + // if nodeAffinityType is not set, just exit with the default + if nodeAffinityType == "" { + return crv1.NodeAffinityTypePreferred + } + + // force an exit if nodeAffinityType is set but nodeLabel is not + if nodeLabel == "" && nodeAffinityType != "" { + fmt.Println("error: --node-affinity-type set, but --node-label not set") + os.Exit(1) + } + + // and away we go + switch nodeAffinityType { + default: + fmt.Printf("error: invalid node affinity type %q. choices are: preferred required\n", nodeAffinityType) + os.Exit(1) + case "preferred", "prefer": + return crv1.NodeAffinityTypePreferred + case "required", "require": + return crv1.NodeAffinityTypeRequired + } + + // one should never get to here because of the exit, but we need to compile + // the program. Yes, we really shouldn't be exiting. + return crv1.NodeAffinityTypePreferred +} + // getSizeAndUnit determines the best size to return based on the best unit // where unit is KB, MB, GB, etc... func getSizeAndUnit(size int64) (float64, unitType) { diff --git a/cmd/pgo/cmd/config.go b/cmd/pgo/cmd/config.go index b8e4d89d0f..78b4086077 100644 --- a/cmd/pgo/cmd/config.go +++ b/cmd/pgo/cmd/config.go @@ -1,7 +1,7 @@ package cmd /* - Copyright 2018 - 2020 Crunchy Data Solutions, Inc. + Copyright 2018 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -28,11 +28,9 @@ import ( ) func showConfig(args []string, ns string) { - log.Debugf("showConfig called %v", args) response, err := api.ShowConfig(httpclient, &SessionCredentials, ns) - if err != nil { fmt.Println("Error: " + err.Error()) os.Exit(2) @@ -60,5 +58,4 @@ func showConfig(args []string, ns string) { } fmt.Println(string(y)) - } diff --git a/cmd/pgo/cmd/create.go b/cmd/pgo/cmd/create.go index 57e4e77eb6..a307c2b99a 100644 --- a/cmd/pgo/cmd/create.go +++ b/cmd/pgo/cmd/create.go @@ -1,7 +1,7 @@ package cmd /* - Copyright 2017 - 2020 Crunchy Data Solutions, Inc. + Copyright 2017 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
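// The helpers added above translate plain CLI strings into Kubernetes API
// values: getClusterTolerations parses "key[=value][:Effect]" items (a
// trailing "-" marks removal), getLabels splits "key=value" pairs into a map,
// and getNodeAffinityType maps preferred/required onto the CRD enumeration.
// A few inputs traced against those parsing rules (the values themselves are
// illustrative):
//
//	tolerations := getClusterTolerations([]string{
//		"zone=east:NoSchedule",  // Equal operator: key "zone", value "east", effect NoSchedule
//		"highspeed",             // Exists operator: key only, empty effect
//		"zone=east:NoSchedule-", // trailing "-": skipped while remove=false
//	}, false)
//	// => two v1.Toleration values; the third item is picked up only by the
//	//    remove=true pass that feeds updateCluster's TolerationsDelete.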
You may obtain a copy of the License at @@ -23,51 +23,54 @@ import ( "github.com/spf13/cobra" ) -var ClusterReplicaCount int -var ManagedUser bool -var AllNamespaces bool -var BackrestStorageConfig, ReplicaStorageConfig, StorageConfig string -var CustomConfig string -var ArchiveFlag, DisableAutofailFlag, EnableAutofailFlag, PgbouncerFlag, MetricsFlag, BadgerFlag bool -var BackrestRestoreFrom string -var CCPImage string -var CCPImageTag string -var CCPImagePrefix string -var PGOImagePrefix string -var Database string -var Password string -var SecretFrom string -var PoliciesFlag, PolicyFile, PolicyURL string -var UserLabels string -var Tablespaces []string -var ServiceType string -var Schedule string -var ScheduleOptions string -var ScheduleType string -var SchedulePolicy string -var ScheduleDatabase string -var ScheduleSecret string -var PGBackRestType string -var Secret string -var PgouserPassword, PgouserRoles, PgouserNamespaces string -var Permissions string -var PodAntiAffinity string -var PodAntiAffinityPgBackRest string -var PodAntiAffinityPgBouncer string -var SyncReplication bool -var BackrestConfig string -var BackrestS3Key string -var BackrestS3KeySecret string -var BackrestS3Bucket string -var BackrestS3Endpoint string -var BackrestS3Region string -var BackrestS3URIStyle string -var BackrestS3VerifyTLS bool -var PVCSize string -var BackrestPVCSize string -var WALStorageConfig string -var WALPVCSize string -var RestoreFrom string +var ( + ClusterReplicaCount int + ManagedUser bool + AllNamespaces bool + BackrestStorageConfig, ReplicaStorageConfig, StorageConfig string + CustomConfig string + ArchiveFlag, DisableAutofailFlag, EnableAutofailFlag, PgbouncerFlag, MetricsFlag, BadgerFlag bool + BackrestRestoreFrom string + CCPImage string + CCPImageTag string + CCPImagePrefix string + PGOImagePrefix string + Database string + Password string + SecretFrom string + PoliciesFlag, PolicyFile string + UserLabels []string + Tablespaces []string + ServiceType string + ServiceTypePgBouncer string + Schedule string + ScheduleOptions string + ScheduleType string + SchedulePolicy string + ScheduleDatabase string + ScheduleSecret string + PGBackRestType string + Secret string + PgouserPassword, PgouserRoles, PgouserNamespaces string + Permissions string + PodAntiAffinity string + PodAntiAffinityPgBackRest string + PodAntiAffinityPgBouncer string + SyncReplication bool + BackrestConfig string + BackrestS3Key string + BackrestS3KeySecret string + BackrestS3Bucket string + BackrestS3Endpoint string + BackrestS3Region string + BackrestS3URIStyle string + BackrestS3VerifyTLS bool + PVCSize string + BackrestPVCSize string + WALStorageConfig string + WALPVCSize string + RestoreFrom string +) // group the annotation requests var ( @@ -107,6 +110,10 @@ var BackrestS3CASecretName string // BackrestRepoPath allows the pgBackRest repo path to be defined instead of using the default var BackrestRepoPath string +// NodeAffinityType needs to be used with "NodeLabel" and can be one of +// "preferred" or "required" -- gets mapped to an enumeration +var NodeAffinityType string + // Standby determines whether or not the cluster should be created as a standby cluster var Standby bool @@ -122,6 +129,9 @@ var PasswordReplication string // variables used for setting up TLS-enabled PostgreSQL clusters var ( + // PgBouncerTLSSecret is the name of the secret that contains the + // TLS information for enabling TLS for pgBouncer + PgBouncerTLSSecret string // TLSOnly indicates that only TLS connections will be accepted for a 
// PostgreSQL cluster
 	TLSOnly bool
@@ -137,6 +147,23 @@ var (
 	CASecret string
 )
 
+// Tolerations is a collection of Pod tolerations that can be applied, which
+// use the following format for the different operations
+//
+// Exists - key:Effect
+// Equals - key=value:Effect
+//
+// Effect can be optional.
+//
+// Example:
+//
+// zone=east:NoSchedule,highspeed:NoSchedule
+//
+// A toleration can be removed by adding a "-" to the end, e.g.:
+//
+// zone=east:NoSchedule-
+var Tolerations []string
+
 var CreateCmd = &cobra.Command{
 	Use:   "create",
 	Short: "Create a Postgres Operator resource",
@@ -219,8 +246,8 @@ var createPolicyCmd = &cobra.Command{
 			Namespace = PGONamespace
 		}
 		log.Debug("create policy called ")
-		if PolicyFile == "" && PolicyURL == "" {
-			fmt.Println(`Error: The --in-file or --url flags are required to create a policy.`)
+		if PolicyFile == "" {
+			fmt.Println(`Error: The --in-file flag is required to create a policy.`)
 			return
 		}
@@ -240,7 +267,6 @@ var createPgAdminCmd = &cobra.Command{
 	pgo create pgadmin mycluster`,
 	Run: func(cmd *cobra.Command, args []string) {
-
 		if Namespace == "" {
 			Namespace = PGONamespace
 		}
@@ -262,7 +288,6 @@ var createPgbouncerCmd = &cobra.Command{
 	pgo create pgbouncer mycluster`,
 	Run: func(cmd *cobra.Command, args []string) {
-
 		if Namespace == "" {
 			Namespace = PGONamespace
 		}
@@ -290,7 +315,6 @@ var createScheduleCmd = &cobra.Command{
 	pgo create schedule --schedule="* * * * *" --schedule-type=pgbackrest --pgbackrest-backup-type=full mycluster`,
 	Run: func(cmd *cobra.Command, args []string) {
-
 		if Namespace == "" {
 			Namespace = PGONamespace
 		}
@@ -314,7 +338,6 @@ var createUserCmd = &cobra.Command{
 	pgo create user --username=someuser --selector=name=mycluster --managed
 	pgo create user --username=user1 --selector=name=mycluster`,
 	Run: func(cmd *cobra.Command, args []string) {
-
 		if Namespace == "" {
 			Namespace = PGONamespace
 		}
@@ -369,7 +392,8 @@ func init() {
 	createClusterCmd.Flags().StringVarP(&CustomConfig, "custom-config", "", "", "The name of a configMap that holds custom PostgreSQL configuration files used to override defaults.")
 	createClusterCmd.Flags().StringVarP(&Database, "database", "d", "", "If specified, sets the name of the initial database that is created for the user. Defaults to the value set in the PostgreSQL Operator configuration, or if that is not present, the name of the cluster")
 	createClusterCmd.Flags().BoolVarP(&DisableAutofailFlag, "disable-autofail", "", false, "Disables autofail capabilities in the cluster following cluster initialization.")
-	createClusterCmd.Flags().StringVarP(&UserLabels, "labels", "l", "", "The labels to apply to this cluster.")
+	createClusterCmd.Flags().StringSliceVar(&UserLabels, "label", []string{}, "Add labels to apply to the PostgreSQL cluster, "+
+		"e.g. \"key=value\", \"prefix/key=value\". Can specify flag multiple times.")
 	createClusterCmd.Flags().StringVar(&MemoryRequest, "memory", "", "Set the amount of RAM to request, e.g. "+
 		"1GiB. Overrides the default server value.")
 	createClusterCmd.Flags().StringVar(&MemoryLimit, "memory-limit", "", "Set the amount of RAM to limit, e.g. "+
@@ -383,6 +407,8 @@ func init() {
 		"the Crunchy Postgres Exporter sidecar container. Defaults to server value (24Mi).")
 	createClusterCmd.Flags().StringVar(&ExporterMemoryLimit, "exporter-memory-limit", "", "Set the amount of memory to limit for "+
 		"the Crunchy Postgres Exporter sidecar container.")
+	createClusterCmd.Flags().StringVar(&NodeAffinityType, "node-affinity-type", "", "Sets the type of node affinity to use. "+
+		"Can be either preferred (default) or required. Must be used with --node-label")
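The --node-affinity-type value registered just above accepts only "preferred" or "required" and, per the comment on NodeAffinityType earlier in this file, is mapped to an enumeration before it reaches the API server. A self-contained sketch of the getNodeAffinityType helper that later hunks in this changeset call (restore.go, scale.go); the local type and constant names here are illustrative assumptions, not taken from the changeset:

    package cmd

    // NodeAffinityType stands in for the enumeration the real helper maps to.
    type NodeAffinityType int

    const (
    	NodeAffinityTypePreferred NodeAffinityType = iota
    	NodeAffinityTypeRequired
    )

    // getNodeAffinityType sketches the mapping: "required" opts in to strict
    // scheduling, anything else falls back to the documented default,
    // "preferred". The node label is passed along so the signature matches the
    // call sites later in this diff.
    func getNodeAffinityType(nodeLabel, nodeAffinityType string) NodeAffinityType {
    	if nodeLabel != "" && nodeAffinityType == "required" {
    		return NodeAffinityTypeRequired
    	}
    	return NodeAffinityTypePreferred
    }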
"+ + "Can be either preferred (default) or required. Must be used with --node-label") createClusterCmd.Flags().StringVarP(&NodeLabel, "node-label", "", "", "The node label (key=value) to use in placing the primary database. If not set, any node is used.") createClusterCmd.Flags().StringVarP(&Password, "password", "", "", "The password to use for standard user account created during cluster initialization.") createClusterCmd.Flags().IntVarP(&PasswordLength, "password-length", "", 0, "If no password is supplied, sets the length of the automatically generated password. Defaults to the value set on the server.") @@ -398,7 +424,7 @@ func init() { createClusterCmd.Flags().StringVar(&BackrestMemoryLimit, "pgbackrest-memory-limit", "", "Set the amount of memory to limit for "+ "the pgBackRest repository.") createClusterCmd.Flags().StringVarP(&BackrestPVCSize, "pgbackrest-pvc-size", "", "", - `The size of the PVC capacity for the pgBackRest repository. Overrides the value set in the storage class. This is ignored if the storage type of "local" is not used. Must follow the standard Kubernetes format, e.g. "10.1Gi"`) + `The size of the PVC capacity for the pgBackRest repository. Overrides the value set in the storage class. This is ignored if the storage type of "posix" is not used. Must follow the standard Kubernetes format, e.g. "10.1Gi"`) createClusterCmd.Flags().StringVarP(&BackrestRepoPath, "pgbackrest-repo-path", "", "", "The pgBackRest repository path that should be utilized instead of the default. Required "+ "for standby\nclusters to define the location of an existing pgBackRest repository.") @@ -423,7 +449,7 @@ func init() { createClusterCmd.Flags().StringVarP(&BackrestS3URIStyle, "pgbackrest-s3-uri-style", "", "", "Specifies whether \"host\" or \"path\" style URIs will be used when connecting to S3.") createClusterCmd.Flags().BoolVarP(&BackrestS3VerifyTLS, "pgbackrest-s3-verify-tls", "", true, "This sets if pgBackRest should verify the TLS certificate when connecting to S3. To disable, use \"--pgbackrest-s3-verify-tls=false\".") createClusterCmd.Flags().StringVar(&BackrestStorageConfig, "pgbackrest-storage-config", "", "The name of the storage config in pgo.yaml to use for the pgBackRest local repository.") - createClusterCmd.Flags().StringVarP(&BackrestStorageType, "pgbackrest-storage-type", "", "", "The type of storage to use with pgBackRest. Either \"local\", \"s3\" or both, comma separated. (default \"local\")") + createClusterCmd.Flags().StringVarP(&BackrestStorageType, "pgbackrest-storage-type", "", "", "The type of storage to use with pgBackRest. Either \"posix\", \"s3\" or both, comma separated. (default \"posix\")") createClusterCmd.Flags().BoolVarP(&BadgerFlag, "pgbadger", "", false, "Adds the crunchy-pgbadger container to the database pod.") createClusterCmd.Flags().BoolVarP(&PgbouncerFlag, "pgbouncer", "", false, "Adds a crunchy-pgbouncer deployment to the cluster.") createClusterCmd.Flags().StringVar(&PgBouncerCPURequest, "pgbouncer-cpu", "", "Set the number of millicores to request for CPU "+ @@ -435,6 +461,10 @@ func init() { createClusterCmd.Flags().StringVar(&PgBouncerMemoryLimit, "pgbouncer-memory-limit", "", "Set the amount of memory to limit for "+ "pgBouncer.") createClusterCmd.Flags().Int32Var(&PgBouncerReplicas, "pgbouncer-replicas", 0, "Set the total number of pgBouncer instances to deploy. If not set, defaults to 1.") + createClusterCmd.Flags().StringVar(&ServiceTypePgBouncer, "pgbouncer-service-type", "", "The Service type to use for pgBouncer. 
+	createClusterCmd.Flags().StringVar(&PgBouncerTLSSecret, "pgbouncer-tls-secret", "", "The name of the secret "+
+		"that contains the TLS keypair to use for enabling pgBouncer to accept TLS connections. "+
+		"Must also set server-tls-secret and server-ca-secret.")
 	createClusterCmd.Flags().StringVarP(&ReplicaStorageConfig, "replica-storage-config", "", "", "The name of a Storage config in pgo.yaml to use for the cluster replica storage.")
 	createClusterCmd.Flags().StringVarP(&PodAntiAffinity, "pod-anti-affinity", "", "", "Specifies the type of anti-affinity that should be utilized when applying "+
@@ -465,13 +495,17 @@ func init() {
 	createClusterCmd.Flags().StringVar(&TLSSecret, "server-tls-secret", "", "The name of the secret that contains "+
 		"the TLS keypair to use for enabling the PostgreSQL cluster to accept TLS connections. "+
 		"Must be used with \"server-ca-secret\"")
-	createClusterCmd.Flags().StringVarP(&ServiceType, "service-type", "", "", "The Service type to use for the PostgreSQL cluster. If not set, the pgo.yaml default will be used.")
+	createClusterCmd.Flags().StringVar(&ServiceType, "service-type", "", "The Service type to use for the PostgreSQL cluster. If not set, the pgo.yaml default will be used.")
 	createClusterCmd.Flags().BoolVar(&ShowSystemAccounts, "show-system-accounts", false, "Include the system accounts in the results.")
 	createClusterCmd.Flags().StringVarP(&StorageConfig, "storage-config", "", "", "The name of a Storage config in pgo.yaml to use for the cluster storage.")
 	createClusterCmd.Flags().BoolVarP(&SyncReplication, "sync-replication", "", false, "Enables synchronous replication for the cluster.")
 	createClusterCmd.Flags().BoolVar(&TLSOnly, "tls-only", false, "If true, forces all PostgreSQL connections to be over TLS. "+
 		"Must also set \"server-tls-secret\" and \"server-ca-secret\"")
+	createClusterCmd.Flags().StringSliceVar(&Tolerations, "toleration", []string{},
+		"Set Pod tolerations for each PostgreSQL instance in a cluster.\n"+
+		"The general format is \"key=value:Effect\"\n"+
+		"For example, to add an Exists and an Equals toleration: \"--toleration=ssd:NoSchedule,zone=east:NoSchedule\"")
 	createClusterCmd.Flags().BoolVarP(&Standby, "standby", "", false, "Creates a standby cluster "+
 		"that replicates from a pgBackRest repository in AWS S3.")
 	createClusterCmd.Flags().StringSliceVar(&Tablespaces, "tablespace", []string{},
@@ -496,7 +530,7 @@ func init() {
 	// pgo create pgbouncer
 	createPgbouncerCmd.Flags().StringVar(&PgBouncerCPURequest, "cpu", "", "Set the number of millicores to request for CPU "+
 		"for pgBouncer. Defaults to being unset.")
-	createPgbouncerCmd.Flags().StringVar(&PgBouncerCPULimit, "cpu-limit", "", "Set the number of millicores to request for CPU "+
+	createPgbouncerCmd.Flags().StringVar(&PgBouncerCPULimit, "cpu-limit", "", "Set the number of millicores to limit for CPU "+
 		"for pgBouncer.")
 	createPgbouncerCmd.Flags().StringVar(&PgBouncerMemoryRequest, "memory", "", "Set the amount of memory to request for "+
@@ -504,6 +538,10 @@ func init() {
 		"pgBouncer.")
 	createPgbouncerCmd.Flags().Int32Var(&PgBouncerReplicas, "replicas", 0, "Set the total number of pgBouncer instances to deploy. If not set, defaults to 1.")
 	createPgbouncerCmd.Flags().StringVarP(&Selector, "selector", "s", "", "The selector to use for cluster filtering.")
+	createPgbouncerCmd.Flags().StringVar(&ServiceType, "service-type", "", "The Service type to use for pgBouncer. 
Defaults to the Service type of the PostgreSQL cluster.") + createPgbouncerCmd.Flags().StringVar(&PgBouncerTLSSecret, "tls-secret", "", "The name of the secret "+ + "that contains the TLS keypair to use for enabling pgBouncer to accept TLS connections. "+ + "The PostgreSQL cluster must have TLS enabled.") // "pgo create pgouser" flags createPgouserCmd.Flags().BoolVarP(&AllNamespaces, "all-namespaces", "", false, "specifies this user will have access to all namespaces.") @@ -514,12 +552,11 @@ func init() { // "pgo create policy" flags createPolicyCmd.Flags().StringVarP(&PolicyFile, "in-file", "i", "", "The policy file path to use for adding a policy.") - createPolicyCmd.Flags().StringVarP(&PolicyURL, "url", "u", "", "The url to use for adding a policy.") // "pgo create schedule" flags createScheduleCmd.Flags().StringVarP(&ScheduleDatabase, "database", "", "", "The database to run the SQL policy against.") createScheduleCmd.Flags().StringVarP(&PGBackRestType, "pgbackrest-backup-type", "", "", "The type of pgBackRest backup to schedule (full, diff or incr).") - createScheduleCmd.Flags().StringVarP(&BackrestStorageType, "pgbackrest-storage-type", "", "", "The type of storage to use when scheduling pgBackRest backups. Either \"local\", \"s3\" or both, comma separated. (default \"local\")") + createScheduleCmd.Flags().StringVarP(&BackrestStorageType, "pgbackrest-storage-type", "", "", "The type of storage to use when scheduling pgBackRest backups. Either \"posix\", \"s3\" or both, comma separated. (default \"posix\")") createScheduleCmd.Flags().StringVarP(&CCPImageTag, "ccp-image-tag", "c", "", "The CCPImageTag to use for cluster creation. If specified, overrides the pgo.yaml setting.") createScheduleCmd.Flags().StringVarP(&SchedulePolicy, "policy", "", "", "The policy to use for SQL schedules.") createScheduleCmd.Flags().StringVarP(&Schedule, "schedule", "", "", "The schedule assigned to the cron task.") diff --git a/cmd/pgo/cmd/delete.go b/cmd/pgo/cmd/delete.go index c4dce568a6..7750feca4e 100644 --- a/cmd/pgo/cmd/delete.go +++ b/cmd/pgo/cmd/delete.go @@ -1,7 +1,7 @@ package cmd /* - Copyright 2017 - 2020 Crunchy Data Solutions, Inc. + Copyright 2017 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -17,6 +17,7 @@ package cmd import ( "fmt" + "os" "github.com/crunchydata/postgres-operator/cmd/pgo/util" "github.com/spf13/cobra" @@ -36,7 +37,7 @@ var deleteCmd = &cobra.Command{ Short: "Delete an Operator resource", Long: `The delete command allows you to delete an Operator resource. For example: - pgo delete backup mycluster + pgo delete backup mycluster --target=backup-name pgo delete cluster mycluster pgo delete cluster mycluster --delete-data pgo delete cluster mycluster --delete-data --delete-backups @@ -53,7 +54,6 @@ var deleteCmd = &cobra.Command{ pgo delete schedule mycluster pgo delete user --username=testuser --selector=name=mycluster`, Run: func(cmd *cobra.Command, args []string) { - if len(args) == 0 { fmt.Println(`Error: You must specify the type of resource to delete. 
Valid resource types include: * backup @@ -94,7 +94,6 @@ var deleteCmd = &cobra.Command{ * user`) } } - }, } @@ -118,6 +117,13 @@ func init() { // "pgo delete backup" // used to delete backups deleteCmd.AddCommand(deleteBackupCmd) + // "pgo delete backup --no-prompt" + // disables the verification prompt for deleting a backup + deleteBackupCmd.Flags().BoolVar(&NoPrompt, "no-prompt", false, "No command line confirmation.") + // "pgo delete backup --target" + // the backup target to expire + deleteBackupCmd.Flags().StringVar(&Target, "target", "", "The backup to expire, e.g. "+ + "\"20201220-171801F\". Use \"pgo show backup\" to determine the target.") // "pgo delete cluster" // used to delete clusters @@ -131,14 +137,14 @@ func init() { // instructs that any backups associated with a cluster should be deleted deleteClusterCmd.Flags().BoolVarP(&deleteBackups, "delete-backups", "b", false, "Causes the backups for specified cluster to be removed permanently.") - deleteClusterCmd.Flags().MarkDeprecated("delete-backups", + _ = deleteClusterCmd.Flags().MarkDeprecated("delete-backups", "Backups are deleted by default. If you would like to keep your backups, use the --keep-backups flag") // "pgo delete cluster --delete-data" // "pgo delete cluster -d" // instructs that the PostgreSQL cluster data should be deleted deleteClusterCmd.Flags().BoolVarP(&DeleteData, "delete-data", "d", false, "Causes the data for specified cluster to be removed permanently.") - deleteClusterCmd.Flags().MarkDeprecated("delete-data", + _ = deleteClusterCmd.Flags().MarkDeprecated("delete-data", "Data is deleted by default. You can preserve your data by keeping your backups with the --keep-backups flag") // "pgo delete cluster --keep-backups" // instructs that any backups associated with a cluster should be kept and not deleted @@ -164,8 +170,9 @@ func init() { deleteCmd.AddCommand(deleteLabelCmd) // pgo delete label --label // the label to be deleted - deleteLabelCmd.Flags().StringVar(&LabelCmdLabel, "label", "", - "The label to delete for any selected or specified clusters.") + deleteLabelCmd.Flags().StringSliceVar(&UserLabels, "label", []string{}, "Delete "+ + "labels to apply to the PostgreSQL cluster, "+"e.g. \"key=value\", \"prefix/key=value\". "+ + "Can specify flag multiple times.") // "pgo delete label --selector" // "pgo delete label -s" // the selector flag that filters which clusters to delete the cluster @@ -294,22 +301,30 @@ func init() { var deleteBackupCmd = &cobra.Command{ Use: "backup", Short: "Delete a backup", - Long: `Delete a backup. For example: + Long: `Delete a backup from pgBackRest. Requires a target backup. For example: - pgo delete backup mydatabase`, + pgo delete backup clustername --target=20201220-171801F`, Run: func(cmd *cobra.Command, args []string) { + if len(args) == 0 { + fmt.Println("Error: A cluster name is required for this command.") + os.Exit(1) + } + + if Target == "" { + fmt.Println("Error: --target must be specified.") + os.Exit(1) + } + + if !util.AskForConfirmation(NoPrompt, "If you delete a backup that is *not* set to expire, you may be unable to meet your retention requirements. 
Proceed?") { + fmt.Println("Aborting...") + return + } + if Namespace == "" { Namespace = PGONamespace } - if len(args) == 0 { - fmt.Println("Error: A database or cluster name is required for this command.") - } else { - if util.AskForConfirmation(NoPrompt, "") { - deleteBackup(args, Namespace) - } else { - fmt.Println("Aborting...") - } - } + + deleteBackup(Namespace, args[0]) }, } @@ -453,7 +468,6 @@ var deletePgAdminCmd = &cobra.Command{ } else { if util.AskForConfirmation(NoPrompt, "") { deletePgAdmin(args, Namespace) - } else { fmt.Println("Aborting...") } @@ -477,7 +491,6 @@ var deletePgbouncerCmd = &cobra.Command{ } else { if util.AskForConfirmation(NoPrompt, "") { deletePgbouncer(args, Namespace) - } else { fmt.Println("Aborting...") } @@ -542,16 +555,14 @@ var deleteUserCmd = &cobra.Command{ pgo delete user --username=someuser --selector=name=mycluster`, Run: func(cmd *cobra.Command, args []string) { - if Namespace == "" { Namespace = PGONamespace } - if len(args) == 0 && AllFlag == false && Selector == "" { + if len(args) == 0 && !AllFlag && Selector == "" { fmt.Println("Error: --all, --selector, or a list of clusters is required for this command") } else { if util.AskForConfirmation(NoPrompt, "") { deleteUser(args, Namespace) - } else { fmt.Println("Aborting...") } diff --git a/cmd/pgo/cmd/df.go b/cmd/pgo/cmd/df.go index 16f764830a..99c6b4c4f3 100644 --- a/cmd/pgo/cmd/df.go +++ b/cmd/pgo/cmd/df.go @@ -1,7 +1,7 @@ package cmd /* - Copyright 2018 - 2020 Crunchy Data Solutions, Inc. + Copyright 2018 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -112,7 +112,6 @@ func init() { dfCmd.Flags().BoolVar(&AllFlag, "all", false, "Get disk utilization for all managed clusters") dfCmd.Flags().StringVarP(&OutputFormat, "output", "o", "", `The output format. Supported types are: "json"`) dfCmd.Flags().StringVarP(&Selector, "selector", "s", "", "The selector to use for cluster filtering.") - } // getPVCType returns a "human readable" form of the PVC @@ -250,7 +249,6 @@ func showDf(namespace, selector string) { // make the request response, err := api.ShowDf(httpclient, &SessionCredentials, request) - // if there is an error, or the response code is not ok, print the error and // exit if err != nil { diff --git a/cmd/pgo/cmd/failover.go b/cmd/pgo/cmd/failover.go index 8b2b6ac8c2..239987d064 100644 --- a/cmd/pgo/cmd/failover.go +++ b/cmd/pgo/cmd/failover.go @@ -2,7 +2,7 @@ package cmd /* - Copyright 2018 - 2020 Crunchy Data Solutions, Inc. + Copyright 2018 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -19,6 +19,7 @@ package cmd import ( "fmt" "os" + "strings" "github.com/crunchydata/postgres-operator/cmd/pgo/api" "github.com/crunchydata/postgres-operator/cmd/pgo/util" @@ -32,7 +33,12 @@ var failoverCmd = &cobra.Command{ Short: "Performs a manual failover", Long: `Performs a manual failover. 
diff --git a/cmd/pgo/cmd/failover.go b/cmd/pgo/cmd/failover.go
index 8b2b6ac8c2..239987d064 100644
--- a/cmd/pgo/cmd/failover.go
+++ b/cmd/pgo/cmd/failover.go
@@ -2,7 +2,7 @@ package cmd
 
 /*
- Copyright 2018 - 2020 Crunchy Data Solutions, Inc.
+ Copyright 2018 - 2023 Crunchy Data Solutions, Inc.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
 You may obtain a copy of the License at
@@ -19,6 +19,7 @@ package cmd
 import (
 	"fmt"
 	"os"
+	"strings"
 
 	"github.com/crunchydata/postgres-operator/cmd/pgo/api"
 	"github.com/crunchydata/postgres-operator/cmd/pgo/util"
@@ -32,7 +33,12 @@ var failoverCmd = &cobra.Command{
 	Short: "Performs a manual failover",
 	Long: `Performs a manual failover.
 
 	For example:
 
-	pgo failover mycluster`,
+	# have the operator select the best target candidate
+	pgo failover hippo
+	# get a list of target candidates
+	pgo failover hippo --query
+	# failover to a specific target candidate
+	pgo failover hippo --target=hippo-abcd`,
 	Run: func(cmd *cobra.Command, args []string) {
 		if Namespace == "" {
 			Namespace = PGONamespace
@@ -44,54 +50,53 @@ var failoverCmd = &cobra.Command{
 			if Query {
 				queryFailover(args, Namespace)
 			} else if util.AskForConfirmation(NoPrompt, "") {
-				if Target == "" {
-					fmt.Println(`Error: The --target flag is required for failover.`)
-					return
-				}
 				createFailover(args, Namespace)
 			} else {
 				fmt.Println("Aborting...")
 			}
 		}
-
 	},
 }
 
 func init() {
 	RootCmd.AddCommand(failoverCmd)
 
-	failoverCmd.Flags().BoolVarP(&Query, "query", "", false, "Prints the list of failover candidates.")
+	failoverCmd.Flags().BoolVar(&Force, "force", false, "Force the failover to occur, regardless "+
+		"of the health of the target instance. Must be used with \"--target\".")
 	failoverCmd.Flags().BoolVar(&NoPrompt, "no-prompt", false, "No command line confirmation.")
+	failoverCmd.Flags().BoolVar(&Query, "query", false, "Prints the list of failover candidates.")
 	failoverCmd.Flags().StringVarP(&Target, "target", "", "", "The replica target which the failover will occur on.")
-
 }
 
 // createFailover ....
 func createFailover(args []string, ns string) {
 	log.Debugf("createFailover called %v", args)
 
-	request := new(msgs.CreateFailoverRequest)
-	request.Namespace = ns
-	request.ClusterName = args[0]
-	request.Target = Target
-	request.ClientVersion = msgs.PGO_VERSION
+	request := &msgs.CreateFailoverRequest{
+		ClientVersion: msgs.PGO_VERSION,
+		ClusterName:   args[0],
+		Force:         Force,
+		Namespace:     ns,
+		Target:        Target,
+	}
 
 	response, err := api.CreateFailover(httpclient, &SessionCredentials, request)
-
 	if err != nil {
 		fmt.Println("Error: " + err.Error())
-		os.Exit(2)
+		os.Exit(1)
 	}
 
-	if response.Status.Code == msgs.Ok {
-		for k := range response.Results {
-			fmt.Println(response.Results[k])
+	if response.Status.Code != msgs.Ok {
+		fmt.Println("Error:", strings.ReplaceAll(response.Status.Msg, "Error: ", ""))
+
+		if strings.Contains(response.Status.Msg, "no primary") {
+			fmt.Println(`Hint: Try using the "--force" flag`)
 		}
-	} else {
-		fmt.Println("Error: " + response.Status.Msg)
-		os.Exit(2)
+
+		os.Exit(1)
 	}
+
 	fmt.Println(response.Results)
 }
 
 // queryFailover is a helper function to return the user information about the
diff --git a/cmd/pgo/cmd/flags.go b/cmd/pgo/cmd/flags.go
index bb831e4006..643af26229 100644
--- a/cmd/pgo/cmd/flags.go
+++ b/cmd/pgo/cmd/flags.go
@@ -1,7 +1,7 @@
 package cmd
 
 /*
- Copyright 2018 - 2020 Crunchy Data Solutions, Inc.
+ Copyright 2018 - 2023 Crunchy Data Solutions, Inc.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
 You may obtain a copy of the License at
@@ -15,38 +15,57 @@ package cmd
 limitations under the License.
 */
 
-//flags used by more than 1 command
+// flags used by more than 1 command
 var DeleteData bool
 
 // KeepData, If set to "true", indicates that cluster data should be stored
 // even after a cluster is deleted. This is DEPRECATED
var KeepData bool
 
-var Query bool
-
-var Target string
-var Targets []string
-
-var OutputFormat string
-var Labelselector string
-var DebugFlag bool
-var Selector string
-var DryRun bool
-var ScheduleName string
-var NodeLabel string
-
-var BackupType string
-var RestoreType string
-var BackupOpts string
-var BackrestStorageType string
-
-var RED func(a ...interface{}) string
-var YELLOW func(a ...interface{}) string
-var GREEN func(a ...interface{}) string
-
-var Namespace string
-var PGONamespace string
-var APIServerURL string
-var PGO_CA_CERT, PGO_CLIENT_CERT, PGO_CLIENT_KEY string
-var PGO_DISABLE_TLS bool
-var EXCLUDE_OS_TRUST bool
+var (
+	// Force indicates that the "force" action should be taken for that step. This
+	// is different from NoPrompt as "Force" is for indicating that the API server
+	// must try at all costs
+	Force bool
+
+	// Query indicates that the attempted request is "querying" information
+	// instead of taking some action
+	Query bool
+)
+
+var (
+	Target  string
+	Targets []string
+)
+
+var (
+	OutputFormat string
+	Labelselector string
+	DebugFlag    bool
+	Selector     string
+	DryRun       bool
+	ScheduleName string
+	NodeLabel    string
+)
+
+var (
+	BackupType          string
+	RestoreType         string
+	BackupOpts          string
+	BackrestStorageType string
+)
+
+var (
+	RED    func(a ...interface{}) string
+	YELLOW func(a ...interface{}) string
+	GREEN  func(a ...interface{}) string
+)
+
+var (
+	Namespace    string
+	PGONamespace string
+	APIServerURL string
+	PGO_CA_CERT, PGO_CLIENT_CERT, PGO_CLIENT_KEY string
+	PGO_DISABLE_TLS bool
+	EXCLUDE_OS_TRUST bool
+)
diff --git a/cmd/pgo/cmd/label.go b/cmd/pgo/cmd/label.go
index db2253d8a9..b1a4d1e559 100644
--- a/cmd/pgo/cmd/label.go
+++ b/cmd/pgo/cmd/label.go
@@ -1,7 +1,7 @@
 package cmd
 
 /*
- Copyright 2017 - 2020 Crunchy Data Solutions, Inc.
+ Copyright 2017 - 2023 Crunchy Data Solutions, Inc.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
 You may obtain a copy of the License at
@@ -25,9 +25,9 @@ import (
 	"github.com/spf13/cobra"
 )
 
-var LabelCmdLabel string
-var LabelMap map[string]string
-var DeleteLabel bool
+var (
+	DeleteLabel bool
+)
 
 var labelCmd = &cobra.Command{
 	Use:   "label",
@@ -45,23 +45,25 @@ var labelCmd = &cobra.Command{
 		log.Debug("label called")
 		if len(args) == 0 && Selector == "" {
 			fmt.Println("Error: A selector or list of clusters is required to label a cluster.")
-			return
+			os.Exit(1)
 		}
-		if LabelCmdLabel == "" {
+
+		if len(UserLabels) == 0 {
 			fmt.Println("Error: You must specify the label to apply.")
-		} else {
-			labelClusters(args, Namespace)
+			os.Exit(1)
 		}
+
+		labelClusters(args, Namespace)
 	},
 }
 
 func init() {
 	RootCmd.AddCommand(labelCmd)
 
+	labelCmd.Flags().BoolVar(&DryRun, "dry-run", false, "Shows the clusters that the label would be applied to, without labelling them.")
+	labelCmd.Flags().StringSliceVar(&UserLabels, "label", []string{}, "Add labels to apply to the PostgreSQL cluster, "+
+		"e.g. \"key=value\", \"prefix/key=value\". Can specify flag multiple times.")
 	labelCmd.Flags().StringVarP(&Selector, "selector", "s", "", "The selector to use for cluster filtering.")
-	labelCmd.Flags().StringVarP(&LabelCmdLabel, "label", "", "", "The new label to apply for any selected or specified clusters.")
-	labelCmd.Flags().BoolVarP(&DryRun, "dry-run", "", false, "Shows the clusters that the label would be applied to, without labelling them.")
-
 }
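The label.go hunks here and below replace the single LabelCmdLabel string with the repeatable --label flag and convert it with getLabels, whose body is not shown in this changeset. A plausible sketch, assuming it maps "key=value" pairs into the map the API request expects (see r.Labels in labelClusters just below):

    package main

    import (
    	"fmt"
    	"strings"
    )

    // getLabels is a hypothetical reconstruction: it converts repeated
    // --label values such as "key=value" or "prefix/key=value" into a map,
    // skipping entries that do not match the expected format.
    func getLabels(userLabels []string) map[string]string {
    	labels := map[string]string{}
    	for _, raw := range userLabels {
    		key, value, found := strings.Cut(raw, "=")
    		if !found || key == "" {
    			fmt.Printf("invalid label %q, expected key=value\n", raw)
    			continue
    		}
    		labels[key] = value
    	}
    	return labels
    }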
 
 func labelClusters(clusters []string, ns string) {
@@ -77,7 +79,7 @@ func labelClusters(clusters []string, ns string) {
 	r.Namespace = ns
 	r.Selector = Selector
 	r.DryRun = DryRun
-	r.LabelCmdLabel = LabelCmdLabel
+	r.Labels = getLabels(UserLabels)
 	r.DeleteLabel = DeleteLabel
 	r.ClientVersion = msgs.PGO_VERSION
 
@@ -100,7 +102,6 @@ func labelClusters(clusters []string, ns string) {
 		fmt.Println("Error: " + response.Status.Msg)
 		os.Exit(2)
 	}
-
 }
 
 // deleteLabel ...
@@ -111,7 +112,7 @@ func deleteLabel(args []string, ns string) {
 	req.Selector = Selector
 	req.Namespace = ns
 	req.Args = args
-	req.LabelCmdLabel = LabelCmdLabel
+	req.Labels = getLabels(UserLabels)
 	req.ClientVersion = msgs.PGO_VERSION
 
 	response, err := api.DeleteLabel(httpclient, &SessionCredentials, &req)
@@ -127,5 +128,4 @@ func deleteLabel(args []string, ns string) {
 	} else {
 		fmt.Println("Error: " + response.Status.Msg)
 	}
-
 }
diff --git a/cmd/pgo/cmd/namespace.go b/cmd/pgo/cmd/namespace.go
index baa0f9ce92..32212baa9d 100644
--- a/cmd/pgo/cmd/namespace.go
+++ b/cmd/pgo/cmd/namespace.go
@@ -1,7 +1,7 @@
 package cmd
 
 /*
- Copyright 2019 - 2020 Crunchy Data Solutions, Inc.
+ Copyright 2019 - 2023 Crunchy Data Solutions, Inc.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
 You may obtain a copy of the License at
@@ -54,7 +54,7 @@ func showNamespace(args []string) {
 	r.Args = nsList
 	r.AllFlag = AllFlag
 
-	if len(nsList) == 0 && AllFlag == false {
+	if len(nsList) == 0 && !AllFlag {
 		fmt.Println("Error: namespace args or --all is required")
 		os.Exit(2)
 	}
@@ -62,7 +62,6 @@ func showNamespace(args []string) {
 	log.Debugf("showNamespace called %v", nsList)
 
 	response, err := api.ShowNamespace(httpclient, &SessionCredentials, &r)
-
 	if err != nil {
 		fmt.Println("Error: " + err.Error())
 		os.Exit(2)
@@ -107,7 +106,6 @@ func showNamespace(args []string) {
 		fmt.Printf("%s", accessible)
 		fmt.Printf("%s\n", iAccessible)
 	}
-
 }
 
 func createNamespace(args []string, ns string) {
@@ -167,8 +165,8 @@ func deleteNamespace(args []string, ns string) {
 	} else {
 		fmt.Println("Error: " + response.Status.Msg)
 	}
-
 }
+
 func updateNamespace(args []string) {
 	var err error
 
@@ -182,7 +180,6 @@ func updateNamespace(args []string) {
 	r.ClientVersion = msgs.PGO_VERSION
 
 	response, err := api.UpdateNamespace(httpclient, r, &SessionCredentials)
-
 	if err != nil {
 		fmt.Println("Error: " + err.Error())
 		os.Exit(2)
@@ -194,5 +191,4 @@ func updateNamespace(args []string) {
 		fmt.Println("Error: " + response.Status.Msg)
 		os.Exit(2)
 	}
-
 }
diff --git a/cmd/pgo/cmd/pgadmin.go b/cmd/pgo/cmd/pgadmin.go
index 8864de9abe..3619ca6ff8 100644
--- a/cmd/pgo/cmd/pgadmin.go
+++ b/cmd/pgo/cmd/pgadmin.go
@@ -1,7 +1,7 @@
 package cmd
 
 /*
- Copyright 2020 Crunchy Data Solutions, Inc.
+ Copyright 2020 - 2023 Crunchy Data Solutions, Inc.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
You may obtain a copy of the License at @@ -47,7 +47,6 @@ func createPgAdmin(args []string, ns string) { } response, err := api.CreatePgAdmin(httpclient, &SessionCredentials, &request) - if err != nil { fmt.Println("Error: " + err.Error()) os.Exit(1) @@ -95,7 +94,6 @@ func deletePgAdmin(args []string, ns string) { fmt.Println("Error: " + response.Status.Msg) os.Exit(1) } - } // makeShowPgAdminInterface returns an interface slice of the available values diff --git a/cmd/pgo/cmd/pgbouncer.go b/cmd/pgo/cmd/pgbouncer.go index d787b1ebbe..16bf9d726c 100644 --- a/cmd/pgo/cmd/pgbouncer.go +++ b/cmd/pgo/cmd/pgbouncer.go @@ -1,7 +1,7 @@ package cmd /* - Copyright 2018 - 2020 Crunchy Data Solutions, Inc. + Copyright 2018 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -23,6 +23,7 @@ import ( "github.com/crunchydata/postgres-operator/cmd/pgo/api" "github.com/crunchydata/postgres-operator/cmd/pgo/util" msgs "github.com/crunchydata/postgres-operator/pkg/apiservermsgs" + v1 "k8s.io/api/core/v1" ) // showPgBouncerTextPadding contains the values for what the text padding should be @@ -52,7 +53,6 @@ var PgBouncerReplicas int32 var PgBouncerUninstall bool func createPgbouncer(args []string, ns string) { - if Selector == "" && len(args) == 0 { fmt.Println("Error: The --selector flag is required.") return @@ -68,6 +68,8 @@ func createPgbouncer(args []string, ns string) { Namespace: ns, Replicas: PgBouncerReplicas, Selector: Selector, + ServiceType: v1.ServiceType(ServiceType), + TLSSecret: PgBouncerTLSSecret, } if err := util.ValidateQuantity(request.CPURequest, "cpu"); err != nil { @@ -91,7 +93,6 @@ func createPgbouncer(args []string, ns string) { } response, err := api.CreatePgbouncer(httpclient, &SessionCredentials, &request) - if err != nil { fmt.Println("Error: " + err.Error()) os.Exit(1) @@ -114,7 +115,6 @@ func createPgbouncer(args []string, ns string) { } func deletePgbouncer(args []string, ns string) { - if Selector == "" && len(args) == 0 { fmt.Println("Error: The --selector flag or a cluster name is required.") return @@ -143,7 +143,6 @@ func deletePgbouncer(args []string, ns string) { fmt.Println("Error: " + response.Status.Msg) os.Exit(2) } - } // makeShowPgBouncerInterface returns an interface slice of the available values @@ -331,7 +330,6 @@ func showPgBouncer(namespace string, clusterNames []string) { // and make the API request! response, err := api.ShowPgBouncer(httpclient, &SessionCredentials, request) - // if there is a bona-fide error, log and exit if err != nil { fmt.Println("Error:", err.Error()) @@ -371,6 +369,7 @@ func updatePgBouncer(namespace string, clusterNames []string) { Replicas: PgBouncerReplicas, RotatePassword: RotatePassword, Selector: Selector, + ServiceType: v1.ServiceType(ServiceType), } if err := util.ValidateQuantity(request.CPURequest, "cpu"); err != nil { @@ -395,7 +394,6 @@ func updatePgBouncer(namespace string, clusterNames []string) { // and make the API request! response, err := api.UpdatePgBouncer(httpclient, &SessionCredentials, request) - // if there is a bona-fide error, log and exit if err != nil { fmt.Println("Error:", err.Error()) diff --git a/cmd/pgo/cmd/pgdump.go b/cmd/pgo/cmd/pgdump.go index b9b64046bc..92707c88bf 100644 --- a/cmd/pgo/cmd/pgdump.go +++ b/cmd/pgo/cmd/pgdump.go @@ -2,7 +2,7 @@ package cmd /* - Copyright 2019 - 2020 Crunchy Data Solutions, Inc. 
+ Copyright 2019 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -57,7 +57,6 @@ func createpgDumpBackup(args []string, ns string) { fmt.Println("No clusters found.") return } - } // pgDump .... @@ -84,8 +83,8 @@ func showpgDump(args []string, ns string) { log.Debugf("response = %v", response) log.Debugf("len of items = %d", len(response.BackupList.Items)) - for _, backup := range response.BackupList.Items { - printDumpCRD(&backup) + for i := range response.BackupList.Items { + printDumpCRD(&response.BackupList.Items[i]) } } } @@ -105,5 +104,4 @@ func printDumpCRD(result *msgs.Pgbackup) { fmt.Printf("%s%s\n", TreeBranch, "Backup User Secret:\t"+result.BackupUserSecret) fmt.Printf("%s%s\n", TreeTrunk, "Backup Port:\t"+result.BackupPort) fmt.Printf("%s%s\n", TreeTrunk, "Backup Opts:\t"+result.BackupOpts) - } diff --git a/cmd/pgo/cmd/pgorole.go b/cmd/pgo/cmd/pgorole.go index 381542d3b4..2ac20cd730 100644 --- a/cmd/pgo/cmd/pgorole.go +++ b/cmd/pgo/cmd/pgorole.go @@ -1,7 +1,7 @@ package cmd /* - Copyright 2019 - 2020 Crunchy Data Solutions, Inc. + Copyright 2019 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -45,7 +45,6 @@ func updatePgorole(args []string, ns string) { r.ClientVersion = msgs.PGO_VERSION response, err := api.UpdatePgorole(httpclient, &SessionCredentials, r) - if err != nil { fmt.Println("Error: " + err.Error()) os.Exit(2) @@ -57,11 +56,9 @@ func updatePgorole(args []string, ns string) { fmt.Println("Error: " + response.Status.Msg) os.Exit(2) } - } func showPgorole(args []string, ns string) { - r := new(msgs.ShowPgoroleRequest) r.PgoroleName = args r.Namespace = ns @@ -74,7 +71,6 @@ func showPgorole(args []string, ns string) { } response, err := api.ShowPgorole(httpclient, &SessionCredentials, r) - if err != nil { fmt.Println("Error: " + err.Error()) os.Exit(2) @@ -97,11 +93,9 @@ func showPgorole(args []string, ns string) { fmt.Println("pgorole : " + pgorole.Name) fmt.Println("permissions : " + pgorole.Permissions) } - } func createPgorole(args []string, ns string) { - if Permissions == "" { fmt.Println("Error: permissions flag is required.") return @@ -112,7 +106,7 @@ func createPgorole(args []string, ns string) { return } var err error - //create the request + // create the request r := new(msgs.CreatePgoroleRequest) r.PgoroleName = args[0] r.PgorolePermissions = Permissions @@ -133,11 +127,9 @@ func createPgorole(args []string, ns string) { fmt.Println("Error: " + response.Status.Msg) os.Exit(2) } - } func deletePgorole(args []string, ns string) { - log.Debugf("deletePgorole called %v", args) r := msgs.DeletePgoroleRequest{} @@ -165,5 +157,4 @@ func deletePgorole(args []string, ns string) { } else { fmt.Println("Error: " + response.Status.Msg) } - } diff --git a/cmd/pgo/cmd/pgouser.go b/cmd/pgo/cmd/pgouser.go index 9bfc58167f..086d4c7e52 100644 --- a/cmd/pgo/cmd/pgouser.go +++ b/cmd/pgo/cmd/pgouser.go @@ -1,7 +1,7 @@ package cmd /* - Copyright 2019 - 2020 Crunchy Data Solutions, Inc. + Copyright 2019 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at @@ -47,7 +47,6 @@ func updatePgouser(args []string, ns string) { r.ClientVersion = msgs.PGO_VERSION response, err := api.UpdatePgouser(httpclient, &SessionCredentials, r) - if err != nil { fmt.Println("Error: " + err.Error()) os.Exit(2) @@ -59,11 +58,9 @@ func updatePgouser(args []string, ns string) { fmt.Println("Error: " + response.Status.Msg) os.Exit(2) } - } func showPgouser(args []string, ns string) { - r := new(msgs.ShowPgouserRequest) r.PgouserName = args r.Namespace = ns @@ -76,7 +73,6 @@ func showPgouser(args []string, ns string) { } response, err := api.ShowPgouser(httpclient, &SessionCredentials, r) - if err != nil { fmt.Println("Error: " + err.Error()) os.Exit(2) @@ -100,11 +96,9 @@ func showPgouser(args []string, ns string) { fmt.Printf("roles : %v\n", pgouser.Role) fmt.Printf("namespaces : %v\n", pgouser.Namespace) } - } func createPgouser(args []string, ns string) { - if PgouserPassword == "" { fmt.Println("Error: pgouser-password flag is required.") return @@ -128,7 +122,7 @@ func createPgouser(args []string, ns string) { return } var err error - //create the request + // create the request r := new(msgs.CreatePgouserRequest) r.PgouserName = args[0] r.PgouserPassword = PgouserPassword @@ -152,11 +146,9 @@ func createPgouser(args []string, ns string) { fmt.Println("Error: " + response.Status.Msg) os.Exit(2) } - } func deletePgouser(args []string, ns string) { - log.Debugf("deletePgouser called %v", args) r := msgs.DeletePgouserRequest{} @@ -184,5 +176,4 @@ func deletePgouser(args []string, ns string) { } else { fmt.Println("Error: " + response.Status.Msg) } - } diff --git a/cmd/pgo/cmd/policy.go b/cmd/pgo/cmd/policy.go index 90d8f04f26..3064e772f5 100644 --- a/cmd/pgo/cmd/policy.go +++ b/cmd/pgo/cmd/policy.go @@ -1,7 +1,7 @@ package cmd /* - Copyright 2017 - 2020 Crunchy Data Solutions, Inc. + Copyright 2017 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at
@@ -58,7 +58,6 @@ func init() {
 	applyCmd.Flags().StringVarP(&Selector, "selector", "s", "", "The selector to use for cluster filtering.")
 	applyCmd.Flags().BoolVarP(&DryRun, "dry-run", "", false, "Shows the clusters that the label would be applied to, without labelling them.")
-
 }
 
 func applyPolicy(args []string, ns string) {
@@ -82,7 +81,6 @@ func applyPolicy(args []string, ns string) {
 	r.ClientVersion = msgs.PGO_VERSION
 
 	response, err := api.ApplyPolicy(httpclient, &SessionCredentials, r)
-
 	if err != nil {
 		fmt.Println("Error: " + err.Error())
 		os.Exit(2)
@@ -104,10 +102,9 @@ func applyPolicy(args []string, ns string) {
 		fmt.Println("Error: " + response.Status.Msg)
 		os.Exit(2)
 	}
-
 }
 
-func showPolicy(args []string, ns string) {
+func showPolicy(args []string, ns string) {
 	r := new(msgs.ShowPolicyRequest)
 	r.Selector = Selector
 	r.Namespace = ns
@@ -122,7 +119,6 @@ func showPolicy(args []string, ns string) {
 		r.Policyname = v
 
 		response, err := api.ShowPolicy(httpclient, &SessionCredentials, r)
-
 		if err != nil {
 			fmt.Println("Error: " + err.Error())
 			os.Exit(2)
@@ -143,30 +139,24 @@ func showPolicy(args []string, ns string) {
 		for _, policy := range response.PolicyList.Items {
 			fmt.Println("")
 			fmt.Println("policy : " + policy.Spec.Name)
-			fmt.Println(TreeBranch + "url : " + policy.Spec.URL)
 			fmt.Println(TreeBranch + "status : " + policy.Spec.Status)
 			fmt.Println(TreeTrunk + "sql : " + policy.Spec.SQL)
 		}
 	}
-
 }
 
 func createPolicy(args []string, ns string) {
-
 	if len(args) == 0 {
 		fmt.Println("Error: A policy name argument is required.")
 		return
 	}
 	var err error
-	//create the request
+	// create the request
 	r := new(msgs.CreatePolicyRequest)
 	r.Name = args[0]
 	r.Namespace = ns
 	r.ClientVersion = msgs.PGO_VERSION
-	if PolicyURL != "" {
-		r.URL = PolicyURL
-	}
 
 	if PolicyFile != "" {
 		r.SQL, err = getPolicyString(PolicyFile)
@@ -190,7 +180,6 @@ func createPolicy(args []string, ns string) {
 		fmt.Println("Error: " + response.Status.Msg)
 		os.Exit(2)
 	}
-
 }
 
 func getPolicyString(filename string) (string, error) {
@@ -205,7 +194,6 @@
 }
 
 func deletePolicy(args []string, ns string) {
-
 	log.Debugf("deletePolicy called %v", args)
 
 	r := msgs.DeletePolicyRequest{}
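The hunk context above shows only the signature of getPolicyString; given that createPolicy assigns its result to r.SQL, it presumably just reads the policy file from disk. A sketch under that assumption (os.ReadFile requires Go 1.16+; older code would use ioutil.ReadFile):

    package cmd

    import "os"

    // getPolicyString is reconstructed here from its signature alone; the
    // real body is elided by the hunk above.
    func getPolicyString(filename string) (string, error) {
    	buf, err := os.ReadFile(filename)
    	if err != nil {
    		return "", err
    	}
    	return string(buf), nil
    }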
diff --git a/cmd/pgo/cmd/pvc.go b/cmd/pgo/cmd/pvc.go
index 856e134533..08644de2b5 100644
--- a/cmd/pgo/cmd/pvc.go
+++ b/cmd/pgo/cmd/pvc.go
@@ -1,7 +1,7 @@
 package cmd
 
 /*
- Copyright 2017 - 2020 Crunchy Data Solutions, Inc.
+ Copyright 2017 - 2023 Crunchy Data Solutions, Inc.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
 You may obtain a copy of the License at
@@ -34,22 +34,20 @@ func showPVC(args []string, ns string) {
 	r.ClientVersion = msgs.PGO_VERSION
 
 	if AllFlag {
-		//special case to just list all the PVCs
+		// special case to just list all the PVCs
 		r.ClusterName = ""
 		printPVC(&r)
 	} else {
-		//args are a list of pvc names...for this case show details
+		// args are a list of pvc names...for this case show details
 		for _, arg := range args {
 			r.ClusterName = arg
 			log.Debugf("show pvc called for %s", arg)
 			printPVC(&r)
 		}
 	}
-
 }
 
 func printPVC(r *msgs.ShowPVCRequest) {
-
 	response, err := api.ShowPVC(httpclient, r, &SessionCredentials)
 
 	log.Debugf("response = %v", response)
@@ -74,5 +72,4 @@ func printPVC(r *msgs.ShowPVCRequest) {
 	for _, v := range response.Results {
 		fmt.Printf("%-20s\t%-30s\n", v.ClusterName, v.PVCName)
 	}
-
 }
diff --git a/cmd/pgo/cmd/reload.go b/cmd/pgo/cmd/reload.go
index 415c31a567..20978502b6 100644
--- a/cmd/pgo/cmd/reload.go
+++ b/cmd/pgo/cmd/reload.go
@@ -2,7 +2,7 @@ package cmd
 
 /*
- Copyright 2018 - 2020 Crunchy Data Solutions, Inc.
+ Copyright 2018 - 2023 Crunchy Data Solutions, Inc.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
 You may obtain a copy of the License at
@@ -27,7 +27,7 @@ import (
 	"github.com/spf13/cobra"
 )
 
-//unused but coming soon to a theatre near you
+// unused but coming soon to a theatre near you
 var ConfigMapName string
 
 var reloadCmd = &cobra.Command{
@@ -50,7 +50,6 @@ var reloadCmd = &cobra.Command{
 				fmt.Println("Aborting...")
 			}
 		}
-
 	},
 }
 
@@ -59,7 +58,6 @@ func init() {
 
 	reloadCmd.Flags().StringVarP(&Selector, "selector", "s", "", "The selector to use for cluster filtering.")
 	reloadCmd.Flags().BoolVar(&NoPrompt, "no-prompt", false, "No command line confirmation.")
-
 }
 
 // reload ....
@@ -71,7 +69,6 @@ func reload(args []string, ns string) {
 	request.Selector = Selector
 	request.Namespace = ns
 	response, err := api.Reload(httpclient, &SessionCredentials, request)
-
 	if err != nil {
 		fmt.Println("Error: " + err.Error())
 		os.Exit(2)
@@ -95,5 +92,4 @@ func reload(args []string, ns string) {
 		fmt.Println("No clusters found.")
 		return
 	}
-
 }
diff --git a/cmd/pgo/cmd/restart.go b/cmd/pgo/cmd/restart.go
index f784d82004..0f239af6c1 100644
--- a/cmd/pgo/cmd/restart.go
+++ b/cmd/pgo/cmd/restart.go
@@ -2,7 +2,7 @@ package cmd
 
 /*
- Copyright 2020 Crunchy Data Solutions, Inc.
+ Copyright 2020 - 2023 Crunchy Data Solutions, Inc.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
You may obtain a copy of the License at
@@ -28,6 +28,8 @@ import (
 	"github.com/spf13/cobra"
 )
 
+var RollingUpdate bool
+
 var restartCmd = &cobra.Command{
 	Use:   "restart",
 	Short: "Restarts the PostgreSQL database within a PostgreSQL cluster",
@@ -36,13 +38,15 @@ var restartCmd = &cobra.Command{
 	For example, to restart the primary and all replicas:
 	pgo restart mycluster
 
+	To restart the primary and all replicas using a rolling update strategy:
+	pgo restart mycluster --rolling
+
 	Or target a specific instance within the cluster:
 	pgo restart mycluster --target=mycluster-abcd
 
 	And use the 'query' flag to obtain a list of all instances within the cluster:
 	pgo restart mycluster --query`,
 	Run: func(cmd *cobra.Command, args []string) {
-
 		if OutputFormat != "" {
 			if OutputFormat != "json" {
 				fmt.Println("Error: ", "json is the only supported --output format value")
@@ -72,18 +76,17 @@ var restartCmd = &cobra.Command{
 }
 
 func init() {
-
 	RootCmd.AddCommand(restartCmd)
 
 	restartCmd.Flags().BoolVar(&NoPrompt, "no-prompt", false, "No command line confirmation.")
 	restartCmd.Flags().StringVarP(&OutputFormat, "output", "o", "", `The output format. Supported types are: "json"`)
 	restartCmd.Flags().BoolVarP(&Query, "query", "", false, "Prints the list of instances that can be restarted.")
+	restartCmd.Flags().BoolVar(&RollingUpdate, "rolling", false, "Performs a rolling restart. Cannot be used with other flags.")
 	restartCmd.Flags().StringArrayVarP(&Targets, "target", "", []string{}, "The instance that will be restarted.")
 }
 
 // restart sends a request to restart a PG cluster or one or more instances within it.
 func restart(clusterName, namespace string) {
-
 	log.Debugf("restart called %v", clusterName)
 
 	request := new(msgs.RestartRequest)
@@ -91,6 +94,12 @@ func restart(clusterName, namespace string) {
 	request.ClusterName = clusterName
 	request.Targets = Targets
 	request.ClientVersion = msgs.PGO_VERSION
+	request.RollingUpdate = RollingUpdate
+
+	if request.RollingUpdate && len(request.Targets) > 0 {
+		fmt.Println("Error: cannot use --rolling with other flags")
+		os.Exit(1)
+	}
 
 	response, err := api.Restart(httpclient, &SessionCredentials, request)
 	if err != nil {
@@ -129,7 +138,6 @@ func restart(clusterName, namespace string) {
 // instances (the primary and all replicas) within a cluster. This is useful when the user
 // would like to specify one or more instances for a restart using the "--target" flag.
 func queryRestart(args []string, namespace string) {
-
 	log.Debugf("queryRestart called %v", args)
 
 	for _, clusterName := range args {
diff --git a/cmd/pgo/cmd/restore.go b/cmd/pgo/cmd/restore.go
index 4906bb7510..71b80970df 100644
--- a/cmd/pgo/cmd/restore.go
+++ b/cmd/pgo/cmd/restore.go
@@ -2,7 +2,7 @@ package cmd
 
 /*
- Copyright 2018 - 2020 Crunchy Data Solutions, Inc.
+ Copyright 2018 - 2023 Crunchy Data Solutions, Inc.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
You may obtain a copy of the License at @@ -28,8 +28,10 @@ import ( "github.com/spf13/cobra" ) -var PITRTarget string -var BackupPath, BackupPVC string +var ( + PITRTarget string + BackupPath, BackupPVC string +) var restoreCmd = &cobra.Command{ Use: "restore", @@ -54,7 +56,6 @@ var restoreCmd = &cobra.Command{ fmt.Println("Aborting...") } } - }, } @@ -63,13 +64,15 @@ func init() { restoreCmd.Flags().StringVarP(&BackupOpts, "backup-opts", "", "", "The restore options for pgbackrest or pgdump.") restoreCmd.Flags().StringVarP(&PITRTarget, "pitr-target", "", "", "The PITR target, being a PostgreSQL timestamp such as '2018-08-13 11:25:42.582117-04'.") + restoreCmd.Flags().StringVar(&NodeAffinityType, "node-affinity-type", "", "Sets the type of node affinity to use. "+ + "Can be either preferred (default) or required. Must be used with --node-label") restoreCmd.Flags().StringVarP(&NodeLabel, "node-label", "", "", "The node label (key=value) to use when scheduling "+ "the restore job, and in the case of a pgBackRest restore, also the new (i.e. restored) primary deployment. If not set, any node is used.") restoreCmd.Flags().BoolVar(&NoPrompt, "no-prompt", false, "No command line confirmation.") restoreCmd.Flags().StringVarP(&BackupPVC, "backup-pvc", "", "", "The PVC containing the pgdump to restore from.") restoreCmd.Flags().StringVarP(&PGDumpDB, "pgdump-database", "d", "postgres", "The name of the database pgdump will restore.") restoreCmd.Flags().StringVarP(&BackupType, "backup-type", "", "", "The type of backup to restore from, default is pgbackrest. Valid types are pgbackrest or pgdump.") - restoreCmd.Flags().StringVarP(&BackrestStorageType, "pgbackrest-storage-type", "", "", "The type of storage to use for a pgBackRest restore. Either \"local\", \"s3\". (default \"local\")") + restoreCmd.Flags().StringVarP(&BackrestStorageType, "pgbackrest-storage-type", "", "", "The type of storage to use for a pgBackRest restore. Either \"posix\", \"s3\". (default \"posix\")") } // restore .... @@ -89,6 +92,7 @@ func restore(args []string, ns string) { request.PITRTarget = PITRTarget request.FromPVC = BackupPVC // use PVC specified on command line for pgrestore request.PGDumpDB = PGDumpDB + request.NodeAffinityType = getNodeAffinityType(NodeLabel, NodeAffinityType) request.NodeLabel = NodeLabel response, err = api.RestoreDump(httpclient, &SessionCredentials, request) @@ -100,6 +104,7 @@ func restore(args []string, ns string) { request.RestoreOpts = BackupOpts request.PITRTarget = PITRTarget request.NodeLabel = NodeLabel + request.NodeAffinityType = getNodeAffinityType(NodeLabel, NodeAffinityType) request.BackrestStorageType = BackrestStorageType response, err = api.Restore(httpclient, &SessionCredentials, request) @@ -123,5 +128,4 @@ func restore(args []string, ns string) { fmt.Println("No clusters found.") return } - } diff --git a/cmd/pgo/cmd/root.go b/cmd/pgo/cmd/root.go index 249e0a8f91..ffa79db011 100644 --- a/cmd/pgo/cmd/root.go +++ b/cmd/pgo/cmd/root.go @@ -1,7 +1,7 @@ package cmd /* - Copyright 2017 - 2020 Crunchy Data Solutions, Inc. + Copyright 2017 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at @@ -45,11 +45,9 @@ func Execute() { log.Debug(err.Error()) os.Exit(-1) } - } func init() { - cobra.OnInitialize(initConfig) log.Debug("init called") GREEN = color.New(color.FgGreen).SprintFunc() @@ -68,7 +66,6 @@ func init() { RootCmd.PersistentFlags().BoolVar(&PGO_DISABLE_TLS, "disable-tls", false, "Disable TLS authentication to the Postgres Operator.") RootCmd.PersistentFlags().BoolVar(&EXCLUDE_OS_TRUST, "exclude-os-trust", defExclOSTrust, "Exclude CA certs from OS default trust store") RootCmd.PersistentFlags().BoolVar(&DebugFlag, "debug", false, "Enable additional output for debugging.") - } func initConfig() { @@ -107,18 +104,4 @@ func initConfig() { httpclient = hc } } - - if os.Getenv("GENERATE_BASH_COMPLETION") != "" { - generateBashCompletion() - } -} - -func generateBashCompletion() { - log.Debugf("generating bash completion script") - file, err2 := os.Create("/tmp/pgo-bash-completion.out") - if err2 != nil { - fmt.Println("Error: ", err2.Error()) - } - defer file.Close() - RootCmd.GenBashCompletion(file) } diff --git a/cmd/pgo/cmd/scale.go b/cmd/pgo/cmd/scale.go index e25709bc0e..cbedeb8003 100644 --- a/cmd/pgo/cmd/scale.go +++ b/cmd/pgo/cmd/scale.go @@ -1,7 +1,7 @@ package cmd /* - Copyright 2017 - 2020 Crunchy Data Solutions, Inc. + Copyright 2017 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -24,6 +24,7 @@ import ( msgs "github.com/crunchydata/postgres-operator/pkg/apiservermsgs" log "github.com/sirupsen/logrus" "github.com/spf13/cobra" + v1 "k8s.io/api/core/v1" ) var ReplicaCount int @@ -58,31 +59,46 @@ func init() { scaleCmd.Flags().StringVarP(&ServiceType, "service-type", "", "", "The service type to use in the replica Service. If not set, the default in pgo.yaml will be used.") scaleCmd.Flags().StringVarP(&CCPImageTag, "ccp-image-tag", "", "", "The CCPImageTag to use for cluster creation. If specified, overrides the .pgo.yaml setting.") + scaleCmd.Flags().StringVar(&NodeAffinityType, "node-affinity-type", "", "Sets the type of node affinity to use. "+ + "Can be either preferred (default) or required. Must be used with --node-label") + scaleCmd.Flags().StringVarP(&NodeLabel, "node-label", "", "", "The node label (key) to use in placing the replica database. If not set, any node is used.") scaleCmd.Flags().BoolVar(&NoPrompt, "no-prompt", false, "No command line confirmation.") scaleCmd.Flags().IntVarP(&ReplicaCount, "replica-count", "", 1, "The replica count to apply to the clusters.") scaleCmd.Flags().StringVarP(&StorageConfig, "storage-config", "", "", "The name of a Storage config in pgo.yaml to use for the replica storage.") - scaleCmd.Flags().StringVarP(&NodeLabel, "node-label", "", "", "The node label (key) to use in placing the replica database. 
If not set, any node is used.") + scaleCmd.Flags().StringSliceVar(&Tolerations, "toleration", []string{}, + "Set Pod tolerations for each PostgreSQL instance in a cluster.\n"+ + "The general format is \"key=value:Effect\"\n"+ + "For example, to add an Exists and an Equals toleration: \"--toleration=ssd:NoSchedule,zone=east:NoSchedule\"") } func scaleCluster(args []string, ns string) { - for _, arg := range args { - log.Debugf(" %s ReplicaCount is %d", arg, ReplicaCount) - response, err := api.ScaleCluster(httpclient, arg, ReplicaCount, - StorageConfig, NodeLabel, CCPImageTag, ServiceType, &SessionCredentials, ns) + request := msgs.ClusterScaleRequest{ + CCPImageTag: CCPImageTag, + Name: arg, + Namespace: ns, + NodeAffinityType: getNodeAffinityType(NodeLabel, NodeAffinityType), + NodeLabel: NodeLabel, + ReplicaCount: ReplicaCount, + ServiceType: v1.ServiceType(ServiceType), + StorageConfig: StorageConfig, + Tolerations: getClusterTolerations(Tolerations, false), + } + + response, err := api.ScaleCluster(httpclient, &SessionCredentials, request) if err != nil { fmt.Println("Error: " + err.Error()) - os.Exit(2) + os.Exit(1) } - if response.Status.Code == msgs.Ok { - for _, v := range response.Results { - fmt.Println(v) - } - } else { + if response.Status.Code != msgs.Ok { fmt.Println("Error: " + response.Status.Msg) + os.Exit(1) } + for _, v := range response.Results { + fmt.Println(v) + } } } diff --git a/cmd/pgo/cmd/scaledown.go b/cmd/pgo/cmd/scaledown.go index be60d27ed7..6ec996fa34 100644 --- a/cmd/pgo/cmd/scaledown.go +++ b/cmd/pgo/cmd/scaledown.go @@ -1,7 +1,7 @@ package cmd /* - Copyright 2018 - 2020 Crunchy Data Solutions, Inc. + Copyright 2018 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -66,7 +66,7 @@ func init() { scaledownCmd.Flags().StringVarP(&Target, "target", "", "", "The replica to target for scaling down") scaledownCmd.Flags().BoolVarP(&DeleteData, "delete-data", "d", true, "Causes the data for the scaled down replica to be removed permanently.") - scaledownCmd.Flags().MarkDeprecated("delete-data", "Data is deleted by default.") + _ = scaledownCmd.Flags().MarkDeprecated("delete-data", "Data is deleted by default.") scaledownCmd.Flags().BoolVar(&KeepData, "keep-data", false, "Causes data for the scale down replica to *not* be deleted") scaledownCmd.Flags().BoolVar(&NoPrompt, "no-prompt", false, "No command line confirmation.") @@ -76,7 +76,6 @@ func init() { // available replicas that can be scaled down. This is called when the "--query" // flag is specified func queryCluster(args []string, ns string) { - // iterate through the clusters and output information about each one for _, arg := range args { @@ -134,7 +133,6 @@ func queryCluster(args []string, ns string) { } func scaleDownCluster(clusterName, ns string) { - // determine if the data should be deleted. The modern flag for handling this // is "KeepData" which defaults to "false". 
We will honor the "DeleteData" // flag (which defaults to "true"), but this will be removed in a future @@ -143,7 +141,6 @@ func scaleDownCluster(clusterName, ns string) { response, err := api.ScaleDownCluster(httpclient, clusterName, Target, deleteData, &SessionCredentials, ns) - if err != nil { fmt.Println("Error: ", err.Error()) return @@ -156,5 +153,4 @@ func scaleDownCluster(clusterName, ns string) { } else { fmt.Println("Error: " + response.Status.Msg) } - } diff --git a/cmd/pgo/cmd/schedule.go b/cmd/pgo/cmd/schedule.go index c91018c016..df5d51ea1c 100644 --- a/cmd/pgo/cmd/schedule.go +++ b/cmd/pgo/cmd/schedule.go @@ -2,7 +2,7 @@ package cmd /* - Copyright 2018 - 2020 Crunchy Data Solutions, Inc. + Copyright 2018 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -81,7 +81,6 @@ func createSchedule(args []string, ns string) { } response, err := api.CreateSchedule(httpclient, &SessionCredentials, r) - if err != nil { fmt.Println("Error: " + response.Status.Msg) os.Exit(2) @@ -100,7 +99,6 @@ func createSchedule(args []string, ns string) { fmt.Println("No clusters found.") return } - } func deleteSchedule(args []string, ns string) { @@ -124,7 +122,6 @@ func deleteSchedule(args []string, ns string) { } response, err := api.DeleteSchedule(httpclient, &SessionCredentials, r) - if err != nil { fmt.Println("Error: " + response.Status.Msg) os.Exit(2) @@ -143,7 +140,6 @@ func deleteSchedule(args []string, ns string) { fmt.Println("No schedules found.") return } - } func showSchedule(args []string, ns string) { @@ -169,7 +165,6 @@ func showSchedule(args []string, ns string) { } response, err := api.ShowSchedule(httpclient, &SessionCredentials, r) - if err != nil { fmt.Println("Error: " + response.Status.Msg) os.Exit(2) diff --git a/cmd/pgo/cmd/show.go b/cmd/pgo/cmd/show.go index b42ba2a7d9..6f46aed52c 100644 --- a/cmd/pgo/cmd/show.go +++ b/cmd/pgo/cmd/show.go @@ -1,7 +1,7 @@ package cmd /* - Copyright 2017 - 2020 Crunchy Data Solutions, Inc. + Copyright 2017 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -22,8 +22,10 @@ import ( "github.com/spf13/cobra" ) -const TreeBranch = "\t" -const TreeTrunk = "\t" +const ( + TreeBranch = "\t" + TreeTrunk = "\t" +) var AllFlag bool @@ -80,7 +82,6 @@ Valid resource types include: * user`) } } - }, } @@ -327,7 +328,7 @@ var ShowUserCmd = &cobra.Command{ if Namespace == "" { Namespace = PGONamespace } - if Selector == "" && AllFlag == false && len(args) == 0 { + if Selector == "" && !AllFlag && len(args) == 0 { fmt.Println("Error: --selector, --all, or cluster name()s required for this command") } else { showUser(args, Namespace) diff --git a/cmd/pgo/cmd/status.go b/cmd/pgo/cmd/status.go index f7cea5a946..562604b18f 100644 --- a/cmd/pgo/cmd/status.go +++ b/cmd/pgo/cmd/status.go @@ -1,7 +1,7 @@ package cmd /* - Copyright 2018 - 2020 Crunchy Data Solutions, Inc. + Copyright 2018 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -33,11 +33,9 @@ func init() { RootCmd.AddCommand(statusCmd) statusCmd.Flags().StringVarP(&OutputFormat, "output", "o", "", "The output format. 
Currently, json is the only supported value.") - } func showStatus(args []string, ns string) { - log.Debugf("showStatus called %v", args) if OutputFormat != "" && OutputFormat != "json" { @@ -46,7 +44,6 @@ func showStatus(args []string, ns string) { } response, err := api.ShowStatus(httpclient, &SessionCredentials, ns) - if err != nil { fmt.Println("Error: " + err.Error()) os.Exit(2) @@ -67,11 +64,9 @@ func showStatus(args []string, ns string) { } printSummary(&response.Result) - } func printSummary(status *msgs.StatusDetail) { - WID := 25 fmt.Printf("%s%d\n", util.Rpad("Databases:", " ", WID), status.NumDatabases) fmt.Printf("%s%d\n", util.Rpad("Claims:", " ", WID), status.NumClaims) diff --git a/cmd/pgo/cmd/test.go b/cmd/pgo/cmd/test.go index a25ce23b9c..f6503673a9 100644 --- a/cmd/pgo/cmd/test.go +++ b/cmd/pgo/cmd/test.go @@ -1,7 +1,7 @@ package cmd /* - Copyright 2017 - 2020 Crunchy Data Solutions, Inc. + Copyright 2017 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -57,11 +57,9 @@ func init() { testCmd.Flags().StringVarP(&Selector, "selector", "s", "", "The selector to use for cluster filtering.") testCmd.Flags().StringVarP(&OutputFormat, "output", "o", "", "The output format. Currently, json is the only supported value.") testCmd.Flags().BoolVar(&AllFlag, "all", false, "test all resources.") - } func showTest(args []string, ns string) { - log.Debugf("showCluster called %v", args) log.Debugf("selector is %s", Selector) @@ -110,7 +108,7 @@ func showTest(args []string, ns string) { for _, result := range response.Results { fmt.Println("") - fmt.Println(fmt.Sprintf("cluster : %s", result.ClusterName)) + fmt.Printf("cluster : %s\n", result.ClusterName) // first, print the test results for the endpoints, which make up // the services @@ -124,15 +122,15 @@ func showTest(args []string, ns string) { // prints out a set of test results func printTestResults(testName string, results []msgs.ClusterTestDetail) { // print out the header for this group of tests - fmt.Println(fmt.Sprintf("%s%s", TreeBranch, testName)) + fmt.Printf("%s%s\n", TreeBranch, testName) // iterate though the results and print them! for _, v := range results { fmt.Printf("%s%s%s (%s): ", TreeBranch, TreeBranch, v.InstanceType, v.Message) if v.Available { - fmt.Println(fmt.Sprintf("%s", GREEN("UP"))) + fmt.Println(GREEN("UP")) } else { - fmt.Println(fmt.Sprintf("%s", RED("DOWN"))) + fmt.Println(RED("DOWN")) } } } diff --git a/cmd/pgo/cmd/update.go b/cmd/pgo/cmd/update.go index 798ccf36f7..9d6544a6a1 100644 --- a/cmd/pgo/cmd/update.go +++ b/cmd/pgo/cmd/update.go @@ -1,7 +1,7 @@ package cmd /* - Copyright 2017 - 2020 Crunchy Data Solutions, Inc. + Copyright 2017 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at @@ -29,11 +29,22 @@ var ( // DisableLogin allows a user to disable the ability for a PostgreSQL user to // log in DisableLogin bool + // DisableMetrics allows a user to disable metrics collection + DisableMetrics bool + // DisablePGBadger allows a user to disable pgBadger + DisablePGBadger bool // EnableLogin allows a user to enable the ability for a PostgreSQL user to // log in EnableLogin bool + // EnableMetrics allows a user to enable metrics collection + EnableMetrics bool + // EnablePGBadger allows a user to enable pgBadger + EnablePGBadger bool // ExpireUser sets a user to having their password expired ExpireUser bool + // ExporterRotatePassword rotates the password for the designated PostgreSQL + // user for handling metrics scraping + ExporterRotatePassword bool // PgoroleChangePermissions does something with the pgouser access controls, // I'm not sure but I wanted this at least to be documented PgoroleChangePermissions bool @@ -83,6 +94,10 @@ func init() { UpdateClusterCmd.Flags().StringVar(&CPULimit, "cpu-limit", "", "Set the number of millicores to limit for the CPU, e.g. "+ "\"100m\" or \"0.1\".") UpdateClusterCmd.Flags().BoolVar(&DisableAutofailFlag, "disable-autofail", false, "Disables autofail capabilities in the cluster.") + UpdateClusterCmd.Flags().BoolVar(&DisableMetrics, "disable-metrics", false, + "Disable the metrics collection sidecar. May cause brief downtime.") + UpdateClusterCmd.Flags().BoolVar(&DisablePGBadger, "disable-pgbadger", false, + "Disable the pgBadger sidecar. May cause brief downtime.") UpdateClusterCmd.Flags().BoolVar(&EnableAutofailFlag, "enable-autofail", false, "Enables autofail capabilities in the cluster.") UpdateClusterCmd.Flags().StringVar(&MemoryRequest, "memory", "", "Set the amount of RAM to request, e.g. "+ "1GiB.") @@ -107,9 +122,14 @@ func init() { "the Crunchy Postgres Exporter sidecar container.") UpdateClusterCmd.Flags().StringVar(&ExporterMemoryLimit, "exporter-memory-limit", "", "Set the amount of memory to limit for "+ "the Crunchy Postgres Exporter sidecar container.") - + UpdateClusterCmd.Flags().BoolVar(&EnableMetrics, "enable-metrics", false, + "Enable the metrics collection sidecar. May cause brief downtime.") + UpdateClusterCmd.Flags().BoolVar(&EnablePGBadger, "enable-pgbadger", false, + "Enable the pgBadger sidecar. May cause brief downtime.") + UpdateClusterCmd.Flags().BoolVar(&ExporterRotatePassword, "exporter-rotate-password", false, "Used to rotate the password for the metrics collection agent.") UpdateClusterCmd.Flags().BoolVarP(&EnableStandby, "enable-standby", "", false, "Enables standby mode in the cluster(s) specified.") + UpdateClusterCmd.Flags().StringVar(&ServiceType, "service-type", "", "The Service type to use for the PostgreSQL cluster.
If not set, the pgo.yaml default will be used.") UpdateClusterCmd.Flags().BoolVar(&Startup, "startup", false, "Restart the database cluster if it "+ "is currently shutdown.") UpdateClusterCmd.Flags().BoolVar(&Shutdown, "shutdown", false, "Shutdown the database "+ @@ -124,6 +144,12 @@ func init() { "Follows the Kubernetes quantity format.\n\n"+ "For example, to create a tablespace with the NFS storage configuration with a PVC of size 10GiB:\n\n"+ "--tablespace=name=ts1:storageconfig=nfsstorage:pvcsize=10Gi") + UpdateClusterCmd.Flags().StringSliceVar(&Tolerations, "toleration", []string{}, + "Set Pod tolerations for each PostgreSQL instance in a cluster.\n"+ + "The general format is \"key=value:Effect\"\n"+ + "For example, to add an Exists and an Equals toleration: \"--toleration=ssd:NoSchedule,zone=east:NoSchedule\"\n"+ + "A toleration can be removed by adding a \"-\" to the end, for example:\n"+ + "--toleration=ssd:NoSchedule-") UpdatePgBouncerCmd.Flags().StringVar(&PgBouncerCPURequest, "cpu", "", "Set the number of millicores to request for CPU "+ "for pgBouncer.") UpdatePgBouncerCmd.Flags().StringVar(&PgBouncerCPULimit, "cpu-limit", "", "Set the number of millicores to limit for CPU "+ @@ -137,6 +163,7 @@ func init() { UpdatePgBouncerCmd.Flags().Int32Var(&PgBouncerReplicas, "replicas", 0, "Set the total number of pgBouncer instances to deploy. If not set, defaults to 1.") UpdatePgBouncerCmd.Flags().BoolVar(&RotatePassword, "rotate-password", false, "Used to rotate the pgBouncer service account password. Can cause interruption of service.") UpdatePgBouncerCmd.Flags().StringVarP(&Selector, "selector", "s", "", "The selector to use for cluster filtering.") + UpdatePgBouncerCmd.Flags().StringVar(&ServiceType, "service-type", "", "The Service type to use for pgBouncer.") UpdatePgouserCmd.Flags().StringVarP(&PgouserNamespaces, "pgouser-namespaces", "", "", "The namespaces to use for updating the pgouser roles.") UpdatePgouserCmd.Flags().BoolVar(&AllNamespaces, "all-namespaces", false, "all namespaces.") UpdatePgouserCmd.Flags().StringVarP(&PgouserRoles, "pgouser-roles", "", "", "The roles to use for updating the pgouser roles.") @@ -160,7 +187,7 @@ func init() { UpdateUserCmd.Flags().BoolVar(&PasswordValidAlways, "valid-always", false, "Sets a password to never expire based on expiration time. Takes precedence over --valid-days") UpdateUserCmd.Flags().BoolVar(&RotatePassword, "rotate-password", false, "Rotates the user's password with an automatically generated password. The length of the password is determined by either --password-length or the value set on the server, in that order.") UpdateUserCmd.Flags().StringVarP(&Selector, "selector", "s", "", "The selector to use for cluster filtering.") - + UpdateUserCmd.Flags().BoolVar(&ShowSystemAccounts, "set-system-account-password", false, "Allows for a system account password to be set.") } // UpdateCmd represents the update command @@ -169,8 +196,8 @@ var UpdateCmd = &cobra.Command{ Use: "update", Short: "Update a pgouser, pgorole, or cluster", Long: `The update command allows you to update a pgouser, pgorole, or cluster.
For example: - pgo update cluster --selector=name=mycluster --autofail=false - pgo update cluster --all --autofail=true + pgo update cluster --selector=name=mycluster --disable-autofail + pgo update cluster --all --enable-autofail pgo update namespace mynamespace pgo update pgbouncer mycluster --rotate-password pgo update pgorole somerole --pgorole-permission="Cat" @@ -180,7 +207,6 @@ var UpdateCmd = &cobra.Command{ pgo update pgorole somerole --pgorole-permission="Cat" pgo update user mycluster --username=testuser --selector=name=mycluster --password=somepassword`, Run: func(cmd *cobra.Command, args []string) { - if len(args) == 0 { fmt.Println(`Error: You must specify the type of resource to update. Valid resource types include: * cluster @@ -203,7 +229,6 @@ var UpdateCmd = &cobra.Command{ * user`) } } - }, } @@ -215,7 +240,7 @@ var UpdateClusterCmd = &cobra.Command{ Short: "Update a PostgreSQL cluster", Long: `Update a PostgreSQL cluster. For example: - pgo update cluster mycluster --autofail=false + pgo update cluster mycluster --disable-autofail pgo update cluster mycluster myothercluster --disable-autofail pgo update cluster --selector=name=mycluster --disable-autofail pgo update cluster --all --enable-autofail`, @@ -248,6 +273,14 @@ var UpdateClusterCmd = &cobra.Command{ "from has been properly shutdown before proceeding!") } + if EnableMetrics || DisableMetrics { + fmt.Println("Adding or removing a metrics collection sidecar can cause downtime.") + } + + if EnablePGBadger || DisablePGBadger { + fmt.Println("Adding or removing a pgBadger sidecar can cause downtime.") + } + if len(Tablespaces) > 0 { fmt.Println("Adding tablespaces can cause downtime.") } @@ -300,7 +333,6 @@ pgo update user mycluster --username=foobar --disable-login pgo update user mycluster --username=foobar --enable-login `, Run: func(cmd *cobra.Command, args []string) { - if Namespace == "" { Namespace = PGONamespace } @@ -364,7 +396,6 @@ var UpdatePgouserCmd = &cobra.Command{ pgo update pgouser myuser --pgouser-password=somepassword --pgouser-roles=somerole pgo update pgouser myuser --pgouser-password=somepassword --no-prompt`, Run: func(cmd *cobra.Command, args []string) { - if Namespace == "" { Namespace = PGONamespace } @@ -376,13 +407,13 @@ var UpdatePgouserCmd = &cobra.Command{ } }, } + var UpdatePgoroleCmd = &cobra.Command{ Use: "pgorole", Short: "Update a pgorole", Long: `UPDATE allows you to update a pgo role. For example: pgo update pgorole somerole --permissions="Cat,Ls"`, Run: func(cmd *cobra.Command, args []string) { - if Namespace == "" { Namespace = PGONamespace } @@ -401,7 +432,6 @@ var UpdateNamespaceCmd = &cobra.Command{ Long: `UPDATE allows you to update a Namespace. For example: pgo update namespace mynamespace`, Run: func(cmd *cobra.Command, args []string) { - if len(args) == 0 { fmt.Println("Error: You must specify the name of a Namespace.") } else { diff --git a/cmd/pgo/cmd/upgrade.go b/cmd/pgo/cmd/upgrade.go index 0e734ade5e..969e80956c 100644 --- a/cmd/pgo/cmd/upgrade.go +++ b/cmd/pgo/cmd/upgrade.go @@ -2,7 +2,7 @@ package cmd /* - Copyright 2017 - 2020 Crunchy Data Solutions, Inc. + Copyright 2017 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at @@ -41,10 +41,10 @@ var UpgradeCCPImageTag string var UpgradeCmd = &cobra.Command{ Use: "upgrade", Short: "Perform a cluster upgrade.", - Long: `UPGRADE allows you to perform a comprehensive PGCluster upgrade - (for use after performing a Postgres Operator upgrade). + Long: `UPGRADE allows you to perform a comprehensive PGCluster upgrade + (for use after performing a Postgres Operator upgrade). For example: - + pgo upgrade mycluster Upgrades the cluster for use with the upgraded Postgres Operator version.`, Run: func(cmd *cobra.Command, args []string) { @@ -69,8 +69,9 @@ func init() { RootCmd.AddCommand(UpgradeCmd) // flags for "pgo upgrade" - UpgradeCmd.Flags().BoolVarP(&IgnoreValidation, "ignore-validation", "", false, "Disables version checking against the image tags when performing a cluster upgrade.") UpgradeCmd.Flags().StringVarP(&UpgradeCCPImageTag, "ccp-image-tag", "", "", "The image tag to use for cluster creation. If specified, it overrides the default configuration setting and disables tag validation checking.") + UpgradeCmd.Flags().BoolVarP(&IgnoreValidation, "ignore-validation", "", false, "Disables version checking against the image tags when performing a cluster upgrade.") + UpgradeCmd.Flags().BoolVar(&NoPrompt, "no-prompt", false, "No command line confirmation.") } func createUpgrade(args []string, ns string) { @@ -90,7 +91,6 @@ func createUpgrade(args []string, ns string) { request.UpgradeCCPImageTag = UpgradeCCPImageTag response, err := api.CreateUpgrade(httpclient, &SessionCredentials, &request) - if err != nil { fmt.Println("Error: " + err.Error()) os.Exit(2) @@ -104,5 +104,4 @@ func createUpgrade(args []string, ns string) { fmt.Println("Error: " + response.Status.Msg) os.Exit(2) } - } diff --git a/cmd/pgo/cmd/user.go b/cmd/pgo/cmd/user.go index a304ce4870..b3b005016c 100644 --- a/cmd/pgo/cmd/user.go +++ b/cmd/pgo/cmd/user.go @@ -1,7 +1,7 @@ package cmd /* - Copyright 2017 - 2020 Crunchy Data Solutions, Inc. + Copyright 2017 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -22,6 +22,7 @@ import ( "github.com/crunchydata/postgres-operator/cmd/pgo/api" "github.com/crunchydata/postgres-operator/cmd/pgo/util" + "github.com/crunchydata/postgres-operator/internal/apiserver" utiloperator "github.com/crunchydata/postgres-operator/internal/util" msgs "github.com/crunchydata/postgres-operator/pkg/apiservermsgs" @@ -55,7 +56,8 @@ var PasswordLength int var PasswordValidAlways bool // ShowSystemAccounts enables the display of the PostgreSQL user accounts that -// perform system functions, such as the "postgres" user +// perform system functions, such as the "postgres" user, and allows action to +// be taken on these accounts var ShowSystemAccounts bool func createUser(args []string, ns string) { @@ -88,13 +90,12 @@ func createUser(args []string, ns string) { } // determine if the user provides a valid password type - if _, err := msgs.GetPasswordType(PasswordType); err != nil { + if _, err := apiserver.GetPasswordType(PasswordType); err != nil { fmt.Println("Error:", err.Error()) os.Exit(1) } response, err := api.CreateUser(httpclient, &SessionCredentials, &request) - if err != nil { fmt.Println("Error: " + err.Error()) os.Exit(1) @@ -113,7 +114,6 @@ func createUser(args []string, ns string) { // deleteUser ...
func deleteUser(args []string, ns string) { - log.Debugf("deleting user %s selector=%s args=%v", Username, Selector, args) if Username == "" { @@ -130,7 +130,6 @@ } response, err := api.DeleteUser(httpclient, &SessionCredentials, &request) - if err != nil { fmt.Println("Error: " + err.Error()) os.Exit(1) @@ -348,7 +347,6 @@ func showUser(args []string, ns string) { } response, err := api.ShowUser(httpclient, &SessionCredentials, &request) - if err != nil { fmt.Println("Error: " + err.Error()) os.Exit(1) @@ -370,20 +368,21 @@ func updateUser(clusterNames []string, namespace string) { // set up the request request := msgs.UpdateUserRequest{ - AllFlag: AllFlag, - Clusters: clusterNames, - Expired: Expired, - ExpireUser: ExpireUser, - ManagedUser: ManagedUser, - Namespace: namespace, - Password: Password, - PasswordAgeDays: PasswordAgeDays, - PasswordLength: PasswordLength, - PasswordValidAlways: PasswordValidAlways, - PasswordType: PasswordType, - RotatePassword: RotatePassword, - Selector: Selector, - Username: strings.TrimSpace(Username), + AllFlag: AllFlag, + Clusters: clusterNames, + Expired: Expired, + ExpireUser: ExpireUser, + ManagedUser: ManagedUser, + Namespace: namespace, + Password: Password, + PasswordAgeDays: PasswordAgeDays, + PasswordLength: PasswordLength, + PasswordValidAlways: PasswordValidAlways, + PasswordType: PasswordType, + RotatePassword: RotatePassword, + Selector: Selector, + SetSystemAccountPassword: ShowSystemAccounts, + Username: strings.TrimSpace(Username), } // check to see if EnableLogin or DisableLogin is set. If so, set a value @@ -395,19 +394,19 @@ } // check to see if this is a system account if a user name is passed in - if request.Username != "" && utiloperator.IsPostgreSQLUserSystemAccount(request.Username) { - fmt.Println("Error:", request.Username, "is a system account and cannot be used") + if request.Username != "" && utiloperator.IsPostgreSQLUserSystemAccount(request.Username) && !request.SetSystemAccountPassword { + fmt.Println("Error:", request.Username, "is a system account and cannot be used. "+ + "You can override this with the \"--set-system-account-password\" flag.") os.Exit(1) } // determine if the user provides a valid password type - if _, err := msgs.GetPasswordType(PasswordType); err != nil { + if _, err := apiserver.GetPasswordType(PasswordType); err != nil { fmt.Println("Error:", err.Error()) os.Exit(1) } response, err := api.UpdateUser(httpclient, &SessionCredentials, &request) - if err != nil { fmt.Println("Error: " + err.Error()) os.Exit(1) diff --git a/cmd/pgo/cmd/version.go b/cmd/pgo/cmd/version.go index f66d22e5e2..b7c45a2c1c 100644 --- a/cmd/pgo/cmd/version.go +++ b/cmd/pgo/cmd/version.go @@ -1,7 +1,7 @@ package cmd /* - Copyright 2017 - 2020 Crunchy Data Solutions, Inc. + Copyright 2017 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at @@ -47,7 +47,6 @@ func init() { } func showVersion() { - // print the client version fmt.Println("pgo client version " + msgs.PGO_VERSION) @@ -58,7 +57,6 @@ func showVersion() { // otherwise, get the server version response, err := api.ShowVersion(httpclient, &SessionCredentials) - if err != nil { fmt.Println("Error: " + err.Error()) os.Exit(2) diff --git a/cmd/pgo/cmd/watch.go b/cmd/pgo/cmd/watch.go index 69d288a1d8..3ddff23f93 100644 --- a/cmd/pgo/cmd/watch.go +++ b/cmd/pgo/cmd/watch.go @@ -1,7 +1,7 @@ package cmd /* - Copyright 2019 - 2020 Crunchy Data Solutions, Inc. + Copyright 2019 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -17,14 +17,15 @@ package cmd import ( "fmt" - "github.com/nsqio/go-nsq" - log "github.com/sirupsen/logrus" - "github.com/spf13/cobra" "math/rand" "os" "os/signal" "syscall" "time" + + "github.com/nsqio/go-nsq" + log "github.com/sirupsen/logrus" + "github.com/spf13/cobra" ) type TailHandler struct { @@ -45,7 +46,7 @@ var watchCmd = &cobra.Command{ } log.Debug("watch called") - watch(args, Namespace) + watch(args) }, } @@ -57,7 +58,7 @@ func init() { watchCmd.Flags().StringVarP(&PGOEventAddress, "pgo-event-address", "a", "localhost:14150", "The address (host:port) where the event stream is.") } -func watch(args []string, ns string) { +func watch(args []string) { log.Debugf("watch called %v", args) if len(args) == 0 { @@ -66,10 +67,11 @@ func watch(args []string, ns string) { topic := args[0] - var totalMessages = 0 + totalMessages := 0 var channel string rand.Seed(time.Now().UnixNano()) + // #nosec: G404 channel = fmt.Sprintf("tail%06d#ephemeral", rand.Int()%999999) sigChan := make(chan os.Signal, 1) @@ -107,7 +109,6 @@ func watch(args []string, ns string) { for _, consumer := range consumers { <-consumer.StopChan } - } func (th *TailHandler) HandleMessage(m *nsq.Message) error { diff --git a/cmd/pgo/cmd/workflow.go b/cmd/pgo/cmd/workflow.go index 56276cce9e..944544b732 100644 --- a/cmd/pgo/cmd/workflow.go +++ b/cmd/pgo/cmd/workflow.go @@ -1,7 +1,7 @@ package cmd /* - Copyright 2018 - 2020 Crunchy Data Solutions, Inc. + Copyright 2018 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -34,13 +34,10 @@ func showWorkflow(args []string, ns string) { } printWorkflow(args[0], ns) - } func printWorkflow(id, ns string) { - response, err := api.ShowWorkflow(httpclient, id, &SessionCredentials, ns) - if err != nil { fmt.Println("Error: " + err.Error()) os.Exit(2) @@ -58,5 +55,4 @@ func printWorkflow(id, ns string) { for k, v := range response.Results.Parameters { fmt.Printf("%s%s\n", util.Rpad(k, " ", 20), v) } - } diff --git a/cmd/pgo/generatedocs.go b/cmd/pgo/generatedocs.go index ddd859f214..177577c89d 100644 --- a/cmd/pgo/generatedocs.go +++ b/cmd/pgo/generatedocs.go @@ -3,7 +3,7 @@ package main /* - Copyright 2019 - 2020 Crunchy Data Solutions, Inc. + Copyright 2019 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at @@ -35,7 +35,6 @@ title: "%s" ` func main() { - fmt.Println("generate CLI markdown") filePrepender := func(filename string) string { diff --git a/cmd/pgo/main.go b/cmd/pgo/main.go index f868a3ac7f..c3f1c3ae45 100644 --- a/cmd/pgo/main.go +++ b/cmd/pgo/main.go @@ -1,7 +1,7 @@ package main /* - Copyright 2017 - 2020 Crunchy Data Solutions, Inc. + Copyright 2017 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -28,5 +28,4 @@ func main() { fmt.Println(err) os.Exit(1) } - } diff --git a/cmd/pgo/util/confirmation.go b/cmd/pgo/util/confirmation.go index c227055cb1..a79747a735 100644 --- a/cmd/pgo/util/confirmation.go +++ b/cmd/pgo/util/confirmation.go @@ -1,7 +1,7 @@ package util /* - Copyright 2018 - 2020 Crunchy Data Solutions, Inc. + Copyright 2018 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at diff --git a/cmd/pgo/util/pad.go b/cmd/pgo/util/pad.go index 276469471a..a68dc7852d 100644 --- a/cmd/pgo/util/pad.go +++ b/cmd/pgo/util/pad.go @@ -1,7 +1,7 @@ package util /* - Copyright 2018 - 2020 Crunchy Data Solutions, Inc. + Copyright 2018 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at diff --git a/cmd/pgo/util/validation.go b/cmd/pgo/util/validation.go index 7d90f6a6ac..1cc1de167b 100644 --- a/cmd/pgo/util/validation.go +++ b/cmd/pgo/util/validation.go @@ -1,7 +1,7 @@ package util /* - Copyright 2019 - 2020 Crunchy Data Solutions, Inc. + Copyright 2019 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -31,7 +31,6 @@ var validResourceName = regexp.MustCompile(`^[a-z0-9.\-]+$`).MatchString // https://kubernetes.io/docs/concepts/overview/working-with-objects/names/ // func IsValidForResourceName(target string) bool { - log.Debugf("IsValidForResourceName: %s", target) return validResourceName(target) @@ -48,7 +47,7 @@ func IsValidForResourceName(target string) bool { func ValidateQuantity(quantity, flag string) error { if quantity != "" { if _, err := resource.ParseQuantity(quantity); err != nil { - return fmt.Errorf("Error: \"%s\" - %s", flag, err.Error()) + return fmt.Errorf("Error: \"%s\" - %w", flag, err) } } diff --git a/cmd/postgres-operator/main.go b/cmd/postgres-operator/main.go index 325303c9a2..3bfb05e485 100644 --- a/cmd/postgres-operator/main.go +++ b/cmd/postgres-operator/main.go @@ -1,7 +1,7 @@ package main /* -Copyright 2017 - 2020 Crunchy Data +Copyright 2017 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at @@ -39,9 +39,8 @@ import ( ) func main() { - debugFlag := os.Getenv("CRUNCHY_DEBUG") - //add logging configuration + // add logging configuration crunchylog.CrunchyLogger(crunchylog.SetParameters()) if debugFlag == "true" { log.SetLevel(log.DebugLevel) @@ -50,13 +49,12 @@ func main() { log.Info("debug flag set to false") } - //give time for pgo-event to start up + // give time for pgo-event to start up time.Sleep(time.Duration(5) * time.Second) client, err := kubeapi.NewClient() if err != nil { - log.Error(err) - os.Exit(2) + log.Fatal(err) } operator.Initialize(client) @@ -66,8 +64,7 @@ func main() { // list of target namespaces for the operator install namespaceList, err := operator.SetupNamespaces(client) if err != nil { - log.Errorf("Error configuring operator namespaces: %v", err) - os.Exit(2) + log.Fatalf("Error configuring operator namespaces: %v", err) } // set up signals so we handle the first shutdown signal gracefully @@ -78,8 +75,7 @@ func main() { controllerManager, err := manager.NewControllerManager(namespaceList, operator.Pgo, operator.PgoNamespace, operator.InstallationName, operator.NamespaceOperatingMode()) if err != nil { - log.Error(err) - os.Exit(2) + log.Fatal(err) } log.Debug("controller manager created") @@ -115,7 +111,6 @@ func main() { // createAndStartNamespaceController creates a namespace controller and then starts it func createAndStartNamespaceController(kubeClientset kubernetes.Interface, controllerManager controller.Manager, stopCh <-chan struct{}) error { - nsKubeInformerFactory := kubeinformers.NewSharedInformerFactoryWithOptions(kubeClientset, time.Duration(*operator.Pgo.Pgo.NamespaceRefreshInterval)*time.Second, kubeinformers.WithTweakListOptions(func(options *metav1.ListOptions) { diff --git a/conf/postgres-operator/pgo.yaml b/conf/postgres-operator/pgo.yaml index ff5c97ec7f..a5289e1f6e 100644 --- a/conf/postgres-operator/pgo.yaml +++ b/conf/postgres-operator/pgo.yaml @@ -2,7 +2,7 @@ Cluster: CCPImagePrefix: registry.developers.crunchydata.com/crunchydata Metrics: false Badger: false - CCPImageTag: centos7-12.4-4.5.0 + CCPImageTag: ubi8-13.10-4.6.10 Port: 5432 PGBadgerPort: 10000 ExporterPort: 9187 @@ -11,7 +11,6 @@ Cluster: PasswordAgeDays: 0 PasswordLength: 24 Replicas: 0 - ArchiveMode: false ServiceType: ClusterIP BackrestPort: 2022 BackrestS3Bucket: @@ -28,7 +27,6 @@ Cluster: DefaultBackrestMemory: DefaultPgBouncerMemory: DefaultExporterMemory: - DisableFSGroup: false PrimaryStorage: default WALStorage: BackupStorage: default @@ -82,4 +80,4 @@ Storage: Pgo: Audit: false PGOImagePrefix: registry.developers.crunchydata.com/crunchydata - PGOImageTag: centos7-4.5.0 + PGOImageTag: ubi8-4.6.10 diff --git a/crunchy_logo.png b/crunchy_logo.png deleted file mode 100644 index 2fbf3352c1..0000000000 Binary files a/crunchy_logo.png and /dev/null differ diff --git a/deploy/add-targeted-namespace-reconcile-rbac.sh b/deploy/add-targeted-namespace-reconcile-rbac.sh index 8438c10912..420eb343d6 100755 --- a/deploy/add-targeted-namespace-reconcile-rbac.sh +++ b/deploy/add-targeted-namespace-reconcile-rbac.sh @@ -1,6 +1,6 @@ #!/bin/bash -# Copyright 2020 Crunchy Data Solutions, Inc. +# Copyright 2020 - 2023 Crunchy Data Solutions, Inc. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at diff --git a/deploy/add-targeted-namespace.sh b/deploy/add-targeted-namespace.sh index af088314d9..80163fa2c3 100755 --- a/deploy/add-targeted-namespace.sh +++ b/deploy/add-targeted-namespace.sh @@ -1,6 +1,6 @@ #!/bin/bash -# Copyright 2019 - 2020 Crunchy Data Solutions, Inc. +# Copyright 2019 - 2023 Crunchy Data Solutions, Inc. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at diff --git a/deploy/cleannamespaces.sh b/deploy/cleannamespaces.sh index 66cd693863..983f92f1b0 100755 --- a/deploy/cleannamespaces.sh +++ b/deploy/cleannamespaces.sh @@ -1,6 +1,6 @@ #!/bin/bash -# Copyright 2019 - 2020 Crunchy Data Solutions, Inc. +# Copyright 2019 - 2023 Crunchy Data Solutions, Inc. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at diff --git a/deploy/cleanup-rbac.sh b/deploy/cleanup-rbac.sh index 50f52bbc5f..c6cd7c8ccc 100755 --- a/deploy/cleanup-rbac.sh +++ b/deploy/cleanup-rbac.sh @@ -1,6 +1,6 @@ #!/bin/bash -# Copyright 2018 - 2020 Crunchy Data Solutions, Inc. +# Copyright 2018 - 2023 Crunchy Data Solutions, Inc. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at diff --git a/deploy/cleanup.sh b/deploy/cleanup.sh index afe13f98c7..d779d68393 100755 --- a/deploy/cleanup.sh +++ b/deploy/cleanup.sh @@ -1,5 +1,5 @@ #!/bin/bash -# Copyright 2017 - 2020 Crunchy Data Solutions, Inc. +# Copyright 2017 - 2023 Crunchy Data Solutions, Inc. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at diff --git a/deploy/cluster-role-bindings.yaml b/deploy/cluster-role-bindings.yaml index be7d75bb2f..1f6f9a2b35 100644 --- a/deploy/cluster-role-bindings.yaml +++ b/deploy/cluster-role-bindings.yaml @@ -3,6 +3,8 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: name: pgo-cluster-role + labels: + vendor: crunchydata roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole diff --git a/deploy/cluster-roles-readonly.yaml b/deploy/cluster-roles-readonly.yaml index 773e6cd07e..900e83d434 100644 --- a/deploy/cluster-roles-readonly.yaml +++ b/deploy/cluster-roles-readonly.yaml @@ -2,6 +2,8 @@ kind: ClusterRole apiVersion: rbac.authorization.k8s.io/v1 metadata: name: pgo-cluster-role + labels: + vendor: crunchydata rules: - apiGroups: - '' diff --git a/deploy/cluster-roles.yaml b/deploy/cluster-roles.yaml index cb0bb85b41..e2a90137c4 100644 --- a/deploy/cluster-roles.yaml +++ b/deploy/cluster-roles.yaml @@ -3,6 +3,8 @@ kind: ClusterRole apiVersion: rbac.authorization.k8s.io/v1 metadata: name: pgo-cluster-role + labels: + vendor: crunchydata rules: - apiGroups: - '' @@ -41,8 +43,6 @@ rules: - endpoints - pods - pods/exec - - pods/log - - replicasets - secrets - services - persistentvolumeclaims @@ -55,10 +55,19 @@ rules: - update - delete - deletecollection + - apiGroups: + - '' + resources: + - pods/log + verbs: + - get + - list + - watch - apiGroups: - apps resources: - deployments + - replicasets verbs: - get - list diff --git a/deploy/deploy.sh b/deploy/deploy.sh index 823671c7d9..30fbf32b37 100755 --- a/deploy/deploy.sh +++ b/deploy/deploy.sh @@ -1,5 +1,5 @@ #!/bin/bash -# Copyright 2017 - 2020 Crunchy Data Solutions, Inc. +# Copyright 2017 - 2023 Crunchy Data Solutions, Inc. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at @@ -46,12 +46,14 @@ fi pgbackrest_aws_s3_key=$(awsKeySecret "aws-s3-key") pgbackrest_aws_s3_key_secret=$(awsKeySecret "aws-s3-key-secret") -$PGO_CMD --namespace=$PGO_OPERATOR_NAMESPACE create secret generic pgo-backrest-repo-config \ - --from-file=config=${PGO_CONF_DIR}/pgo-backrest-repo/config \ - --from-file=sshd_config=${PGO_CONF_DIR}/pgo-backrest-repo/sshd_config \ - --from-file=aws-s3-ca.crt=${PGO_CONF_DIR}/pgo-backrest-repo/aws-s3-ca.crt \ - --from-literal=aws-s3-key="${pgbackrest_aws_s3_key}" \ - --from-literal=aws-s3-key-secret="${pgbackrest_aws_s3_key_secret}" +if [[ ! -z $pgbackrest_aws_s3_key ]] || [[ ! 
-z $pgbackrest_aws_s3_key_secret ]] +then + $PGO_CMD --namespace=$PGO_OPERATOR_NAMESPACE create secret generic pgo-backrest-repo-config \ + --from-literal=aws-s3-key="${pgbackrest_aws_s3_key}" \ + --from-literal=aws-s3-key-secret="${pgbackrest_aws_s3_key_secret}" + $PGO_CMD --namespace=$PGO_OPERATOR_NAMESPACE label secret pgo-backrest-repo-config \ + vendor=crunchydata +fi # # credentials for pgo-apiserver TLS REST API @@ -63,11 +65,12 @@ then fi $PGO_CMD --namespace=$PGO_OPERATOR_NAMESPACE create secret tls pgo.tls --key=${PGOROOT}/conf/postgres-operator/server.key --cert=${PGOROOT}/conf/postgres-operator/server.crt +$PGO_CMD --namespace=$PGO_OPERATOR_NAMESPACE label secret pgo.tls vendor=crunchydata $PGO_CMD --namespace=$PGO_OPERATOR_NAMESPACE create configmap pgo-config \ --from-file=${PGOROOT}/conf/postgres-operator/pgo.yaml \ --from-file=${PGO_CONF_DIR}/pgo-configs - +$PGO_CMD --namespace=$PGO_OPERATOR_NAMESPACE label configmap pgo-config vendor=crunchydata # # check if custom port value is set, otherwise set default values diff --git a/deploy/deployment.json b/deploy/deployment.json index e4da5c318b..8e7880b4d0 100644 --- a/deploy/deployment.json +++ b/deploy/deployment.json @@ -24,11 +24,18 @@ }, "spec": { "serviceAccountName": "postgres-operator", + "securityContext": { + "runAsNonRoot": true + }, "containers": [ { "name": "apiserver", "image": "$PGO_IMAGE_PREFIX/pgo-apiserver:$PGO_IMAGE_TAG", "imagePullPolicy": "IfNotPresent", + "securityContext": { + "allowPrivilegeEscalation": false, + "privileged": false + }, "ports": [ { "containerPort": $PGO_APISERVER_PORT } ], @@ -109,6 +116,10 @@ "name": "operator", "image": "$PGO_IMAGE_PREFIX/postgres-operator:$PGO_IMAGE_TAG", "imagePullPolicy": "IfNotPresent", + "securityContext": { + "allowPrivilegeEscalation": false, + "privileged": false + }, "readinessProbe": { "exec": { "command": [ @@ -161,6 +172,10 @@ }, { "name": "scheduler", "image": "$PGO_IMAGE_PREFIX/pgo-scheduler:$PGO_IMAGE_TAG", + "securityContext": { + "allowPrivilegeEscalation": false, + "privileged": false + }, "livenessProbe": { "exec": { "command": [ @@ -209,6 +224,10 @@ { "name": "event", "image": "$PGO_IMAGE_PREFIX/pgo-event:$PGO_IMAGE_TAG", + "securityContext": { + "allowPrivilegeEscalation": false, + "privileged": false + }, "livenessProbe": { "httpGet": { "path": "/ping", diff --git a/deploy/gen-api-keys.sh b/deploy/gen-api-keys.sh index 8aece10000..de7f67fa98 100755 --- a/deploy/gen-api-keys.sh +++ b/deploy/gen-api-keys.sh @@ -1,6 +1,6 @@ #!/bin/bash -# Copyright 2019 - 2020 Crunchy Data Solutions, Inc. +# Copyright 2019 - 2023 Crunchy Data Solutions, Inc. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at @@ -13,28 +13,15 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-# # generate self signed cert for apiserver REST service -# - openssl req \ --x509 \ --nodes \ --newkey rsa:2048 \ --keyout $PGOROOT/conf/postgres-operator/server.key \ --out $PGOROOT/conf/postgres-operator/server.crt \ --days 3650 \ --subj "/C=US/ST=Texas/L=Austin/O=TestOrg/OU=TestDepartment/CN=*" - -# generate CA -#openssl genrsa -out $PGOROOT/conf/apiserver/rootCA.key 4096 -#openssl req -x509 -new -key $PGOROOT/conf/apiserver/rootCA.key -days 3650 -out $PGOROOT/conf/apiserver/rootCA.crt - -# generate cert for secure.domain.com signed with the created CA -#openssl genrsa -out $PGOROOT/conf/apiserver/secure.domain.com.key 2048 -#openssl req -new -key $PGOROOT/conf/apiserver/secure.domain.com.key -out $PGOROOT/conf/apiserver/secure.domain.com.csr -#In answer to question `Common Name (e.g. server FQDN or YOUR name) []:` you should set `secure.domain.com` (your real domain name) -#openssl x509 -req -in $PGOROOT/conf/apiserver/secure.domain.com.csr -CA $PGOROOT/conf/apiserver/rootCA.crt -CAkey $PGOROOT/conf/apiserver/rootCA.key -CAcreateserial -days 365 -out $PGOROOT/conf/apiserver/secure.domain.com.crt - -#openssl genrsa 2048 > $PGOROOT/conf/apiserver/key.pem -#openssl req -new -x509 -key $PGOROOT/conf/apiserver/key.pem -out $PGOROOT/conf/apiserver/cert.pem -days 1095 + -x509 \ + -nodes \ + -newkey ec \ + -pkeyopt ec_paramgen_curve:prime256v1 \ + -pkeyopt ec_param_enc:named_curve \ + -sha384 \ + -keyout $PGOROOT/conf/postgres-operator/server.key \ + -out $PGOROOT/conf/postgres-operator/server.crt \ + -days 3650 \ + -subj "/CN=*" diff --git a/deploy/install-bootstrap-creds.sh b/deploy/install-bootstrap-creds.sh index 1b446824d3..9b16665a58 100755 --- a/deploy/install-bootstrap-creds.sh +++ b/deploy/install-bootstrap-creds.sh @@ -1,6 +1,6 @@ #!/bin/bash -# Copyright 2019 - 2020 Crunchy Data Solutions, Inc. +# Copyright 2019 - 2023 Crunchy Data Solutions, Inc. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at @@ -17,9 +17,9 @@ set -eu # fill out these variables if you want to change the # default pgo bootstrap user and role -PGOADMIN_USERNAME=admin -PGOADMIN_PASSWORD=examplepassword -PGOADMIN_ROLENAME=pgoadmin +PGOADMIN_USERNAME="${PGOADMIN_USERNAME:-admin}" +PGOADMIN_PASSWORD="${PGOADMIN_PASSWORD:-examplepassword}" +PGOADMIN_ROLENAME="${PGOADMIN_ROLENAME:-pgoadmin}" PGOADMIN_PERMS="*" diff --git a/deploy/install-rbac.sh b/deploy/install-rbac.sh index d96532d9f1..f23f8b784f 100755 --- a/deploy/install-rbac.sh +++ b/deploy/install-rbac.sh @@ -1,6 +1,6 @@ #!/bin/bash -# Copyright 2018 - 2020 Crunchy Data Solutions, Inc. +# Copyright 2018 - 2023 Crunchy Data Solutions, Inc. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at diff --git a/deploy/local-namespace-rbac.yaml b/deploy/local-namespace-rbac.yaml index d74f947653..29277675c6 100644 --- a/deploy/local-namespace-rbac.yaml +++ b/deploy/local-namespace-rbac.yaml @@ -3,6 +3,8 @@ kind: Role apiVersion: rbac.authorization.k8s.io/v1 metadata: name: pgo-local-ns + labels: + vendor: crunchydata rules: - apiGroups: - '' @@ -28,6 +30,8 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: name: pgo-local-ns + labels: + vendor: crunchydata roleRef: apiGroup: rbac.authorization.k8s.io kind: Role @@ -41,6 +45,8 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: name: pgo-target-role-binding + labels: + vendor: crunchydata roleRef: apiGroup: rbac.authorization.k8s.io kind: Role diff --git a/deploy/remove-crd.sh b/deploy/remove-crd.sh index 764645264f..17a56c1872 100755 --- a/deploy/remove-crd.sh +++ b/deploy/remove-crd.sh @@ -1,5 +1,5 @@ #!/bin/bash -# Copyright 2017 - 2020 Crunchy Data Solutions, Inc. +# Copyright 2017 - 2023 Crunchy Data Solutions, Inc. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at diff --git a/deploy/role-bindings.yaml b/deploy/role-bindings.yaml index b8f21c2391..916858e8ed 100644 --- a/deploy/role-bindings.yaml +++ b/deploy/role-bindings.yaml @@ -4,6 +4,8 @@ kind: RoleBinding metadata: name: pgo-role namespace: "$PGO_OPERATOR_NAMESPACE" + labels: + vendor: crunchydata roleRef: apiGroup: rbac.authorization.k8s.io kind: Role diff --git a/deploy/roles.yaml b/deploy/roles.yaml index 899551f6a1..e800165f15 100644 --- a/deploy/roles.yaml +++ b/deploy/roles.yaml @@ -4,6 +4,8 @@ apiVersion: rbac.authorization.k8s.io/v1 metadata: name: pgo-role namespace: "$PGO_OPERATOR_NAMESPACE" + labels: + vendor: crunchydata rules: - apiGroups: - '' diff --git a/deploy/service-accounts.yaml b/deploy/service-accounts.yaml index f631c8e06b..d48909d647 100644 --- a/deploy/service-accounts.yaml +++ b/deploy/service-accounts.yaml @@ -4,3 +4,5 @@ kind: ServiceAccount metadata: name: postgres-operator namespace: $PGO_OPERATOR_NAMESPACE + labels: + vendor: crunchydata diff --git a/deploy/service.json b/deploy/service.json index f026f5d7d5..97b76e4bc9 100644 --- a/deploy/service.json +++ b/deploy/service.json @@ -4,7 +4,8 @@ "metadata": { "name": "postgres-operator", "labels": { - "name": "postgres-operator" + "name": "postgres-operator", + "vendor": "crunchydata" } }, "spec": { diff --git a/deploy/setupnamespaces.sh b/deploy/setupnamespaces.sh index 9d2188a56f..2cb8c0dbea 100755 --- a/deploy/setupnamespaces.sh +++ b/deploy/setupnamespaces.sh @@ -1,6 +1,6 @@ #!/bin/bash -# Copyright 2019 - 2020 Crunchy Data Solutions, Inc. +# Copyright 2019 - 2023 Crunchy Data Solutions, Inc. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at diff --git a/deploy/show-crd.sh b/deploy/show-crd.sh index 7f40285c5d..09e6690002 100755 --- a/deploy/show-crd.sh +++ b/deploy/show-crd.sh @@ -1,5 +1,5 @@ #!/bin/bash -# Copyright 2017 - 2020 Crunchy Data Solutions, Inc. +# Copyright 2017 - 2023 Crunchy Data Solutions, Inc. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at diff --git a/deploy/upgrade-creds.sh b/deploy/upgrade-creds.sh index ddc0953df7..df4740dfbd 100755 --- a/deploy/upgrade-creds.sh +++ b/deploy/upgrade-creds.sh @@ -1,6 +1,6 @@ #!/bin/bash -# Copyright 2019 - 2020 Crunchy Data Solutions, Inc. +# Copyright 2019 - 2023 Crunchy Data Solutions, Inc. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at diff --git a/deploy/upgrade-pgo.sh b/deploy/upgrade-pgo.sh index 72fdb420f0..38b4e68832 100755 --- a/deploy/upgrade-pgo.sh +++ b/deploy/upgrade-pgo.sh @@ -1,6 +1,6 @@ #!/bin/bash -# Copyright 2020 Crunchy Data Solutions, Inc. +# Copyright 2020 - 2023 Crunchy Data Solutions, Inc. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at diff --git a/docs/config.toml b/docs/config.toml index 48ef2d760d..e150db3704 100644 --- a/docs/config.toml +++ b/docs/config.toml @@ -2,7 +2,7 @@ baseURL= "" languageCode = "en-us" DefaultContentLanguage = "en" -title = "Crunchy PostgreSQL Operator Documentation" +title = "PGO: PostgreSQL Operator from Crunchy Data Documentation" theme = "crunchy-hugo-theme" pygmentsCodeFences = true pygmentsStyle = "monokailight" @@ -25,16 +25,13 @@ disableNavChevron = false # set true to hide next/prev chevron, default is false highlightClientSide = false # set true to use highlight.pack.js instead of the default hugo chroma highlighter menushortcutsnewtab = true # set true to open shortcuts links to a new tab/window enableGitInfo = true -operatorVersion = "4.5.0" -postgresVersion = "12.4" -postgresVersion13 = "13.0" -postgresVersion12 = "12.4" -postgresVersion11 = "11.9" -postgresVersion10 = "10.14" -postgresVersion96 = "9.6.19" -postgresVersion95 = "9.5.23" +operatorVersion = "4.6.10" +postgresVersion = "13.10" +postgresVersion13 = "13.10" +postgresVersion12 = "12.14" +postgresVersion11 = "11.19" postgisVersion = "3.0" -centosBase = "centos7" +ubiBase = "ubi8" [outputs] home = [ "HTML", "RSS", "JSON"] diff --git a/docs/content/Configuration/compatibility.md b/docs/content/Configuration/compatibility.md index b805ef9c08..c001104811 100644 --- a/docs/content/Configuration/compatibility.md +++ b/docs/content/Configuration/compatibility.md @@ -12,7 +12,75 @@ version dependencies between the two projects. 
Below are the operator releases a | Operator Release | Container Release | Postgres | PgBackrest Version |:----------|:-------------|:------------|:-------------- -| 4.5.0 | 4.5.0 | 12.4 | 2.29 | +| 4.6.10 | 4.6.10 | 13.10 | 2.31 | +|||12.14|2.31| +|||11.19|2.31| +|||| +| 4.6.9 | 4.6.9 | 13.9 | 2.31 | +|||12.13|2.31| +|||11.18|2.31| +|||10.23|2.31| +|||| +| 4.6.8 | 4.6.8 | 13.8 | 2.31 | +|||12.12|2.31| +|||11.17|2.31| +|||10.22|2.31| +|||| +| 4.6.7 | 4.6.7 | 13.7 | 2.31 | +|||12.11|2.31| +|||11.16|2.31| +|||10.21|2.31| +|||| +| 4.6.6 | 4.6.6 | 13.6 | 2.31 | +|||12.10|2.31| +|||11.15|2.31| +|||10.20|2.31| +|||| +| 4.6.5 | 4.6.5 | 13.5 | 2.31 | +|||12.9|2.31| +|||11.14|2.31| +|||10.19|2.31| +|||9.6.24|2.31| +|||| +| 4.6.4 | 4.6.4 | 13.4 | 2.31 | +|||12.8|2.31| +|||11.13|2.31| +|||10.18|2.31| +|||9.6.23|2.31| +|||| +| 4.6.3 | 4.6.3 | 13.3 | 2.31 | +|||12.7|2.31| +|||11.12|2.31| +|||10.17|2.31| +|||9.6.22|2.31| +|||| +| 4.6.2 | 4.6.2 | 13.2 | 2.31 | +|||12.6|2.31| +|||11.11|2.31| +|||10.16|2.31| +|||9.6.21|2.31| +|||| +| 4.6.1 | 4.6.1 | 13.2 | 2.31 | +|||12.6|2.31| +|||11.11|2.31| +|||10.16|2.31| +|||9.6.21|2.31| +|||| +| 4.6.0 | 4.6.0 | 13.1 | 2.31 | +|||12.5|2.31| +|||11.10|2.31| +|||10.15|2.31| +|||9.6.20|2.31| +|||| +| 4.5.1 | 4.5.1 | 13.1 | 2.29 | +|||12.5|2.29| +|||11.10|2.29| +|||10.15|2.29| +|||9.6.20|2.29| +|||9.5.24|2.29| +|||| +| 4.5.0 | 4.5.0 | 13.0 | 2.29 | +|||12.4|2.29| |||11.9|2.29| |||10.14|2.29| |||9.6.19|2.29| diff --git a/docs/content/Configuration/configuration.md b/docs/content/Configuration/configuration.md index e85823a865..e6bae33be9 100644 --- a/docs/content/Configuration/configuration.md +++ b/docs/content/Configuration/configuration.md @@ -16,9 +16,9 @@ The configuration files used by the Operator are found in 2 places: * the pgo-config ConfigMap in the namespace the Operator is running in * or, a copy of the configuration files are also included by default into the Operator container images themselves to support a very simplistic deployment of the Operator -If the pgo-config ConfigMap is not found by the Operator, it will use -the configuration files that are included in the Operator container -images. +If the `pgo-config` ConfigMap is not found by the Operator, it will create a +`pgo-config` ConfigMap using the configuration files that are included in the +Operator container. ## conf/postgres-operator/pgo.yaml The *pgo.yaml* file sets many different Operator configuration settings and is described in the [pgo.yaml configuration]({{< ref "pgo-yaml-configuration.md" >}}) documentation section. diff --git a/docs/content/Configuration/pgo-yaml-configuration.md b/docs/content/Configuration/pgo-yaml-configuration.md index c1b6a894e1..1faebfd60f 100644 --- a/docs/content/Configuration/pgo-yaml-configuration.md +++ b/docs/content/Configuration/pgo-yaml-configuration.md @@ -16,14 +16,13 @@ The *pgo.yaml* file is broken into major sections as described below: |---|---| |BasicAuth | If set to `"true"` will enable Basic Authentication. If set to `"false"`, will allow a valid Operator user to successfully authenticate regardless of the value of the password provided for Basic Authentication. Defaults to `"true".` |CCPImagePrefix |newly created containers will be based on this image prefix (e.g. crunchydata), update this if you require a custom image prefix -|CCPImageTag |newly created containers will be based on this image version (e.g. 
{{< param centosBase >}}-{{< param postgresVersion >}}-{{< param operatorVersion >}}), unless you override it using the --ccp-image-tag command line flag +|CCPImageTag |newly created containers will be based on this image version (e.g. {{< param ubiBase >}}-{{< param postgresVersion >}}-{{< param operatorVersion >}}), unless you override it using the --ccp-image-tag command line flag |Port | the PostgreSQL port to use for new containers (e.g. 5432) |PGBadgerPort | the port used to connect to pgbadger (e.g. 10000) |ExporterPort | the port used to connect to postgres exporter (e.g. 9187) |User | the PostgreSQL normal user name |Database | the PostgreSQL normal user database |Replicas | the number of cluster replicas to create for newly created clusters, typically users will scale up replicas on the pgo CLI command line but this global value can be set as well -|PgmonitorPassword | the password to use for pgmonitor metrics collection if you specify --metrics when creating a PG cluster |Metrics | boolean, if set to true will cause each new cluster to include crunchy-postgres-exporter as a sidecar container for metrics collection, if set to false (default), users can still add metrics on a cluster-by-cluster basis using the pgo command flag --metrics |Badger | boolean, if set to true will cause each new cluster to include crunchy-pgbadger as a sidecar container for static log analysis, if set to false (default), users can still add pgbadger on a cluster-by-cluster basis using the pgo create cluster command flag --pgbadger |Policies | optional, list of policies to apply to a newly created cluster, comma separated, must be valid policies in the catalog @@ -45,9 +44,9 @@ The *pgo.yaml* file is broken into major sections as described below: | Setting|Definition | |---|---| |PrimaryStorage |required, the value of the storage configuration to use for the primary PostgreSQL deployment -|BackupStorage |required, the value of the storage configuration to use for backups, including the storage for pgbackrest repo volumes |ReplicaStorage |required, the value of the storage configuration to use for the replica PostgreSQL deployments -|BackrestStorage |required, the value of the storage configuration to use for the pgbackrest shared repository deployment created when a user specifies pgbackrest to be enabled on a cluster +|BackrestStorage |required, the value of the storage configuration to use for the pgBackRest repository. +|BackupStorage |required, the value of the storage configuration to use for backups generated by `pg_dump`. |WALStorage | optional, the value of the storage configuration to use for PostgreSQL Write Ahead Log |StorageClass | optional, for a dynamic storage type, you can specify the storage class used for storage provisioning (e.g. standard, gold, fast) |AccessMode |the access mode for new PVCs (e.g. ReadWriteMany, ReadWriteOnce, ReadOnlyMany). See below for descriptions of these. 
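To make the relationship between these storage settings concrete, here is a minimal sketch of how a named storage configuration might be wired together in pgo.yaml. This is illustrative only: the configuration name `nfsstorage` and the field values are example assumptions, not shipped defaults.

```
# Hypothetical pgo.yaml excerpt. The Cluster section selects storage
# configurations by name; each referenced name must exist under Storage.
Cluster:
  PrimaryStorage: nfsstorage    # data volumes for primary instances
  ReplicaStorage: nfsstorage    # data volumes for replicas
  BackrestStorage: nfsstorage   # pgBackRest repository volume
  BackupStorage: nfsstorage     # pg_dump backup volumes
Storage:
  nfsstorage:
    AccessMode: ReadWriteMany   # see the access mode descriptions below
    Size: 1Gi                   # example capacity only
    StorageType: create
```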
diff --git a/docs/content/Security/configure-postgres-operator-rbac.md b/docs/content/Security/configure-postgres-operator-rbac.md index 63c9e7d36e..4a6d9e5da2 100644 --- a/docs/content/Security/configure-postgres-operator-rbac.md +++ b/docs/content/Security/configure-postgres-operator-rbac.md @@ -72,6 +72,7 @@ The following list shows the current complete list of possible pgo permissions t |DfCluster | allow *pgo df*| |Label | allow *pgo label*| |Reload | allow *pgo reload*| +|Restart | allow *pgo restart*| |Restore | allow *pgo restore*| |RestoreDump | allow *pgo restore* for pgdumps| |ShowBackup | allow *pgo show backup*| diff --git a/docs/content/Upgrade/automatedupgrade.md b/docs/content/Upgrade/automatedupgrade.md index 31480bc07e..72184ced6a 100644 --- a/docs/content/Upgrade/automatedupgrade.md +++ b/docs/content/Upgrade/automatedupgrade.md @@ -127,7 +127,7 @@ pgo upgrade mycluster If you are using the PostGIS-enabled image (i.e. `crunchy-postgres-gis-ha`) or any other custom images, you will need to add the `--ccp-image-tag`: ``` -pgo upgrade --ccp-image-tag={{< param centosBase >}}-{{< param postgresVersion >}}-{{< param postgisVersion >}}-{{< param operatorVersion >}} mygiscluster +pgo upgrade --ccp-image-tag={{< param ubiBase >}}-{{< param postgresVersion >}}-{{< param postgisVersion >}}-{{< param operatorVersion >}} mygiscluster ``` Where `{{< param postgresVersion >}}` is the PostgreSQL version, `{{< param postgisVersion >}}` is the PostGIS version and `{{< param operatorVersion >}}` is the PostgreSQL Operator version. Please note, no tag validation will be performed and additional steps may be required to upgrade your PostGIS extension implementation. For more information on PostGIS upgrade considerations, please see diff --git a/docs/content/Upgrade/manual/upgrade35.md b/docs/content/Upgrade/manual/upgrade35.md index cb7ec25138..e0a678bfc0 100644 --- a/docs/content/Upgrade/manual/upgrade35.md +++ b/docs/content/Upgrade/manual/upgrade35.md @@ -17,7 +17,7 @@ A major change to this container is that the PostgreSQL process is now managed b When creating your new clusters using version {{< param operatorVersion >}} of the PostgreSQL Operator, the `pgo create cluster` command will automatically use the new `crunchy-postgres-ha` image if the image is unspecified. If you are creating a PostGIS enabled cluster, please be sure to use the updated image name and image tag, as with the command: ``` -pgo create cluster mygiscluster --ccp-image=crunchy-postgres-gis-ha --ccp-image-tag={{< param centosBase >}}-{{< param postgresVersion >}}-{{< param postgisVersion >}}-{{< param operatorVersion >}} +pgo create cluster mygiscluster --ccp-image=crunchy-postgres-gis-ha --ccp-image-tag={{< param ubiBase >}}-{{< param postgresVersion >}}-{{< param postgisVersion >}}-{{< param operatorVersion >}} ``` Where `{{< param postgresVersion >}}` is the PostgreSQL version, `{{< param postgisVersion >}}` is the PostGIS version and `{{< param operatorVersion >}}` is the PostgreSQL Operator version. Please note, no tag validation will be performed and additional steps may be required to upgrade your PostGIS extension implementation. For more information on PostGIS upgrade considerations, please see @@ -114,7 +114,7 @@ We strongly recommend that you create a test cluster before proceeding to the ne Once the Operator is installed and functional, create a new {{< param operatorVersion >}} cluster matching the cluster details recorded in Step 1. 
Be sure to use the primary PVC name (also noted in Step 1) and the same major PostgreSQL version as was used previously. This will allow the new clusters to utilize the existing PVCs. -NOTE: If you have existing pgBackRest backups stored that you would like to have available in the upgraded cluster, you will need to follow the [PVC Renaming Procedure]( {{< relref "Upgrade/manual/upgrade35#pgbackrest-repo-pvc-renaming" >}}). +NOTE: If you have existing pgBackRest backups stored that you would like to have available in the upgraded cluster, you will need to follow the [PVC Renaming Procedure](#pgbackrest-repo-pvc-renaming). A simple example is given below, but more information on cluster creation can be found [here](/pgo-client/common-tasks#creating-a-postgresql-cluster) @@ -226,7 +226,7 @@ spec: volumeName: "crunchy-pv156" ``` -where name matches your new cluster (Remember that this will need to match the "primary PVC" name identified in [Step 2]( {{< relref "Upgrade/manual/upgrade35#step-2" >}}) of the upgrade procedure!) and namespace, storageClassName, accessModes, storage, volumeMode and volumeName match your original PVC. +where name matches your new cluster (Remember that this will need to match the "primary PVC" name identified in [Step 2](#step-2) of the upgrade procedure!) and namespace, storageClassName, accessModes, storage, volumeMode and volumeName match your original PVC. ##### Step 6 diff --git a/docs/content/Upgrade/manual/upgrade4.md b/docs/content/Upgrade/manual/upgrade4.md index da11f86f15..d8fb7a579e 100644 --- a/docs/content/Upgrade/manual/upgrade4.md +++ b/docs/content/Upgrade/manual/upgrade4.md @@ -19,7 +19,7 @@ A major change to this container is that the PostgreSQL process is now managed b When creating your new clusters using version {{< param operatorVersion >}} of the PostgreSQL Operator, the `pgo create cluster` command will automatically use the new `crunchy-postgres-ha` image if the image is unspecified. If you are creating a PostGIS enabled cluster, please be sure to use the updated image name and image tag, as with the command: ``` -pgo create cluster mygiscluster --ccp-image=crunchy-postgres-gis-ha --ccp-image-tag={{< param centosBase >}}-{{< param postgresVersion >}}-{{< param postgisVersion >}}-{{< param operatorVersion >}} +pgo create cluster mygiscluster --ccp-image=crunchy-postgres-gis-ha --ccp-image-tag={{< param ubiBase >}}-{{< param postgresVersion >}}-{{< param postgisVersion >}}-{{< param operatorVersion >}} ``` Where `{{< param postgresVersion >}}` is the PostgreSQL version, `{{< param postgisVersion >}}` is the PostGIS version and `{{< param operatorVersion >}}` is the PostgreSQL Operator version. Please note, no tag validation will be performed and additional steps may be required to upgrade your PostGIS extension implementation. For more information on PostGIS upgrade considerations, please see @@ -151,7 +151,7 @@ We strongly recommend that you create a test cluster before proceeding to the ne Once the Operator is installed and functional, create a new {{< param operatorVersion >}} cluster matching the cluster details recorded in Step 1. Be sure to use the primary PVC name (also noted in Step 1) and the same major PostgreSQL version as was used previously. This will allow the new clusters to utilize the existing PVCs. 
-NOTE: If you have existing pgBackRest backups stored that you would like to have available in the upgraded cluster, you will need to follow the [PVC Renaming Procedure]( {{< relref "Upgrade/manual/upgrade4#pgbackrest-repo-pvc-renaming" >}}). +NOTE: If you have existing pgBackRest backups stored that you would like to have available in the upgraded cluster, you will need to follow the [PVC Renaming Procedure](#pgbackrest-repo-pvc-renaming). A simple example is given below, but more information on cluster creation can be found [here](/pgo-client/common-tasks#creating-a-postgresql-cluster) @@ -431,7 +431,7 @@ We strongly recommend that you create a test cluster before proceeding to the ne Once the Operator is installed and functional, create a new {{< param operatorVersion >}} cluster matching the cluster details recorded in Step 1. Be sure to use the same name and the same major PostgreSQL version as was used previously. This will allow the new clusters to utilize the existing PVCs. A simple example is given below, but more information on cluster creation can be found [here](/pgo-client/common-tasks#creating-a-postgresql-cluster) -NOTE: If you have existing pgBackRest backups stored that you would like to have available in the upgraded cluster, you will need to follow the [PVC Renaming Procedure]( {{< relref "Upgrade/manual/upgrade4#pgbackrest-repo-pvc-renaming" >}}). +NOTE: If you have existing pgBackRest backups stored that you would like to have available in the upgraded cluster, you will need to follow the [PVC Renaming Procedure](#pgbackrest-repo-pvc-renaming). ``` pgo create cluster -n @@ -543,7 +543,7 @@ spec: volumeName: "crunchy-pv156" ``` -where name matches your new cluster (Remember that this will need to match the "primary PVC" name identified in [Step 2]( {{< relref "Upgrade/manual/upgrade35#step-2" >}}) of the upgrade procedure!) and namespace, storageClassName, accessModes, storage, volumeMode and volumeName match your original PVC. +where name matches your new cluster (Remember that this will need to match the "primary PVC" name identified in [Step 2](#step-2) of the upgrade procedure!) and namespace, storageClassName, accessModes, storage, volumeMode and volumeName match your original PVC. ##### Step 6 diff --git a/docs/content/_index.md b/docs/content/_index.md index f83a7c49e1..34edfd00b0 100644 --- a/docs/content/_index.md +++ b/docs/content/_index.md @@ -1,20 +1,30 @@ --- -title: "Crunchy PostgreSQL Operator" +title: "PGO: The Postgres Operator from Crunchy Data" date: draft: false --- -# Crunchy PostgreSQL Operator +# PGO: The Postgres Operator from Crunchy Data - + PGO: The Postgres Operator from Crunchy Data -## Run your own production-grade PostgreSQL-as-a-Service on Kubernetes! +## Run [Cloud Native PostgreSQL on Kubernetes](https://www.crunchydata.com/products/crunchy-postgresql-for-kubernetes/) with PGO: The [Postgres Operator](https://github.com/CrunchyData/postgres-operator) from [Crunchy Data](https://www.crunchydata.com/)! 
Latest Release: {{< param operatorVersion >}} -The [Crunchy PostgreSQL Operator](https://www.crunchydata.com/developers/download-postgres/containers/postgres-operator) automates and simplifies deploying and managing open source PostgreSQL clusters on Kubernetes and other Kubernetes-enabled Platforms by providing the essential features you need to keep your PostgreSQL clusters up and running, including: +[PGO](https://www.crunchydata.com/developers/download-postgres/containers/postgres-operator), +the [Postgres Operator](https://github.com/CrunchyData/postgres-operator) +developed by [Crunchy Data](https://crunchydata.com/) and included in +[Crunchy PostgreSQL for Kubernetes](https://www.crunchydata.com/products/crunchy-postgresql-for-kubernetes/), +automates and simplifies deploying and managing open source PostgreSQL clusters +on Kubernetes. -#### PostgreSQL Cluster [Provisioning]({{< relref "/architecture/provisioning.md" >}}) +Whether you need to get a simple Postgres cluster up and running, need to deploy +a high-availability, fault-tolerant cluster in production, or are running your +own database-as-a-service, the PostgreSQL Operator provides the essential +features you need to keep your cloud native Postgres clusters healthy, including: + +#### Postgres Cluster [Provisioning]({{< relref "/architecture/provisioning.md" >}}) [Create, Scale, & Delete PostgreSQL clusters with ease](/architecture/provisioning/), while fully customizing your Pods and PostgreSQL configuration! @@ -30,7 +40,7 @@ Backups and restores leverage the open source [pgBackRest](https://www.pgbackres #### TLS -Secure communication between your applications and data servers by [enabling TLS for your PostgreSQL servers](/pgo-client/common-tasks/#enable-tls), including the ability to enforce that all of your connections to use TLS. +Secure communication between your applications and data servers by [enabling TLS for your PostgreSQL servers](/pgo-client/common-tasks/#enable-tls), including the ability to require that all of your connections use TLS. #### [Monitoring]({{< relref "/architecture/monitoring.md" >}}) @@ -56,11 +66,11 @@ Create new clusters from your existing clusters or backups with [`pgo create clu #### Connection Pooling - Use [pgBouncer](https://access.crunchydata.com/documentation/pgbouncer/) for connection pooling + Use [pgBouncer]({{< relref "tutorial/pgbouncer.md" >}}) for connection pooling. -#### Node Affinity +#### Affinity and Tolerations -Have your PostgreSQL clusters deployed to [Kubernetes Nodes](https://kubernetes.io/docs/concepts/architecture/nodes/) of your preference +Have your PostgreSQL clusters deployed to [Kubernetes Nodes](https://kubernetes.io/docs/concepts/architecture/nodes/) of your preference with [node affinity]({{< relref "architecture/high-availability/_index.md">}}#node-affinity), or designate which nodes Kubernetes can schedule PostgreSQL instances to with [tolerations]({{< relref "architecture/high-availability/_index.md">}}#tolerations).
#### Scheduled Backups @@ -72,16 +82,22 @@ Choose the type of backup (full, incremental, differential) and [how frequently #### Multi-Namespace Support -You can control how the PostgreSQL Operator leverages [Kubernetes Namespaces](https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/) with several different deployment models: +You can control how PGO, the Postgres Operator, leverages [Kubernetes Namespaces](https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/) with several different deployment models: -- Deploy the PostgreSQL Operator and all PostgreSQL clusters to the same namespace -- Deploy the PostgreSQL Operator to one namespaces, and all PostgreSQL clusters to a different namespace -- Deploy the PostgreSQL Operator to one namespace, and have your PostgreSQL clusters managed acrossed multiple namespaces -- Dynamically add and remove namespaces managed by the PostgreSQL Operator using the `pgo create namespace` and `pgo delete namespace` commands +- Deploy PGO and all PostgreSQL clusters to the same namespace +- Deploy PGO to one namespace, and all PostgreSQL clusters to a different +namespace +- Deploy PGO to one namespace, and have your PostgreSQL clusters managed across +multiple namespaces +- Dynamically add and remove namespaces managed by the PostgreSQL Operator using +the `pgo` client to run `pgo create namespace` and `pgo delete namespace` #### Full Customizability -The Crunchy PostgreSQL Operator makes it easy to get your own PostgreSQL-as-a-Service up and running on Kubernetes-enabled platforms, but we know that there are further customizations that you can make. As such, the Crunchy PostgreSQL Operator allows you to further customize your deployments, including: +The Postgres Operator (PGO) makes it easy to get Postgres up and running on +Kubernetes-enabled platforms, but we know that there are further customizations +that you can make. As such, PGO allows you to further customize your +deployments, including: - Selecting different storage classes for your primary, replica, and backup storage - Select your own container resources class for each PostgreSQL cluster deployment; differentiate between resources applied for primary and replica clusters!
@@ -104,6 +120,7 @@ The Crunchy PostgreSQL Operator extends Kubernetes to provide a higher-level abs - [PostgreSQL](https://www.postgresql.org) - [PostgreSQL Contrib Modules](https://www.postgresql.org/docs/current/contrib.html) - [PL/Python + PL/Python 3](https://www.postgresql.org/docs/current/plpython.html) + - [PL/Perl](https://www.postgresql.org/docs/current/plperl.html) - [pgAudit](https://www.pgaudit.org/) - [pgAudit Analyze](https://github.com/pgaudit/pgaudit_analyze) - [pgnodemx](https://github.com/CrunchyData/pgnodemx) @@ -139,24 +156,32 @@ For more information about which versions of the PostgreSQL Operator include whi # Supported Platforms -The Crunchy PostgreSQL Operator is tested on the following Platforms: +PGO, the Postgres Operator, is Kubernetes-native and maintains backwards +compatibility with Kubernetes 1.11 and is tested against the following +platforms: -- Kubernetes 1.13+ -- OpenShift 3.11+ +- Kubernetes 1.17+ +- OpenShift 4.4+ +- OpenShift 3.11 - Google Kubernetes Engine (GKE), including Anthos - Amazon EKS -- VMware Enterprise PKS 1.3+ +- Microsoft AKS +- VMware Tanzu -## Storage +This list only includes the platforms that the Postgres Operator is specifically +tested on as part of the release process: PGO works on other Kubernetes +distributions as well. -The Crunchy PostgreSQL Operator is tested with a variety of different types of Kubernetes storage and Storage Classes, including: +## Storage -- Rook -- StorageOS -- Google Compute Engine persistent volumes -- NFS -- HostPath +PGO, the Postgres Operator, is tested with a variety of different types of +Kubernetes storage and Storage Classes, as well as hostPath and NFS. -and more. We have had reports of people using the PostgreSQL Operator with other [Storage Classes](https://kubernetes.io/docs/concepts/storage/storage-classes/) as well. +We know there are a variety of different types of [Storage Classes](https://kubernetes.io/docs/concepts/storage/storage-classes/) +available for Kubernetes and we do our best to test each one, but due to the +breadth of this area we are unable to verify Postgres Operator functionality in +each one. With that said, the PostgreSQL Operator is designed to be storage +class agnostic and has been demonstrated to work with additional Storage +Classes. -We know there are a variety of different types of [Storage Classes](https://kubernetes.io/docs/concepts/storage/storage-classes/) available for Kubernetes and we do our best to test each one, but due to the breadth of this area we are unable to verify PostgreSQL Operator functionality in each one. With that said, the PostgreSQL Operator is designed to be storage class agnostic and has been demonstrated to work with additional Storage Classes. Storage is a rapidly evolving field in Kubernetes and we will continue to adapt the PostgreSQL Operator to modern Kubernetes storage standards. +The PGO Postgres Operator project source code is available subject to the [Apache 2.0 license](https://raw.githubusercontent.com/CrunchyData/postgres-operator/master/LICENSE.md) with the PGO logo and branding assets covered by [our trademark guidelines](/logos/TRADEMARKS.md). diff --git a/docs/content/advanced/crunchy-postgres-exporter.md b/docs/content/advanced/crunchy-postgres-exporter.md index b9b2a3ba09..abfd1f63c4 100644 --- a/docs/content/advanced/crunchy-postgres-exporter.md +++ b/docs/content/advanced/crunchy-postgres-exporter.md @@ -23,9 +23,9 @@ can be specified for the API to collect.
For an example of a queries.yml file, s The crunchy-postgres-exporter Docker image contains the following packages (versions vary depending on PostgreSQL version): -* PostgreSQL ({{< param postgresVersion13 >}}, {{< param postgresVersion12 >}}, {{< param postgresVersion11 >}}, {{< param postgresVersion10 >}}, {{< param postgresVersion96 >}} and {{< param postgresVersion95 >}}) -* CentOS7 - publicly available -* UBI7 - customers only +* PostgreSQL ({{< param postgresVersion13 >}}, {{< param postgresVersion12 >}}, and {{< param postgresVersion11 >}}). +* UBI 8 - publicly available +* UBI 7, UBI 8 - customers only * [PostgreSQL Exporter](https://github.com/wrouesnel/postgres_exporter) ## Environment Variables diff --git a/docs/content/advanced/custom-configuration.md b/docs/content/advanced/custom-configuration.md index 4ac15d1578..b1df1c81b0 100644 --- a/docs/content/advanced/custom-configuration.md +++ b/docs/content/advanced/custom-configuration.md @@ -75,6 +75,46 @@ files that ship with the Crunchy Postgres container, there is no requirement to. In this event, continue using the Operator as usual and avoid defining a global configMap. +## Create a PostgreSQL Cluster With Custom Configuration + +The PostgreSQL Operator allows for a PostgreSQL cluster to be created with a customized configuration. To do this, one must create a ConfigMap with an entry called `postgres-ha.yaml` that contains the custom configuration. The custom configuration follows the [Patroni YAML format](https://access.crunchydata.com/documentation/patroni/latest/settings/). Note that parameters that are placed in the `bootstrap` section are applied once during cluster initialization. Editing these values in a working cluster requires following the [Modifying PostgreSQL Cluster Configuration](#modifying-postgresql-cluster-configuration) section. + +For example, let's say we want to create a PostgreSQL cluster with `shared_buffers` set to `2GB`, `max_connections` set to `30` and `password_encryption` set to `scram-sha-256`. We would create a configuration file that looks similar to: + +``` +--- +bootstrap: + dcs: + postgresql: + parameters: + max_connections: 30 + shared_buffers: 2GB + password_encryption: scram-sha-256 +``` + +Save this configuration in a file called `postgres-ha.yaml`.
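If you manage your Kubernetes objects declaratively rather than with the imperative `kubectl create configmap` command shown next, the same ConfigMap can be expressed as a manifest and applied with `kubectl apply -f`. This is a minimal sketch, assuming the `pgo` namespace and the `hippo-custom-config` name used in the rest of this example:

```
# Sketch: declarative equivalent of
#   kubectl -n pgo create configmap hippo-custom-config --from-file=postgres-ha.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: hippo-custom-config
  namespace: pgo
data:
  # the entry must be named postgres-ha.yaml so the Operator can find the custom configuration
  postgres-ha.yaml: |
    ---
    bootstrap:
      dcs:
        postgresql:
          parameters:
            max_connections: 30
            shared_buffers: 2GB
            password_encryption: scram-sha-256
```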
+ +Create a [`ConfigMap`](https://kubernetes.io/docs/tasks/configure-pod-container/configure-pod-configmap/) like so: + +``` +kubectl -n pgo create configmap hippo-custom-config --from-file=postgres-ha.yaml +``` + +You can then have your new PostgreSQL cluster use `hippo-custom-config` as part of its cluster initialization by using the `--custom-config` flag of `pgo create cluster`: + +``` +pgo create cluster hippo -n pgo --custom-config=hippo-custom-config +``` + +After your cluster is initialized, [connect to your cluster]({{< relref "tutorial/connect-cluster.md" >}}) and confirm that your settings have been applied: + +``` +SHOW shared_buffers; + + shared_buffers +---------------- + 2GB +``` ## Modifying PostgreSQL Cluster Configuration @@ -150,7 +190,7 @@ postgresql: shared_buffers: 128MB shared_preload_libraries: pgaudit.so,pg_stat_statements.so temp_buffers: 8MB - unix_socket_directories: /tmp,/crunchyadm + unix_socket_directories: /tmp wal_level: logical work_mem: 4MB recovery_conf: @@ -168,7 +208,6 @@ postgresql: - basebackup pg_hba: - local all postgres peer - - local all crunchyadm peer - host replication primaryuser 0.0.0.0/0 md5 - host all primaryuser 0.0.0.0/0 reject - host all all 0.0.0.0/0 md5 diff --git a/docs/content/architecture/disaster-recovery.md b/docs/content/architecture/disaster-recovery.md index 161bcedac0..eb2947b702 100644 --- a/docs/content/architecture/disaster-recovery.md +++ b/docs/content/architecture/disaster-recovery.md @@ -36,10 +36,10 @@ At PostgreSQL cluster creation time, you can specify a specific Storage Class for the pgBackRest repository. Additionally, you can also specify the type of pgBackRest repository that can be used, including: -- `local`: Uses the storage that is provided by the Kubernetes cluster's Storage +- `posix`: Uses the storage that is provided by the Kubernetes cluster's Storage Class that you select - `s3`: Use Amazon S3 or an object storage system that uses the S3 protocol -- `local,s3`: Use both the storage that is provided by the Kubernetes cluster's +- `posix,s3`: Use both the storage that is provided by the Kubernetes cluster's Storage Class that you select AND Amazon S3 (or equivalent object storage system that uses the S3 protocol) @@ -195,7 +195,7 @@ to re-enable autofail if you would like your PostgreSQL cluster to be highly-available. You can re-enable autofail with this command: ```shell -pgo update cluster hacluster --autofail=true +pgo update cluster hacluster --enable-autofail ``` ## Scheduling Backups @@ -300,7 +300,100 @@ stored in Kubernetes Secrets and are securely mounted to the PostgreSQL clusters. To enable a PostgreSQL cluster to use S3, the `--pgbackrest-storage-type` on the -`pgo create cluster` command needs to be set to `s3` or `local,s3`. +`pgo create cluster` command needs to be set to `s3` or `posix,s3`. Once configured, the `pgo backup` and `pgo restore` commands will work with S3 similarly to the above! + +## Deleting a Backup + +{{% notice warning %}} +If you delete a backup that is *not* set to expire, you may be unable to meet +your retention requirements. If you are deleting backups to free space, it is +recommended to delete your oldest backups first. +{{% /notice %}} + +A backup can be deleted using the [`pgo delete backup`]({{< relref "pgo-client/reference/pgo_delete_backup.md" >}}) +command. You must specify a specific backup to delete using the `--target` flag. +You can get the backup names from the +[`pgo show backup`]({{< relref "pgo-client/reference/pgo_show_backup.md" >}}) +command.
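Putting those two commands together, the general shape of the workflow is as follows (a sketch; `hippo` and the `<backup-label>` placeholder correspond to the detailed example below):

```
pgo show backup hippo                            # list backups; note the label of the backup to delete
pgo delete backup hippo --target=<backup-label>  # delete that specific backup
```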
+ +For example, using a PostgreSQL cluster called `hippo`, pretend there is an +example pgBackRest repository in the state shown after running the + `pgo show backup hippo` command: + +``` +cluster: hippo +storage type: posix + +stanza: db + status: ok + cipher: none + + db (current) + wal archive min/max (12-1) + + full backup: 20201220-171801F + timestamp start/stop: 2020-12-20 17:18:01 +0000 UTC / 2020-12-20 17:18:10 +0000 UTC + wal start/stop: 000000010000000000000002 / 000000010000000000000002 + database size: 31.3MiB, backup size: 31.3MiB + repository size: 3.8MiB, repository backup size: 3.8MiB + backup reference list: + + incr backup: 20201220-171801F_20201220-171939I + timestamp start/stop: 2020-12-20 17:19:39 +0000 UTC / 2020-12-20 17:19:41 +0000 UTC + wal start/stop: 000000010000000000000005 / 000000010000000000000005 + database size: 31.3MiB, backup size: 216.3KiB + repository size: 3.8MiB, repository backup size: 25.9KiB + backup reference list: 20201220-171801F + + incr backup: 20201220-171801F_20201220-172046I + timestamp start/stop: 2020-12-20 17:20:46 +0000 UTC / 2020-12-20 17:23:29 +0000 UTC + wal start/stop: 00000001000000000000000A / 00000001000000000000000A + database size: 65.9MiB, backup size: 37.5MiB + repository size: 7.7MiB, repository backup size: 4.3MiB + backup reference list: 20201220-171801F, 20201220-171801F_20201220-171939I + + full backup: 20201220-201305F + timestamp start/stop: 2020-12-20 20:13:05 +0000 UTC / 2020-12-20 20:13:15 +0000 UTC + wal start/stop: 00000001000000000000000F / 00000001000000000000000F + database size: 65.9MiB, backup size: 65.9MiB + repository size: 7.7MiB, repository backup size: 7.7MiB + backup reference list: +``` + +The backup targets can be found after the backup type, e.g. `20201220-171801F` +or `20201220-171801F_20201220-172046I`. + +One can delete the oldest backup, in this case `20201220-171801F`, by running +the following command: + +``` +pgo delete backup hippo --target=20201220-171801F +``` + +Verify the backup is deleted with `pgo show backup hippo`: + +``` +cluster: hippo +storage type: posix + +stanza: db + status: ok + cipher: none + + db (current) + wal archive min/max (12-1) + + full backup: 20201220-201305F + timestamp start/stop: 2020-12-20 20:13:05 +0000 UTC / 2020-12-20 20:13:15 +0000 UTC + wal start/stop: 00000001000000000000000F / 00000001000000000000000F + database size: 65.9MiB, backup size: 65.9MiB + repository size: 7.7MiB, repository backup size: 7.7MiB + backup reference list: +``` + +(Note: this had the net effect of expiring all of the incremental backups +associated with the full backup that was deleted. This is a feature of +pgBackRest.) diff --git a/docs/content/architecture/high-availability/_index.md b/docs/content/architecture/high-availability/_index.md index c5f05eaf96..08eb2da4c2 100644 --- a/docs/content/architecture/high-availability/_index.md +++ b/docs/content/architecture/high-availability/_index.md @@ -219,6 +219,30 @@ number of nodes are available to support this configuration, certain deployments will fail, since it will not be possible for Kubernetes to successfully schedule the pods for each deployment. +It is possible to fine-tune the pod anti-affinity rules further; specifically, you can +set different affinity rules for the PostgreSQL, pgBackRest, and pgBouncer +Deployments.
These can be set with the following flags on [`pgo create cluster`]({{< relref "pgo-client/reference/pgo_create_cluster.md">}}): + +- `--pod-anti-affinity`: Sets the pod anti-affinity rules for all the managed +Deployments in the cluster (PostgreSQL, pgBackRest, pgBouncer) +- `--pod-anti-affinity-pgbackrest`: Sets the pod anti-affinity rules for _only_ +the pgBackRest Deployment. This takes precedence over the value of +`--pod-anti-affinity`. +- `--pod-anti-affinity-pgbouncer`: Sets the pod anti-affinity rules for _only_ +the pgBouncer Deployment. This takes precedence over the value of +`--pod-anti-affinity`. + +For example, to use `required` pod anti-affinity between PostgreSQL instances +but use only `preferred` anti-affinity for pgBackRest and pgBouncer, you could +use the following command: + +``` +pgo create cluster hippo --replicas=2 --pgbouncer \ + --pod-anti-affinity=required \ + --pod-anti-affinity-pgbackrest=preferred \ + --pod-anti-affinity-pgbouncer=preferred +``` + ## Synchronous Replication: Guarding Against Transactions Loss Clusters managed by the Crunchy PostgreSQL Operator can be deployed with @@ -272,7 +296,152 @@ when creating a PostgreSQL cluster; pgo create cluster thatcluster --node-label=region=us-east-1 ``` -The Node Affinity only uses the `preferred` scheduling strategy (similar to what -is described in the Pod Anti-Affinity section above), so if a Pod cannot be -scheduled to a particular Node matching the label, it will be scheduled to a -different Node. +By default, node affinity uses the `preferred` scheduling strategy (similar to +what is described in the [Pod Anti-Affinity](#how-the-crunchy-postgresql-operator-uses-pod-anti-affinity) +section above), so if a Pod cannot be scheduled to a particular Node matching +the label, it will be scheduled to a different Node. + +The PostgreSQL Operator supports two different types of node affinity: + +- `preferred` +- `required` + +which can be selected with the `--node-affinity-type` flag, e.g.: + +``` +pgo create cluster hippo \ + --node-label=region=us-east-1 --node-affinity-type=required +``` + +When creating a cluster, the node affinity rules will be applied to the primary +and any other PostgreSQL instances that are added. If you would like to specify +a node affinity rule for a specific instance, you can do so with the +[`pgo scale`]({{< relref "pgo-client/reference/pgo_scale.md">}}) command and the +`--node-label` and `--node-affinity-type` flags, e.g.: + +``` +pgo scale hippo \ + --node-label=region=us-south-1 --node-affinity-type=required +``` + +## Tolerations + +Kubernetes [Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) +can help with the scheduling of Pods to appropriate nodes. There are many +reasons that a Kubernetes administrator may want to use tolerations, such as +restricting the types of Pods that can be assigned to particular Nodes. +Reasoning and strategy for using taints and tolerations is outside the scope of +this documentation. + +The PostgreSQL Operator supports the setting of tolerations across all +PostgreSQL instances in a cluster, as well as for each particular PostgreSQL +instance within a cluster. Both the [`pgo create cluster`]({{< relref "pgo-client/reference/pgo_create_cluster.md">}}) +and [`pgo scale`]({{< relref "pgo-client/reference/pgo_scale.md">}}) commands +support the `--toleration` flag, which allows for one or more tolerations to be +added to a PostgreSQL cluster.
Values accepted by the `--toleration` flag use the +following format: + +``` +rule:Effect +``` + +where a `rule` can represent existence (e.g. `key`) or equality (`key=value`) +and `Effect` is one of `NoSchedule`, `PreferNoSchedule`, or `NoExecute`. For +more information on how tolerations work, please refer to the +[Kubernetes documentation](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/). + +For example, to add two tolerations to a new PostgreSQL cluster, one that is an +existence toleration for a key of `ssd` and the other that is an equality +toleration for a key/value pair of `zone`/`east`, you can run the following +command: + +``` +pgo create cluster hippo \ + --toleration=ssd:NoSchedule \ + --toleration=zone=east:NoSchedule +``` + +For another example, to assign an equality toleration for a key/value pair of +`zone`/`west` to a new instance in the `hippo` cluster, you can run the +following command: + +``` +pgo scale hippo --toleration=zone=west:NoSchedule +``` + +Tolerations can be updated on an existing cluster. You can do this by either +modifying the `pgclusters.crunchydata.com` and `pgreplicas.crunchydata.com` +custom resources directly, e.g. via the `kubectl edit` command, or with the +[`pgo update cluster`]({{< relref "pgo-client/reference/pgo_update_cluster.md" >}}) +command. Using the `pgo update cluster` command, a toleration can be removed by +adding a `-` at the end of the toleration effect. + +For example, to add a toleration of `zone=west:NoSchedule` and remove the +toleration of `zone=east:NoSchedule`, you could run the following command: + +``` +pgo update cluster hippo \ + --toleration=zone=west:NoSchedule \ + --toleration=zone=east:NoSchedule- +``` + +Once the updates are applied, the PostgreSQL Operator will roll out the changes +to the appropriate instances. + +## Rolling Updates + +During the lifecycle of a PostgreSQL cluster, there are certain events that may +require a planned restart, such as an update to a "restart required" PostgreSQL +configuration setting (e.g. [`shared_buffers`](https://www.postgresql.org/docs/current/runtime-config-resource.html#GUC-SHARED-BUFFERS)) +or a change to a Kubernetes Deployment template (e.g. [changing the memory request]({{< relref "tutorial/customize-cluster.md">}}#customize-cpu-memory)). Restarts can be disruptive in a high-availability deployment, which is +why many setups employ a ["rolling update" strategy](https://kubernetes.io/docs/tutorials/kubernetes-basics/update/update-intro/) +(aka a "rolling restart") to minimize or eliminate downtime during a planned +restart. + +Because PostgreSQL is a stateful application, a simple rolling restart strategy +will not work: PostgreSQL needs to ensure that there is a primary available that +can accept reads and writes. This requires following a method that will minimize +the amount of downtime when the primary is taken offline for a restart. + +The PostgreSQL Operator provides a mechanism for rolling updates implicitly on +certain operations that change the Deployment templates (e.g. memory updates, +CPU updates, adding tablespaces, modifying annotations) and explicitly through +the [`pgo restart`]({{< relref "pgo-client/reference/pgo_restart.md">}}) +command with the `--rolling` flag. The PostgreSQL Operator uses the following +algorithm to perform the rolling restart to minimize any potential +interruptions: + +1. Each replica is updated in sequential order, following this process: + + 1.
The replica is explicitly shut down to ensure any outstanding changes are + flushed to disk. + + 2. If requested, the PostgreSQL Operator will apply any changes to the + Deployment. + + 3. The replica is brought back online. The PostgreSQL Operator waits for the + replica to become available before it proceeds to the next replica. + +2. The above steps are repeated until all of the replicas are restarted. + +3. A controlled switchover is performed. The PostgreSQL Operator determines +which replica is the best candidate to become the new primary. It then demotes +the primary to become a replica and promotes the best candidate to become the +new primary. + +4. The former primary follows a process similar to what is described in step 1. + +The downtime is thus constrained to the amount of time the switchover takes. + +A rolling update strategy will be used if any of the following changes are made +to a PostgreSQL cluster, either through the `pgo update` command or from a +modification to the custom resource: + +- Memory resource adjustments +- CPU resource adjustments +- Custom annotation changes +- Enabling/disabling the monitoring sidecar on a PostgreSQL cluster (`--metrics`) +- Enabling/disabling the pgBadger sidecar on a PostgreSQL cluster (`--pgbadger`) +- Tablespace additions +- Toleration modifications diff --git a/docs/content/architecture/high-availability/multi-cluster-kubernetes.md b/docs/content/architecture/high-availability/multi-cluster-kubernetes.md index c6043adba4..c84b5ad35b 100644 --- a/docs/content/architecture/high-availability/multi-cluster-kubernetes.md +++ b/docs/content/architecture/high-availability/multi-cluster-kubernetes.md @@ -93,6 +93,14 @@ that matches that of the active cluster it is replicating. - `--pgbackrest-s3-endpoint`: The S3 endpoint to use - `--pgbackrest-s3-region`: The S3 region to use +If you do not want to set the user credentials, you can retrieve them at a later +time by using the [`pgo show user`]({{< relref "/pgo-client/reference/pgo_show_user.md" >}}) +command with the `--show-system-accounts` flag, e.g. + +``` +pgo show user --show-system-accounts hippo +``` + With respect to the credentials, it should be noted that when the standby cluster is being created within the same Kubernetes cluster AND it has access to the Kubernetes Secret created for the active cluster, one can use the @@ -134,7 +142,7 @@ pgBackRest. For example: ``` pgo create cluster hippo --pgbouncer --replica-count=2 \ - --pgbackrest-storage-type=local,s3 \ + --pgbackrest-storage-type=posix,s3 \ --pgbackrest-s3-key= \ --pgbackrest-s3-key-secret= \ --pgbackrest-s3-bucket=watering-hole \ @@ -182,6 +190,9 @@ pgo create cluster hippo-standby --standby --pgbouncer --replica-count=2 \ --password=opensourcehippo ``` +(If you are unsure of your credentials, you can use +`pgo show user hippo --show-system-accounts` to retrieve them). + Note the use of the `--pgbackrest-repo-path` flag as it points to the name of the pgBackRest repository that is used for the original `hippo` cluster. @@ -208,14 +219,14 @@ command. ``` pgo show cluster hippo -cluster : standby (crunchy-postgres-ha:{{< param centosBase >}}-{{< param postgresVersion >}}-{{< param operatorVersion >}}) +cluster : standby (crunchy-postgres-ha:{{< param ubiBase >}}-{{< param postgresVersion >}}-{{< param operatorVersion >}}) standby : true ``` ## Promoting a Standby Cluster There comes a time where a standby cluster needs to be promoted to an active cluster. 
Promoting a standby cluster means that a PostgreSQL instance within -it will become a priary and start accepting both reads and writes. This has the +it will become a primary and start accepting both reads and writes. This has the net effect of pushing WAL (transaction archives) to the pgBackRest repository, so we need to take a few steps first to ensure we don't accidentally create a split-brain scenario. diff --git a/docs/content/architecture/monitoring.md b/docs/content/architecture/monitoring.md index 75c9b0eb5f..9258b18f33 100644 --- a/docs/content/architecture/monitoring.md +++ b/docs/content/architecture/monitoring.md @@ -35,6 +35,14 @@ command, for example: pgo create cluster --metrics hippo ``` +If you have already created a cluster and want to add metrics collection to it, +you can use the `--enable-metrics` flag as part of the [`pgo update cluster`]({{< relref "pgo-client/reference/pgo_update_cluster.md" >}}) +command, for example: + +``` +pgo update cluster --enable-metrics hippo +``` + ## Components The [PostgreSQL Operator Monitoring]({{< relref "installation/metrics/_index.md" >}}) diff --git a/docs/content/architecture/namespace.md b/docs/content/architecture/namespace.md index f6b4265723..1a551a8b91 100644 --- a/docs/content/architecture/namespace.md +++ b/docs/content/architecture/namespace.md @@ -34,8 +34,8 @@ settings. Enables full dynamic namespace capabilities, in which the Operator can create, delete and update any namespaces within a Kubernetes cluster. With `dynamic` mode enabled, the PostgreSQL Operator -can respond to namespace events in a Kubernetes cluster, such as when a namespace is created, and -take an appropriate action, such as adding the PostgreSQL Operator controllers for the newly +can respond to namespace events in a Kubernetes cluster, such as when a namespace is created, and +take an appropriate action, such as adding the PostgreSQL Operator controllers for the newly created namespace. The following defines the namespace permissions required for the `dynamic` mode to be enabled: @@ -62,8 +62,8 @@ rules: ### `readonly` -In `readonly` mode, the PostgreSQL Operator is still able to listen to namespace events within a -Kubernetes cluster, but it can no longer modify (create, update, delete) namespaces. For example, +In `readonly` mode, the PostgreSQL Operator is still able to listen to namespace events within a +Kubernetes cluster, but it can no longer modify (create, update, delete) namespaces. For example, if a Kubernetes administrator creates a namespace, the PostgreSQL Operator can respond and create controllers for that namespace. @@ -95,7 +95,7 @@ Operator is unable to dynamically respond to namespace events in the cluster, i target namespaces are deleted or new target namespaces need to be added, the PostgreSQL Operator will need to be re-deployed. -Please note that it is important to redeploy the PostgreSQL Operator following the deletion of a +Please note that it is important to redeploy the PostgreSQL Operator following the deletion of a target namespace to ensure it no longer attempts to listen for events in that namespace. The `disabled` mode is enabled when the PostgreSQL Operator has not been assigned namespace @@ -103,22 +103,22 @@ permissions.
## RBAC Reconciliation -By default, the PostgreSQL Operator will attempt to reconcile RBAC resources (ServiceAccounts, +By default, the PostgreSQL Operator will attempt to reconcile RBAC resources (ServiceAccounts, Roles and RoleBindings) within each namespace configured for the PostgreSQL Operator installation. This allows the PostgreSQL Operator to create, update and delete the various RBAC resources it requires in order to properly create and manage PostgreSQL clusters within each targeted namespace (this includes self-healing RBAC resources as needed if removed and/or misconfigured). In order for RBAC reconciliation to function properly, the PostgreSQL Operator ServiceAccount must -be assigned a certain set of permissions. While the PostgreSQL Operator is not concerned with +be assigned a certain set of permissions. While the PostgreSQL Operator is not concerned with exactly how it has been assigned the permissions required to reconcile RBAC in each target -namespace, the various [installation methods]({{< relref "installation" >}}) supported by the +namespace, the various [installation methods]({{< relref "installation" >}}) supported by the PostgreSQL Operator install a recommended set of permissions based on the specific Namespace Operating Mode enabled (see section [Namespace Operating Modes]({{< relref "#namespace-operating-modes" >}}) above for more information regarding the various Namespace Operating Modes available). -The following section defines the recommended set of permissions that should be assigned to the -PostgreSQL Operator ServiceAccount in order to ensure proper RBAC reconciliation based on the +The following section defines the recommended set of permissions that should be assigned to the +PostgreSQL Operator ServiceAccount in order to ensure proper RBAC reconciliation based on the specific Namespace Operating Mode enabled. Please note that each PostgreSQL Operator installation method handles the initial configuration and setup of the permissions shown below based on the Namespace Operating Mode configured during installation. @@ -127,7 +127,7 @@ Namespace Operating Mode configured during installation. When using the `dynamic` Namespace Operating Mode, it is recommended that the PostgreSQL Operator ServiceAccount be granted permissions to manage RBAC inside any namespace in the Kubernetes cluster -via a ClusterRole. This allows for a fully-hands off approach to managing RBAC within each +via a ClusterRole. This allows for a fully hands-off approach to managing RBAC within each targeted namespace. In other words, as namespaces are added and removed post-installation of the PostgreSQL Operator (e.g.
using `pgo create namespace` or `pgo delete namespace`), the Operator is able to automatically reconcile RBAC in those namespaces without the need for any external @@ -170,8 +170,6 @@ rules: - endpoints - pods - pods/exec - - pods/log - - replicasets - secrets - services - persistentvolumeclaims @@ -184,10 +182,19 @@ rules: - update - delete - deletecollection + - apiGroups: + - '' + resources: + - pods/log + verbs: + - get + - list + - watch - apiGroups: - apps resources: - deployments + - replicasets verbs: - get - list @@ -230,7 +237,7 @@ rules: ### `readonly` & `disabled` Namespace Operating Modes -When using the `readonly` or `disabled` Namespace Operating Modes, it is recommended that the +When using the `readonly` or `disabled` Namespace Operating Modes, it is recommended that the PostgreSQL Operator ServiceAccount be granted permissions to manage RBAC inside of any configured namespaces using local Roles within each targeted namespace. This means that as new namespaces are added and removed post-installation of the PostgreSQL Operator, an administrator must manually diff --git a/docs/content/architecture/overview.md b/docs/content/architecture/overview.md index 9365787ba8..bf12101df1 100644 --- a/docs/content/architecture/overview.md +++ b/docs/content/architecture/overview.md @@ -78,7 +78,7 @@ built-in metrics and connection pooling, similar to: We can accomplish that with a single command: ```shell -pgo create cluster hacluster --replica-count=1 --metrics --pgbackrest-storage-type="local,s3" --pgbouncer --pgbadger +pgo create cluster hacluster --replica-count=1 --metrics --pgbackrest-storage-type="posix,s3" --pgbouncer --pgbadger ``` The PostgreSQL Operator handles setting up all of the various Deployments and diff --git a/docs/content/architecture/postgres-operator-containers-overview.md b/docs/content/architecture/postgres-operator-containers-overview.md index 028b3b1f74..4397c63841 100644 --- a/docs/content/architecture/postgres-operator-containers-overview.md +++ b/docs/content/architecture/postgres-operator-containers-overview.md @@ -9,9 +9,13 @@ weight: 600 The PostgreSQL Operator orchestrates a series of PostgreSQL and PostgreSQL related containers that enable rapid deployment of PostgreSQL, including administration and monitoring tools in a Kubernetes environment. The PostgreSQL Operator supports PostgreSQL 9.5+ with multiple PostgreSQL cluster deployment strategies and a variety of PostgreSQL related extensions and tools enabling enterprise grade PostgreSQL-as-a-Service. A full list of the containers supported by the PostgreSQL Operator is provided below. -### PostgreSQL Server and Extensions +### PostgreSQL Server, Tools, and Extensions -* **PostgreSQL** (crunchy-postgres-ha). PostgreSQL database server. The crunchy-postgres container image is unmodified, open source PostgreSQL packaged and maintained by Crunchy Data. +* **PostgreSQL** (crunchy-postgres-ha). PostgreSQL database server. The crunchy-postgres container image is unmodified, open source PostgreSQL packaged and maintained by Crunchy Data. The container supports PostgreSQL tools by running in different modes; more information on running modes can be found in the [Crunchy Container](https://access.crunchydata.com/documentation/crunchy-postgres-containers/latest/) documentation. The PostgreSQL Operator uses the following running modes: + + - **pgdump** (MODE: pgdump) running in pgdump mode, the image executes either a pg_dump or pg_dumpall database backup against another PostgreSQL database.
+ - **pgrestore** (MODE: pgrestore) running in pgrestore mode, the image provides a means of performing a restore of a dump from pg_dump or pg_dumpall via psql or pg_restore to a PostgreSQL container database. + - **sqlrunner** (MODE: sqlrunner) running in sqlrunner mode, the image will use `psql` to issue specified queries, defined in SQL files, to a PostgreSQL container database. * **PostGIS** (crunchy-postgres-ha-gis). PostgreSQL database server including the PostGIS extension. The crunchy-postgres-gis container image is unmodified, open source PostgreSQL packaged and maintained by Crunchy Data. This image is identical to the crunchy-postgres image except it includes the open source geospatial extension PostGIS for PostgreSQL in addition to the language extension PL/R which allows for writing functions in the R statistical computing language. @@ -19,11 +23,6 @@ The PostgreSQL Operator orchestrates a series of PostgreSQL and PostgreSQL relat * **pgBackRest** (crunchy-postgres-ha). pgBackRest is a high performance backup and restore utility for PostgreSQL. The crunchy-postgres-ha container executes the pgBackRest utility, allowing FULL and DELTA restore capability. -* **pgdump** (crunchy-pgdump). The crunchy-pgdump container executes either a pg_dump or pg_dumpall database backup against another PostgreSQL database. - -* **crunchy-pgrestore** (restore). The restore image provides a means of performing a restore of a dump from pg_dump or pg_dumpall via psql or pg_restore to a PostgreSQL container database. - - ### Administration Tools * **pgAdmin4** (crunchy-pgadmin4). PGAdmin4 is a graphical user interface administration tool for PostgreSQL. The crunchy-pgadmin4 container executes the pgAdmin4 web application. diff --git a/docs/content/architecture/provisioning.md b/docs/content/architecture/provisioning.md index 23734ee180..a43d4baaba 100644 --- a/docs/content/architecture/provisioning.md +++ b/docs/content/architecture/provisioning.md @@ -49,8 +49,8 @@ allowing them to replay old WAL logs backups and perform full and point-in-time restores The pgBackRest repository can be configured to use storage that resides within -the Kubernetes cluster (the `local` option), Amazon S3 or a storage system that -uses the S3 protocol (the `s3` option), or both (`local,s3`). +the Kubernetes cluster (the `posix` option), Amazon S3 or a storage system that +uses the S3 protocol (the `s3` option), or both (`posix,s3`). Once the PostgreSQL primary instance is ready, there are two follow up actions that the PostgreSQL Operator takes to properly leverage the pgBackRest @@ -147,7 +147,7 @@ pgo create cluster mycluster2 --restore-from=mycluster1 ``` By default, pgBackRest will restore the latest backup available in the repository, and will replay -all available WAL archives. However, additional pgBackRest options can be specified using the +all available WAL archives. However, additional pgBackRest options can be specified using the `restore-opts` option, which allows the restore command to be further tailored and customized. 
For instance, the following demonstrates how a point-in-time restore can be utilized when creating a new cluster: diff --git a/docs/content/contributing/developer-setup.md b/docs/content/contributing/developer-setup.md index 84ea155636..3525f4374c 100644 --- a/docs/content/contributing/developer-setup.md +++ b/docs/content/contributing/developer-setup.md @@ -12,7 +12,7 @@ This guide is intended for those wanting to build the Operator from source or co # Prerequisites -The target development host for these instructions is a CentOS 8 or RHEL 8 host. Others operating systems +The target development host for these instructions is a RHEL 8 host. Other operating systems are possible; however, we do not support building or running the Operator on others at this time. ## Environment Variables diff --git a/docs/content/custom-resources/_index.md b/docs/content/custom-resources/_index.md index e27adfd3e6..17f9d469f7 100644 --- a/docs/content/custom-resources/_index.md +++ b/docs/content/custom-resources/_index.md @@ -35,213 +35,13 @@ need to interface through the [`pgo` client]({{< relref "/pgo-client/_index.md" The following sections will describe the functionality that is available today when manipulating the PostgreSQL Operator Custom Resources directly. -## PostgreSQL Operator Custom Resource Definitions - -There are several PostgreSQL Operator Custom Resource Definitions (CRDs) that -are installed in order for the PostgreSQL Operator to successfully function: - -- `pgclusters.crunchydata.com`: Stores information required to manage a -PostgreSQL cluster. This includes things like the cluster name, what storage and -resource classes to use, which version of PostgreSQL to run, information about -how to maintain a high-availability cluster, etc. -- `pgreplicas.crunchydata.com`: Stores information required to manage the -replicas within a PostgreSQL cluster. This includes things like the number of -replicas, what storage and resource classes to use, special affinity rules, etc. -- `pgtasks.crunchydata.com`: A general purpose CRD that accepts a type of task -that is needed to run against a cluster (e.g. take a backup) and tracks the -state of said task through its workflow. -- `pgpolicies.crunchydata.com`: Stores a reference to a SQL file that can be -executed against a PostgreSQL cluster. In the past, this was used to manage RLS -policies on PostgreSQL clusters. - -Below takes an in depth look for what each attribute does in a Custom Resource -Definition, and how they can be used in the creation and update workflow. - -### Glossary - -- `create`: if an attribute is listed as `create`, it means it can affect what -happens when a new Custom Resource is created. -- `update`: if an attribute is listed as `update`, it means it can affect the -Custom Resource, and by extension the objects it manages, when the attribute is -updated. - -### `pgclusters.crunchydata.com` - -The `pgclusters.crunchydata.com` Custom Resource Definition is the fundamental -definition of a PostgreSQL cluster. Most attributes only affect the deployment -of a PostgreSQL cluster at the time the PostgreSQL cluster is created. Some -attributes can be modified during the lifetime of the PostgreSQL cluster and -make changes, as described below.
- -#### Specification (`Spec`) - -| Attribute | Action | Description | -|-----------|--------|-------------| -| Annotations | `create`, `update` | Specify Kubernetes [Annotations](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/) that can be applied to the different deployments managed by the PostgreSQL Operator (PostgreSQL, pgBackRest, pgBouncer). For more information, please see the "Annotations Specification" below. | -| BackrestConfig | `create` | Optional references to pgBackRest configuration files -| BackrestLimits | `create`, `update` | Specify the container resource limits that the pgBackRest repository should use. Follows the [Kubernetes definitions of resource limits](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#resource-requests-and-limits-of-pod-and-container). | -| BackrestResources | `create`, `update` | Specify the container resource requests that the pgBackRest repository should use. Follows the [Kubernetes definitions of resource requests](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#resource-requests-and-limits-of-pod-and-container). | -| BackrestS3Bucket | `create` | An optional parameter that specifies a S3 bucket that pgBackRest should use. | -| BackrestS3Endpoint | `create` | An optional parameter that specifies the S3 endpoint pgBackRest should use. | -| BackrestS3Region | `create` | An optional parameter that specifies a cloud region that pgBackRest should use. | -| BackrestS3URIStyle | `create` | An optional parameter that specifies if pgBackRest should use the `path` or `host` S3 URI style. | -| BackrestS3VerifyTLS | `create` | An optional parameter that specifies if pgBackRest should verify the TLS endpoint. | -| BackrestStorage | `create` | A specification that gives information about the storage attributes for the pgBackRest repository, which stores backups and archives, of the PostgreSQL cluster. For details, please see the `Storage Specification` section below. This is required. | -| CCPImage | `create` | The name of the PostgreSQL container image to use, e.g. `crunchy-postgres-ha` or `crunchy-postgres-ha-gis`. | -| CCPImagePrefix | `create` | If provided, the image prefix (or registry) of the PostgreSQL container image, e.g. `registry.developers.crunchydata.com/crunchydata`. The default is to use the image prefix set in the PostgreSQL Operator configuration. | -| CCPImageTag | `create` | The tag of the PostgreSQL container image to use, e.g. `{{< param centosBase >}}-{{< param postgresVersion >}}-{{< param operatorVersion >}}`. | -| CollectSecretName | `create` | An optional attribute unless `crunchy-postgres-exporter` is specified in the `UserLabels`; contains the name of a Kubernetes Secret that contains the credentials for a PostgreSQL user that is used for metrics collection, and is created when the PostgreSQL cluster is first bootstrapped. For more information, please see `User Secret Specification`.| -| ClusterName | `create` | The name of the PostgreSQL cluster, e.g. `hippo`. This is used to group PostgreSQL instances (primary, replicas) together. | -| CustomConfig | `create` | If specified, references a custom ConfigMap to use when bootstrapping a PostgreSQL cluster. For the shape of this file, please see the section on [Custom Configuration]({{< relref "/advanced/custom-configuration.md" >}}) | -| Database | `create` | The name of a database that the PostgreSQL user can log into after the PostgreSQL cluster is created. 
| -| ExporterLimits | `create`, `update` | Specify the container resource limits that the `crunchy-postgres-exporter` sidecar uses when it is deployed with a PostgreSQL instance. Follows the [Kubernetes definitions of resource limits](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#resource-requests-and-limits-of-pod-and-container). | -| ExporterPort | `create` | If the `"crunchy-postgres-exporter"` label is set in `UserLabels`, then this specifies the port that the metrics sidecar runs on (e.g. `9187`) | -| ExporterResources | `create`, `update` | Specify the container resource requests that the `crunchy-postgres-exporter` sidecar uses when it is deployed with a PostgreSQL instance. Follows the [Kubernetes definitions of resource requests](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#resource-requests-and-limits-of-pod-and-container). | -| Limits | `create`, `update` | Specify the container resource limits that the PostgreSQL cluster should use. Follows the [Kubernetes definitions of resource limits](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#resource-requests-and-limits-of-pod-and-container). | -| Name | `create` | The name of the PostgreSQL instance that is the primary. On creation, this should be set to be the same as `ClusterName`. | -| Namespace | `create` | The Kubernetes Namespace that the PostgreSQL cluster is deployed in. | -| PGBadgerPort | `create` | If the `"crunchy-pgbadger"` label is set in `UserLabels`, then this specifies the port that the pgBadger sidecar runs on (e.g. `10000`) | -| PGDataSource | `create` | Used to indicate if a PostgreSQL cluster should bootstrap its data from a pgBackRest repository. This uses the PostgreSQL Data Source Specification, described below. | -| PGOImagePrefix | `create` | If provided, the image prefix (or registry) of any PostgreSQL Operator images that are used for jobs, e.g. `registry.developers.crunchydata.com/crunchydata`. The default is to use the image prefix set in the PostgreSQL Operator configuration. | -| PgBouncer | `create`, `update` | If specified, defines the attributes to use for the pgBouncer connection pooling deployment that can be used in conjunction with this PostgreSQL cluster. Please see the specification defined below. | -| PodAntiAffinity | `create` | A required section. Sets the [pod anti-affinity rules]({{< relref "/architecture/high-availability/_index.md#how-the-crunchy-postgresql-operator-uses-pod-anti-affinity" >}}) for the PostgreSQL cluster and associated deployments. Please see the `Pod Anti-Affinity Specification` section below. | -| Policies | `create` | If provided, a comma-separated list referring to `pgpolicies.crunchydata.com.Spec.Name` that should be run once the PostgreSQL primary is first initialized. | -| Port | `create` | The port that PostgreSQL will run on, e.g. `5432`. | -| PrimaryStorage | `create` | A specification that gives information about the storage attributes for the primary instance in the PostgreSQL cluster. For details, please see the `Storage Specification` section below. This is required. | -| RootSecretName | `create` | The name of a Kubernetes Secret that contains the credentials for a PostgreSQL _replication user_ that is created when the PostgreSQL cluster is first bootstrapped. 
For more information, please see `User Secret Specification`.| -| ReplicaStorage | `create` | A specification that gives information about the storage attributes for any replicas in the PostgreSQL cluster. For details, please see the `Storage Specification` section below. This will likely be changed in the future based on the nature of the high-availability system, but presently it is still required that you set it. It is recommended you use similar settings to that of `PrimaryStorage`. | -| Replicas | `create` | The number of replicas to create after a PostgreSQL primary is first initialized. This only works on create; to scale a cluster after it is initialized, please use the [`pgo scale`]({{< relref "/pgo-client/reference/pgo_scale.md" >}}) command. | -| Resources | `create`, `update` | Specify the container resource requests that the PostgreSQL cluster should use. Follows the [Kubernetes definitions of resource requests](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#resource-requests-and-limits-of-pod-and-container). | -| RootSecretName | `create` | The name of a Kubernetes Secret that contains the credentials for a PostgreSQL superuser that is created when the PostgreSQL cluster is first bootstrapped. For more information, please see `User Secret Specification`.| -| SyncReplication | `create` | If set to `true`, specifies the PostgreSQL cluster to use [synchronous replication]({{< relref "/architecture/high-availability/_index.md#how-the-crunchy-postgresql-operator-uses-pod-anti-affinity#synchronous-replication-guarding-against-transactions-loss" >}}).| -| User | `create` | The name of the PostgreSQL user that is created when the PostgreSQL cluster is first created. | -| UserLabels | `create` | A set of key-value string pairs that are used as a sort of "catch-all" for things that really should be modeled in the CRD. These values do get copied to the actually CR labels. If you want to set up metrics collection or pgBadger, you would specify `"crunchy-postgres-exporter": "true"` and `"crunchy-pgbadger": "true"` here, respectively. However, this structure does need to be set, so just follow whatever is in the example. | -| UserSecretName | `create` | The name of a Kubernetes Secret that contains the credentials for a standard PostgreSQL user that is created when the PostgreSQL cluster is first bootstrapped. For more information, please see `User Secret Specification`.| -| TablespaceMounts | `create`,`update` | Lists any tablespaces that are attached to the PostgreSQL cluster. Tablespaces can be added at a later time by updating the `TablespaceMounts` entry, but they cannot be removed. Stores a map of information, with the key being the name of the tablespace, and the value being a Storage Specification, defined below. | -| TLS | `create` | Defines the attributes for enabling TLS for a PostgreSQL cluster. See TLS Specification below. | -| TLSOnly | `create` | If set to true, requires client connections to use only TLS to connect to the PostgreSQL database. | -| Standby | `create`, `update` | If set to true, indicates that the PostgreSQL cluster is a "standby" cluster, i.e. is in read-only mode entirely. Please see [Kubernetes Multi-Cluster Deployments]({{< relref "/architecture/high-availability/multi-cluster-kubernetes.md" >}}) for more information. | -| Shutdown | `create`, `update` | If set to true, indicates that a PostgreSQL cluster should shutdown. If set to false, indicates that a PostgreSQL cluster should be up and running. 
| - -##### Storage Specification - -The storage specification is a spec that defines attributes about the storage to -be used for a particular function of a PostgreSQL cluster (e.g. a primary -instance or for the pgBackRest backup repository). The below describes each -attribute and how it works. - -| Attribute | Action | Description | -|-----------|--------|-------------| -| AccessMode| `create` | The name of the Kubernetes Persistent Volume [Access Mode](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes) to use. | -| MatchLabels | `create` | Only used with `StorageType` of `create`, used to match a particular subset of provisioned Persistent Volumes. | -| Name | `create` | Only needed for `PrimaryStorage` in `pgclusters.crunchydata.com`.Used to identify the name of the PostgreSQL cluster. Should match `ClusterName`. | -| Size | `create` | The size of the [Persistent Volume Claim](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims) (PVC). Must use a Kubernetes resource value, e.g. `20Gi`. | -| StorageClass | `create` | The name of the Kubernetes [StorageClass](https://kubernetes.io/docs/concepts/storage/storage-classes/) to use. | -| StorageType | `create` | Set to `create` if storage is provisioned (e.g. using `hostpath`). Set to `dynamic` if using a dynamic storage provisioner, e.g. via a `StorageClass`. | -| SupplementalGroups | `create` | If provided, a comma-separated list of group IDs to use in case it is needed to interface with a particular storage system. Typically used with NFS or hostpath storage. | - -##### Pod Anti-Affinity Specification - -Sets the [pod anti-affinity]({{< relref "/architecture/high-availability/_index.md#how-the-crunchy-postgresql-operator-uses-pod-anti-affinity" >}}) -for the PostgreSQL cluster and associated deployments. Each attribute can -contain one of the following values: - -- `required` -- `preferred` (which is also the recommended default) -- `disabled` - -For a detailed explanation for how this works. Please see the [high-availability]({{< relref "/architecture/high-availability/_index.md#how-the-crunchy-postgresql-operator-uses-pod-anti-affinity" >}}) -documentation. - -| Attribute | Action | Description | -|-----------|--------|-------------| -| Default | `create` | The default pod anti-affinity to use for all Pods managed in a given PostgreSQL cluster. | -| PgBackRest | `create` | If set to a value that differs from `Default`, specifies the pod anti-affinity to use for just the pgBackRest repository. | -| PgBouncer | `create` | If set to a value that differs from `Default`, specifies the pod anti-affinity to use for just the pgBouncer Pods. | - -##### PostgreSQL Data Source Specification - -This specification is used when one wants to bootstrap the data in a PostgreSQL -cluster from a pgBackRest repository. This can be a pgBackRest repository that -is attached to an active PostgreSQL cluster or is kept around to be used for -spawning new PostgreSQL clusters. - -| Attribute | Action | Description | -|-----------|--------|-------------| -| RestoreFrom | `create` | The name of a PostgreSQL cluster, active or former, that will be used for bootstrapping the data of a new PostgreSQL cluster. | -| RestoreOpts | `create` | Additional pgBackRest [restore options](https://pgbackrest.org/command.html#command-restore) that can be used as part of the bootstrapping operation, for example, point-in-time-recovery options. 
| - -##### TLS Specification - -The TLS specification makes a reference to the various secrets that are required -to enable TLS in a PostgreSQL cluster. For more information on how these secrets -should be structured, please see [Enabling TLS in a PostgreSQL Cluster]({{< relref "/pgo-client/common-tasks.md#enable-tls" >}}). - -| Attribute | Action | Description | -|-----------|--------|-------------| -| CASecret | `create` | A reference to the name of a Kubernetes Secret that specifies a certificate authority for the PostgreSQL cluster to trust. | -| ReplicationTLSSecret | `create` | A reference to the name of a Kubernetes TLS Secret that contains a keypair for authenticating the replication user. Must be used with `CASecret` and `TLSSecret`. | -| TLSSecret | `create` | A reference to the name of a Kubernetes TLS Secret that contains a keypair that is used for the PostgreSQL instance to identify itself and perform TLS communications with PostgreSQL clients. Must be used with `CASecret`. | - -##### pgBouncer Specification - -The pgBouncer specification defines how a pgBouncer deployment can be deployed -alongside the PostgreSQL cluster. pgBouncer is a PostgreSQL connection pooler -that can also help manage connection state, and is helpful to deploy alongside -a PostgreSQL cluster to help with failover scenarios too. - -| Attribute | Action | Description | -|-----------|--------|-------------| -| Limits | `create`, `update` | Specify the container resource limits that the pgBouncer Pods should use. Follows the [Kubernetes definitions of resource limits](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#resource-requests-and-limits-of-pod-and-container). | -| Replicas | `create`, `update` | The number of pgBouncer instances to deploy. Must be set to at least `1` to deploy pgBouncer. Setting to `0` removes an existing pgBouncer deployment for the PostgreSQL cluster. | -| Resources | `create`, `update` | Specify the container resource requests that the pgBouncer Pods should use. Follows the [Kubernetes definitions of resource requests](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#resource-requests-and-limits-of-pod-and-container). | - -##### Annotations Specification - -The `pgcluster.crunchydata.com` specification contains a block that allows for -custom [Annotations](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/) -to be applied to the Deployments that are managed by the PostgreSQL Operator, -including: - -- PostgreSQL -- pgBackRest -- pgBouncer - -This also includes the option to apply Annotations globally across the three -different deployment groups. - -| Attribute | Action | Description | -|-----------|--------|-------------| -| Backrest | `create`, `update` | Specify annotations that are only applied to the pgBackRest deployments | -| Global | `create`, `update` | Specify annotations that are applied to the PostgreSQL, pgBackRest, and pgBouncer deployments | -| PgBouncer | `create`, `update` | Specify annotations that are only applied to the pgBouncer deployments | -| Postgres | `create`, `update` | Specify annotations that are only applied to the PostgreSQL deployments | - -### `pgreplicas.crunchydata.com` - -The `pgreplicas.crunchydata.com` Custom Resource Definition contains information -pertaning to the structure of PostgreSQL replicas associated within a PostgreSQL -cluster. All of the attributes only affect the replica when it is created. 
-
-#### Specification (`Spec`)
-
-| Attribute | Action | Description |
-|-----------|--------|-------------|
-| ClusterName | `create` | The name of the PostgreSQL cluster, e.g. `hippo`. This is used to group PostgreSQL instances (primary, replicas) together. |
-| Name | `create` | The name of this PostgreSQL replica. It should be unique within a `ClusterName`. |
-| Namespace | `create` | The Kubernetes Namespace that the PostgreSQL cluster is deployed in. |
-| ReplicaStorage | `create` | A specification that gives information about the storage attributes for any replicas in the PostgreSQL cluster. For details, please see the `Storage Specification` section in the `pgclusters.crunchydata.com` description. This will likely be changed in the future based on the nature of the high-availability system, but presently it is still required that you set it. It is recommended you use similar settings to that of `PrimaryStorage`. |
-| UserLabels | `create` | A set of key-value string pairs that are used as a sort of "catch-all" for things that really should be modeled in the CRD. These values do get copied to the actually CR labels. If you want to set up metrics collection, you would specify `"crunchy-postgres-exporter": "true"` here. This also allows for node selector pinning using `NodeLabelKey` and `NodeLabelValue`. However, this structure does need to be set, so just follow whatever is in the example. |
-
 ## Custom Resource Workflows
 
 ### Create a PostgreSQL Cluster
 
 The fundamental workflow for interfacing with a PostgreSQL Operator Custom
-Resource Definition is for creating a PostgreSQL cluster. However, this is also
-one of the most complicated workflows to go through, as there are several
-Kubernetes objects that need to be created prior to using this method. These
-include:
+Resource Definition is for creating a PostgreSQL cluster. There are several
+Kubernetes objects that a PostgreSQL cluster requires to be deployed, including:
 
 - Secrets
 - Information for setting up a pgBackRest repository
@@ -252,80 +52,128 @@ include:
 Additionally, if you want to add some of the other sidecars, you may need to
 create additional secrets.
 
-The following guide goes through how to create a PostgreSQL cluster called
-`hippo` by creating a new custom resource.
-
-#### Step 1: Create the pgBackRest Secret
-
-pgBackRest is a fundamental part of a PostgreSQL deployment with the PostgreSQL
-Operator: not only is it a backup and archive repository, but it also helps with
-operations such as self-healing. A PostgreSQL instance a pgBackRest communicate
-using ssh, and as such, we need to generate a unique ssh keypair for
-communication for each PostgreSQL cluster we deploy.
+The good news is that if you do not provide these objects, the PostgreSQL
+Operator will create them for you to get your Postgres cluster up and running!
 
-In this example, we generate a ssh keypair using ED25519 keys, but if your
-environment requires it, you can also use RSA keys.
-
-In your working directory, run the following commands:
+The following guide goes through how to create a PostgreSQL cluster called
+`hippo` by creating a new custom resource.
 
-
+```
 # this variable is the name of the cluster being created
 export pgo_cluster_name=hippo
 # this variable is the namespace the cluster is being deployed into
 export cluster_namespace=pgo
 
-# generate a SSH public/private keypair for use by pgBackRest
-ssh-keygen -t ed25519 -N '' -f "${pgo_cluster_name}-key"
-
-# base64 encoded the keys for the generation of the Kubernetes secret, and place
-# them into variables temporarily
-public_key_temp=$(cat "${pgo_cluster_name}-key.pub" | base64)
-private_key_temp=$(cat "${pgo_cluster_name}-key" | base64)
-export pgbackrest_public_key="${public_key_temp//[$'\n']}" pgbackrest_private_key="${private_key_temp//[$'\n']}"
-
-# create the backrest-repo-config example file and substitute in the newly
-# created keys
-#
-# (Note: that the "config" / "sshd_config" entries contain configuration to
-# ensure that PostgreSQL instances are able to communicate with the pgBackRest
-# repository, which houses backups and archives, and vice versa. Most of the
-# settings follow the sshd defaults, with a few overrides. Edit at your own
-# discretion.)
-cat <<-EOF > "${pgo_cluster_name}-backrest-repo-config.yaml"
-apiVersion: v1
-kind: Secret
-type: Opaque
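+# write out the pgcluster custom resource manifest; per the note above, any
+# Secrets you do not create yourself will be generated by the Operator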
+cat <<-EOF > "${pgo_cluster_name}-pgcluster.yaml"
+apiVersion: crunchydata.com/v1
+kind: Pgcluster
 metadata:
+  annotations:
+    current-primary: ${pgo_cluster_name}
   labels:
+    crunchy-pgha-scope: ${pgo_cluster_name}
+    deployment-name: ${pgo_cluster_name}
+    name: ${pgo_cluster_name}
     pg-cluster: ${pgo_cluster_name}
-    pgo-backrest-repo: "true"
-  name: ${pgo_cluster_name}-backrest-repo-config
+    pgo-version: {{< param operatorVersion >}}
+    pgouser: admin
+  name: ${pgo_cluster_name}
+  namespace: ${cluster_namespace}
+spec:
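+  # each storage section below follows the Storage Specification described
+  # later in this document; "storagetype: dynamic" assumes a StorageClass
+  # that supports dynamic provisioning (adjust "size" and "storageclass"
+  # for your environment)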
+  BackrestStorage:
+    accessmode: ReadWriteMany
+    matchLabels: ""
+    name: ""
+    size: 1G
+    storageclass: ""
+    storagetype: dynamic
+    supplementalgroups: ""
+  PrimaryStorage:
+    accessmode: ReadWriteMany
+    matchLabels: ""
+    name: ${pgo_cluster_name}
+    size: 1G
+    storageclass: ""
+    storagetype: dynamic
+    supplementalgroups: ""
+  ReplicaStorage:
+    accessmode: ReadWriteMany
+    matchLabels: ""
+    name: ""
+    size: 1G
+    storageclass: ""
+    storagetype: dynamic
+    supplementalgroups: ""
+  annotations: {}
+  ccpimage: crunchy-postgres-ha
+  ccpimageprefix: registry.developers.crunchydata.com/crunchydata
+  ccpimagetag: {{< param ubiBase >}}-{{< param postgresVersion >}}-{{< param operatorVersion >}}
+  clustername: ${pgo_cluster_name}
+  database: ${pgo_cluster_name}
+  exporterport: "9187"
+  limits: {}
+  name: ${pgo_cluster_name}
   namespace: ${cluster_namespace}
-data:
-  authorized_keys: ${pgbackrest_public_key}
-  id_ed25519: ${pgbackrest_private_key}
-  ssh_host_ed25519_key: ${pgbackrest_private_key}
-  config: SG9zdCAqClN0cmljdEhvc3RLZXlDaGVja2luZyBubwpJZGVudGl0eUZpbGUgL3RtcC9pZF9lZDI1NTE5ClBvcnQgMjAyMgpVc2VyIHBnYmFja3Jlc3QK
-  sshd_config: IwkkT3BlbkJTRDogc3NoZF9jb25maWcsdiAxLjEwMCAyMDE2LzA4LzE1IDEyOjMyOjA0IG5hZGR5IEV4cCAkCgojIFRoaXMgaXMgdGhlIHNzaGQgc2VydmVyIHN5c3RlbS13aWRlIGNvbmZpZ3VyYXRpb24gZmlsZS4gIFNlZQojIHNzaGRfY29uZmlnKDUpIGZvciBtb3JlIGluZm9ybWF0aW9uLgoKIyBUaGlzIHNzaGQgd2FzIGNvbXBpbGVkIHdpdGggUEFUSD0vdXNyL2xvY2FsL2JpbjovdXNyL2JpbgoKIyBUaGUgc3RyYXRlZ3kgdXNlZCBmb3Igb3B0aW9ucyBpbiB0aGUgZGVmYXVsdCBzc2hkX2NvbmZpZyBzaGlwcGVkIHdpdGgKIyBPcGVuU1NIIGlzIHRvIHNwZWNpZnkgb3B0aW9ucyB3aXRoIHRoZWlyIGRlZmF1bHQgdmFsdWUgd2hlcmUKIyBwb3NzaWJsZSwgYnV0IGxlYXZlIHRoZW0gY29tbWVudGVkLiAgVW5jb21tZW50ZWQgb3B0aW9ucyBvdmVycmlkZSB0aGUKIyBkZWZhdWx0IHZhbHVlLgoKIyBJZiB5b3Ugd2FudCB0byBjaGFuZ2UgdGhlIHBvcnQgb24gYSBTRUxpbnV4IHN5c3RlbSwgeW91IGhhdmUgdG8gdGVsbAojIFNFTGludXggYWJvdXQgdGhpcyBjaGFuZ2UuCiMgc2VtYW5hZ2UgcG9ydCAtYSAtdCBzc2hfcG9ydF90IC1wIHRjcCAjUE9SVE5VTUJFUgojClBvcnQgMjAyMgojQWRkcmVzc0ZhbWlseSBhbnkKI0xpc3RlbkFkZHJlc3MgMC4wLjAuMAojTGlzdGVuQWRkcmVzcyA6OgoKSG9zdEtleSAvc3NoZC9zc2hfaG9zdF9lZDI1NTE5X2tleQoKIyBDaXBoZXJzIGFuZCBrZXlpbmcKI1Jla2V5TGltaXQgZGVmYXVsdCBub25lCgojIExvZ2dpbmcKI1N5c2xvZ0ZhY2lsaXR5IEFVVEgKU3lzbG9nRmFjaWxpdHkgQVVUSFBSSVYKI0xvZ0xldmVsIElORk8KCiMgQXV0aGVudGljYXRpb246CgojTG9naW5HcmFjZVRpbWUgMm0KUGVybWl0Um9vdExvZ2luIG5vClN0cmljdE1vZGVzIG5vCiNNYXhBdXRoVHJpZXMgNgojTWF4U2Vzc2lvbnMgMTAKClB1YmtleUF1dGhlbnRpY2F0aW9uIHllcwoKIyBUaGUgZGVmYXVsdCBpcyB0byBjaGVjayBib3RoIC5zc2gvYXV0aG9yaXplZF9rZXlzIGFuZCAuc3NoL2F1dGhvcml6ZWRfa2V5czIKIyBidXQgdGhpcyBpcyBvdmVycmlkZGVuIHNvIGluc3RhbGxhdGlvbnMgd2lsbCBvbmx5IGNoZWNrIC5zc2gvYXV0aG9yaXplZF9rZXlzCiNBdXRob3JpemVkS2V5c0ZpbGUJL3BnY29uZi9hdXRob3JpemVkX2tleXMKQXV0aG9yaXplZEtleXNGaWxlCS9zc2hkL2F1dGhvcml6ZWRfa2V5cwoKI0F1dGhvcml6ZWRQcmluY2lwYWxzRmlsZSBub25lCgojQXV0aG9yaXplZEtleXNDb21tYW5kIG5vbmUKI0F1dGhvcml6ZWRLZXlzQ29tbWFuZFVzZXIgbm9ib2R5CgojIEZvciB0aGlzIHRvIHdvcmsgeW91IHdpbGwgYWxzbyBuZWVkIGhvc3Qga2V5cyBpbiAvZXRjL3NzaC9zc2hfa25vd25faG9zdHMKI0hvc3RiYXNlZEF1dGhlbnRpY2F0aW9uIG5vCiMgQ2hhbmdlIHRvIHllcyBpZiB5b3UgZG9uJ3QgdHJ1c3Qgfi8uc3NoL2tub3duX2hvc3RzIGZvcgojIEhvc3RiYXNlZEF1dGhlbnRpY2F0aW9uCiNJZ25vcmVVc2VyS25vd25Ib3N0cyBubwojIERvbid0IHJlYWQgdGhlIHVzZXIncyB+Ly5yaG9zdHMgYW5kIH4vLnNob3N0cyBmaWxlcwojSWdub3JlUmhvc3RzIHllcwoKIyBUbyBkaXNhYmxlIHR1bm5lbGVkIGNsZWFyIHRleHQgcGFzc3dvcmRzLCBjaGFuZ2UgdG8gbm8gaGVyZSEKI1Bhc3N3b3JkQXV0aGVudGljYXRpb24geWVzCiNQZXJtaXRFbXB0eVBhc3N3b3JkcyBubwpQYXNzd29yZEF1dGhlbnRpY2F0aW9uIG5vCgojIENoYW5nZSB0byBubyB0byBkaXNhYmxlIHMva2V5IHBhc3N3b3JkcwpDaGFsbGVuZ2VSZXNwb25zZUF1dGhlbnRpY2F0aW9uIHllcwojQ2hhbGxlbmdlUmVzcG9uc2VBdXRoZW50aWNhdGlvbiBubwoKIyBLZXJiZXJvcyBvcHRpb25zCiNLZXJiZXJvc0F1dGhlbnRpY2F0aW9uIG5vCiNLZXJiZXJvc09yTG9jYWxQYXNzd2QgeWVzCiNLZXJiZXJvc1RpY2tldENsZWFudXAgeWVzCiNLZXJiZXJvc0dldEFGU1Rva2VuIG5vCiNLZXJiZXJvc1VzZUt1c2Vyb2sgeWVzCgojIEdTU0FQSSBvcHRpb25zCiNHU1NBUElBdXRoZW50aWNhdGlvbiB5ZXMKI0dTU0FQSUNsZWFudXBDcmVkZW50aWFscyBubwojR1NTQVBJU3RyaWN0QWNjZXB0b3JDaGVjayB5ZXMKI0dTU0FQSUtleUV4Y2hhbmdlIG5vCiNHU1NBUElFbmFibGVrNXVzZXJzIG5vCgojIFNldCB0aGlzIHRvICd5ZXMnIHRvIGVuYWJsZSBQQU0gYXV0aGVudGljYXRpb24sIGFjY291bnQgcHJvY2Vzc2luZywKIyBhbmQgc2Vzc2lvbiBwcm9jZXNzaW5nLiBJZiB0aGlzIGlzIGVuYWJsZWQsIFBBTSBhdXRoZW50aWNhdGlvbiB3aWxsCiMgYmUgYWxsb3dlZCB0aHJvdWdoIHRoZSBDaGFsbGVuZ2VSZXNwb25zZUF1dGhlbnRpY2F0aW9uIGFuZAojIFBhc3N3b3JkQXV0aGVudGljYXRpb24uICBEZXBlbmRpbmcgb24geW91ciBQQU0gY29uZmlndXJhdGlvbiwKIyBQQU0gYXV0aGVudGljYXRpb24gdmlhIENoYWxsZW5nZVJlc3BvbnNlQXV0aGVudGljYXRpb24gbWF5IGJ5cGFzcwojIHRoZSBzZXR0aW5nIG9mICJQZXJtaXRSb290TG9naW4gd2l0aG91dC1wYXNzd29yZCIuCiMgSWYgeW91IGp1c3Qgd2FudCB0aGUgUEFNIGFjY291bnQgYW5kIHNlc3Npb24gY2hlY2tzIHRvIHJ1biB3aXRob3V0CiMgUEFNIGF1dGhlbnRpY2F0aW9uLCB0aGVuIGVuYWJsZSB0aGlzIGJ1dCBzZXQgUGFzc3dvcmR
BdXRoZW50aWNhdGlvbgojIGFuZCBDaGFsbGVuZ2VSZXNwb25zZUF1dGhlbnRpY2F0aW9uIHRvICdubycuCiMgV0FSTklORzogJ1VzZVBBTSBubycgaXMgbm90IHN1cHBvcnRlZCBpbiBSZWQgSGF0IEVudGVycHJpc2UgTGludXggYW5kIG1heSBjYXVzZSBzZXZlcmFsCiMgcHJvYmxlbXMuClVzZVBBTSB5ZXMKCiNBbGxvd0FnZW50Rm9yd2FyZGluZyB5ZXMKI0FsbG93VGNwRm9yd2FyZGluZyB5ZXMKI0dhdGV3YXlQb3J0cyBubwpYMTFGb3J3YXJkaW5nIHllcwojWDExRGlzcGxheU9mZnNldCAxMAojWDExVXNlTG9jYWxob3N0IHllcwojUGVybWl0VFRZIHllcwojUHJpbnRNb3RkIHllcwojUHJpbnRMYXN0TG9nIHllcwojVENQS2VlcEFsaXZlIHllcwojVXNlTG9naW4gbm8KI1Blcm1pdFVzZXJFbnZpcm9ubWVudCBubwojQ29tcHJlc3Npb24gZGVsYXllZAojQ2xpZW50QWxpdmVJbnRlcnZhbCAwCiNDbGllbnRBbGl2ZUNvdW50TWF4IDMKI1Nob3dQYXRjaExldmVsIG5vCiNVc2VETlMgeWVzCiNQaWRGaWxlIC92YXIvcnVuL3NzaGQucGlkCiNNYXhTdGFydHVwcyAxMDozMDoxMDAKI1Blcm1pdFR1bm5lbCBubwojQ2hyb290RGlyZWN0b3J5IG5vbmUKI1ZlcnNpb25BZGRlbmR1bSBub25lCgojIG5vIGRlZmF1bHQgYmFubmVyIHBhdGgKI0Jhbm5lciBub25lCgojIEFjY2VwdCBsb2NhbGUtcmVsYXRlZCBlbnZpcm9ubWVudCB2YXJpYWJsZXMKQWNjZXB0RW52IExBTkcgTENfQ1RZUEUgTENfTlVNRVJJQyBMQ19USU1FIExDX0NPTExBVEUgTENfTU9ORVRBUlkgTENfTUVTU0FHRVMKQWNjZXB0RW52IExDX1BBUEVSIExDX05BTUUgTENfQUREUkVTUyBMQ19URUxFUEhPTkUgTENfTUVBU1VSRU1FTlQKQWNjZXB0RW52IExDX0lERU5USUZJQ0FUSU9OIExDX0FMTCBMQU5HVUFHRQpBY2NlcHRFbnYgWE1PRElGSUVSUwoKIyBvdmVycmlkZSBkZWZhdWx0IG9mIG5vIHN1YnN5c3RlbXMKU3Vic3lzdGVtCXNmdHAJL3Vzci9saWJleGVjL29wZW5zc2gvc2Z0cC1zZXJ2ZXIKCiMgRXhhbXBsZSBvZiBvdmVycmlkaW5nIHNldHRpbmdzIG9uIGEgcGVyLXVzZXIgYmFzaXMKI01hdGNoIFVzZXIgYW5vbmN2cwojCVgxMUZvcndhcmRpbmcgbm8KIwlBbGxvd1RjcEZvcndhcmRpbmcgbm8KIwlQZXJtaXRUVFkgbm8KIwlGb3JjZUNvbW1hbmQgY3ZzIHNlcnZlcgo=
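+  # pgDataSource bootstraps the cluster from an existing pgBackRest
+  # repository; empty values initialize a brand new cluster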
+  pgDataSource:
+    restoreFrom: ""
+    restoreOpts: ""
+  pgbadgerport: "10000"
+  pgoimageprefix: registry.developers.crunchydata.com/crunchydata
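+  # "preferred" pod anti-affinity asks Kubernetes to spread the PostgreSQL,
+  # pgBackRest, and pgBouncer Pods across nodes when possible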
+  podAntiAffinity:
+    default: preferred
+    pgBackRest: preferred
+    pgBouncer: preferred
+  port: "5432"
+  tolerations: []
+  user: hippo
+  userlabels:
+    pgo-version: {{< param operatorVersion >}}
 EOF
 
-# remove the pgBackRest ssh keypair from the shell session
-unset pgbackrest_public_key pgbackrest_private_key
-
-# create the pgBackRest secret
-kubectl apply -f "${pgo_cluster_name}-backrest-repo-config.yaml"
-
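+# create the cluster by applying the manifest; the PostgreSQL Operator will
+# reconcile it into a running PostgreSQL cluster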
+kubectl apply -f "${pgo_cluster_name}-pgcluster.yaml"
+```
 
-#### Step 2: Creating the PostgreSQL User Secrets
+And that's all! The PostgreSQL Operator will go ahead and create the cluster.
 
-As mentioned above, there are a minimum of three PostgreSQL user accounts that
-you must create in order to bootstrap a PostgreSQL cluster. These are:
+As part of this process, the PostgreSQL Operator creates several Secrets that
+contain the credentials for three user accounts that must be present in order
+to bootstrap a PostgreSQL cluster. These are:
 
 - A PostgreSQL superuser
 - A replication user
 - A standard PostgreSQL user
 
-The below code will help you set up these Secrets.
+The Secrets represent the following PostgreSQL users and can be identified using
+the below patterns:
+
+| PostgreSQL User | Type | Secret Pattern | Notes |
+| --------------- | ----------- | ---------------------------------- | ----- |
+| `postgres` | Superuser | `<clusterName>-postgres-secret` | This is the PostgreSQL superuser account. Using the above example, the name of the secret would be `hippo-postgres-secret`. |
+| `primaryuser` | Replication | `<clusterName>-primaryuser-secret` | This is for the managed replication user account for maintaining high availability. This account does not need to be accessed. Using the above example, the name of the secret would be `hippo-primaryuser-secret`. |
+| User | User | `<clusterName>-<userName>-secret` | This is an unprivileged user that should be used for most operations. This secret is set by the `user` attribute in the custom resources. In the above example, the name of this user is `hippo`, which would make the Secret `hippo-hippo-secret`. |
+
+To extract the user credentials so you can log into the database, you can use
+the following JSONPath expression:
+
+```
+# namespace that the cluster is running in
+export cluster_namespace=pgo
+# name of the cluster
+export pgo_cluster_name=hippo
+# name of the user whose password we want to get
+export pgo_cluster_username=hippo
+
+kubectl -n "${cluster_namespace}" get secrets \
+  "${pgo_cluster_name}-${pgo_cluster_username}-secret" -o "jsonpath={.data['password']}" | base64 -d
+```
+
+#### Customizing User Credentials
+
+If you wish to set the credentials for these users on your own, you have to
+create these Secrets _before_ creating a custom resource. The below example
+shows how to create the three required user accounts prior to creating a custom
+resource. Note that if you omit any of these Secrets, the Postgres Operator
+will create it on its own.
 
 ```
 # this variable is the name of the cluster being created
@@ -354,21 +202,55 @@ kubectl label secrets -n "${cluster_namespace}" "${pgo_cluster_name}-primaryuser
 kubectl label secrets -n "${cluster_namespace}" "${pgo_cluster_name}-hippo-secret" "pg-cluster=${pgo_cluster_name}"
 ```
 
-#### Step 3: Create the PostgreSQL Cluster
+### Create a PostgreSQL Cluster With Backups in S3
+
+A frequent use case is to create a PostgreSQL cluster with S3 or an S3-like
+storage system for storing backups. This requires adding a Secret that contains
+the S3 key and key secret for your account, and adding some additional
+information into the custom resource.
+
+#### Step 1: Create the pgBackRest S3 Secrets
+
+As mentioned above, it is necessary to create a Secret containing the S3 key and
+key secret that will allow a user to create backups in S3.
+
+The below code will help you set up this Secret.
+ +``` +# this variable is the name of the cluster being created +pgo_cluster_name=hippo +# this variable is the namespace the cluster is being deployed into +cluster_namespace=pgo +# the following variables are your S3 key and key secret +backrest_s3_key=yours3key +backrest_s3_key_secret=yours3keysecret + +kubectl -n "${cluster_namespace}" create secret generic "${pgo_cluster_name}-backrest-repo-config" \ + --from-literal="aws-s3-key=${backrest_s3_key}" \ + --from-literal="aws-s3-key-secret=${backrest_s3_key_secret}" + +unset backrest_s3_key +unset backrest_s3_key_secret +``` + +#### Step 2: Create the PostgreSQL Cluster With the Secrets in place. It is now time to create the PostgreSQL cluster. The below manifest references the Secrets created in the previous step to add a custom resource to the `pgclusters.crunchydata.com` custom resource definition. - -**NOTE**: You will need to modify the storage sections to match your storage -configuration. +There are some additions in this example specifically for storing backups in S3. ``` # this variable is the name of the cluster being created export pgo_cluster_name=hippo # this variable is the namespace the cluster is being deployed into export cluster_namespace=pgo +# the following variables store the information for your S3 cluster. You may +# need to adjust them for your actual settings +export backrest_s3_bucket=your-bucket +export backrest_s3_endpoint=s3.region-name.amazonaws.com +export backrest_s3_region=region-name cat <<-EOF > "${pgo_cluster_name}-pgcluster.yaml" apiVersion: crunchydata.com/v1 @@ -377,15 +259,10 @@ metadata: annotations: current-primary: ${pgo_cluster_name} labels: - autofail: "true" - crunchy-pgbadger: "false" crunchy-pgha-scope: ${pgo_cluster_name} - crunchy-postgres-exporter: "false" deployment-name: ${pgo_cluster_name} name: ${pgo_cluster_name} pg-cluster: ${pgo_cluster_name} - pg-pod-anti-affinity: "" - pgo-backrest: "true" pgo-version: {{< param operatorVersion >}} pgouser: admin name: ${pgo_cluster_name} @@ -397,7 +274,7 @@ spec: name: "" size: 1G storageclass: "" - storagetype: create + storagetype: dynamic supplementalgroups: "" PrimaryStorage: accessmode: ReadWriteMany @@ -405,7 +282,7 @@ spec: name: ${pgo_cluster_name} size: 1G storageclass: "" - storagetype: create + storagetype: dynamic supplementalgroups: "" ReplicaStorage: accessmode: ReadWriteMany @@ -413,31 +290,145 @@ spec: name: "" size: 1G storageclass: "" - storagetype: create + storagetype: dynamic supplementalgroups: "" - annotations: - backrestLimits: {} - backrestRepoPath: "" - backrestResources: - memory: 48Mi - backrestS3Bucket: "" - backrestS3Endpoint: "" - backrestS3Region: "" + annotations: {} + backrestStorageTypes: + - s3 + backrestS3Bucket: ${backrest_s3_bucket} + backrestS3Endpoint: ${backrest_s3_endpoint} + backrestS3Region: ${backrest_s3_region} backrestS3URIStyle: "" backrestS3VerifyTLS: "" ccpimage: crunchy-postgres-ha ccpimageprefix: registry.developers.crunchydata.com/crunchydata - ccpimagetag: {{< param centosBase >}}-{{< param postgresVersion >}}-{{< param operatorVersion >}} + ccpimagetag: {{< param ubiBase >}}-{{< param postgresVersion >}}-{{< param operatorVersion >}} + clustername: ${pgo_cluster_name} + database: ${pgo_cluster_name} + exporterport: "9187" + limits: {} + name: ${pgo_cluster_name} + namespace: ${cluster_namespace} + pgDataSource: + restoreFrom: "" + restoreOpts: "" + pgbadgerport: "10000" + pgoimageprefix: registry.developers.crunchydata.com/crunchydata + podAntiAffinity: + default: preferred + pgBackRest: 
preferred + pgBouncer: preferred + port: "5432" + tolerations: [] + user: hippo + userlabels: + pgo-version: {{< param operatorVersion >}} +EOF + +kubectl apply -f "${pgo_cluster_name}-pgcluster.yaml" +``` + +### Create a PostgreSQL Cluster with TLS + +There are three items that are required to enable TLS in your PostgreSQL clusters: + +- A CA certificate +- A TLS private key +- A TLS certificate + +It is possible [create a PostgreSQL cluster with TLS]({{< relref "tutorial/tls.md" >}}) using a custom resource workflow with the prerequisite of ensuring the above three items are created. + +For a detailed explanation for how TLS works with the PostgreSQL Operator, please see the [TLS tutorial]({{< relref "tutorial/tls.md" >}}). + +#### Step 1: Create TLS Secrets + +There are two Secrets that need to be created: + +1. A Secret containing the certificate authority (CA). You may only need to create this Secret once, as a CA certificate can be shared amongst your clusters. +2. A Secret that contains the TLS private key & certificate. + +This assumes that you have already [generated your TLS certificates](https://www.postgresql.org/docs/current/ssl-tcp.html#SSL-CERTIFICATE-CREATION) where the CA is named `ca.crt` and the server key and certificate are named `server.key` and `server.crt` respectively. + +Substitute the correct values for your environment into the environmental variables in the example below: + +``` +# this variable is the name of the cluster being created +export pgo_cluster_name=hippo +# this variable is the namespace the cluster is being deployed into +export cluster_namespace=pgo +# this is the local path to where you stored the CA and server key and certificate +export cluster_tls_asset_path=/path/to + +# create the CA secret. if this already exists, it's OK if it fails +kubectl create secret generic postgresql-ca -n "${cluster_namespace}" \ + --from-file="ca.crt=${cluster_tls_asset_path}/ca.crt" + +# create the server key/certificate secret +kubectl create secret tls "${pgo_cluster_name}-tls-keypair" -n "${cluster_namespace}" \ + --cert="${cluster_tls_asset_path}/server.crt" \ + --key="${cluster_tls_asset_path}/server.key" +``` + +#### Step 2: Create the PostgreSQL Cluster + +The below example uses the Secrets created in the previous step and creates a TLS-enabled PostgreSQL cluster. 
Additionally, this example sets the `tlsOnly` attribute to `true`, which requires all TCP connections to occur over TLS: + +``` +# this variable is the name of the cluster being created +export pgo_cluster_name=hippo +# this variable is the namespace the cluster is being deployed into +export cluster_namespace=pgo + +cat <<-EOF > "${pgo_cluster_name}-pgcluster.yaml" +apiVersion: crunchydata.com/v1 +kind: Pgcluster +metadata: + annotations: + current-primary: ${pgo_cluster_name} + labels: + crunchy-pgha-scope: ${pgo_cluster_name} + deployment-name: ${pgo_cluster_name} + name: ${pgo_cluster_name} + pg-cluster: ${pgo_cluster_name} + pgo-version: {{< param operatorVersion >}} + pgouser: admin + name: ${pgo_cluster_name} + namespace: ${cluster_namespace} +spec: + BackrestStorage: + accessmode: ReadWriteMany + matchLabels: "" + name: "" + size: 1G + storageclass: "" + storagetype: dynamic + supplementalgroups: "" + PrimaryStorage: + accessmode: ReadWriteMany + matchLabels: "" + name: ${pgo_cluster_name} + size: 1G + storageclass: "" + storagetype: dynamic + supplementalgroups: "" + ReplicaStorage: + accessmode: ReadWriteMany + matchLabels: "" + name: "" + size: 1G + storageclass: "" + storagetype: dynamic + supplementalgroups: "" + annotations: {} + ccpimage: crunchy-postgres-ha + ccpimageprefix: registry.developers.crunchydata.com/crunchydata + ccpimagetag: {{< param ubiBase >}}-{{< param postgresVersion >}}-{{< param operatorVersion >}} clustername: ${pgo_cluster_name} - customconfig: "" database: ${pgo_cluster_name} exporterport: "9187" limits: {} name: ${pgo_cluster_name} namespace: ${cluster_namespace} - pgBouncer: - limits: {} - replicas: 0 pgDataSource: restoreFrom: "" restoreOpts: "" @@ -447,25 +438,14 @@ spec: default: preferred pgBackRest: preferred pgBouncer: preferred - policies: "" port: "5432" - primarysecretname: ${pgo_cluster_name}-primaryuser-secret - replicas: "0" - rootsecretname: ${pgo_cluster_name}-postgres-secret - shutdown: false - standby: false - tablespaceMounts: {} tls: - caSecret: "" - replicationTLSSecret: "" - tlsSecret: "" - tlsOnly: false + caSecret: postgresql-ca + tlsSecret: ${pgo_cluster_name}-tls-keypair + tlsOnly: true user: hippo userlabels: - crunchy-postgres-exporter: "false" - pg-pod-anti-affinity: "" pgo-version: {{< param operatorVersion >}} - usersecretname: ${pgo_cluster_name}-hippo-secret EOF kubectl apply -f "${pgo_cluster_name}-pgcluster.yaml" @@ -478,7 +458,7 @@ There following modification operations are supported on the #### Modify Resource Requests & Limits -Modifying the `resources`, `limits`, `backrestResources`, `backRestLimits`, +Modifying the `resources`, `limits`, `backrestResources`, `backrestLimits`, `pgBouncer.resources`, or `pgbouncer.limits` will cause the PostgreSQL Operator to apply the new values to the affected [Deployments](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/). @@ -554,13 +534,10 @@ spec: name: ${pgo_cluster_name}-${pgo_cluster_replica_suffix} size: 1G storageclass: "" - storagetype: create + storagetype: dynamic supplementalgroups: "" + tolerations: [] userlabels: - NodeLabelKey: "" - NodeLabelValue: "" - crunchy-postgres-exporter: "false" - pg-pod-anti-affinity: "" pgo-version: {{< param operatorVersion >}} EOF @@ -569,6 +546,12 @@ kubectl apply -f "${pgo_cluster_name}-${pgo_cluster_replica_suffix}-pgreplica.ya Add this time, removing a replica must be handled through the [`pgo` client]({{< relref "/pgo-client/common-tasks.md#high-availability-scaling-up-down">}}). 
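+
+After applying the `pgreplicas.crunchydata.com` manifest above, you can verify
+that the replica was registered and that its Pod is running. This is a quick
+sketch that assumes the `hippo` cluster and the environment variables from the
+example above:
+
+```
+# list the pgreplicas custom resources in the namespace
+kubectl -n "${cluster_namespace}" get pgreplicas
+
+# list all Pods that belong to the cluster, including the new replica
+kubectl -n "${cluster_namespace}" get pods --selector="pg-cluster=${pgo_cluster_name}"
+```
+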
+### Monitoring
+
+To enable the [monitoring]({{< relref "/architecture/monitoring.md">}})
+(aka metrics) sidecar using the `crunchy-postgres-exporter` container, you need
+to set the `exporter` attribute in the `pgclusters.crunchydata.com` custom resource.
+
 ### Add a Tablespace
 
 Tablespaces can be added during the lifetime of a PostgreSQL cluster (tablespaces can be removed as well, but for a detailed explanation as to how, please see the [Tablespaces]({{< relref "/architecture/tablespaces.md">}}) section).
@@ -600,7 +583,7 @@ tablespaceMounts:
     matchLabels: ""
     size: 5Gi
     storageclass: ""
-    storagetype: create
+    storagetype: dynamic
     supplementalgroups: ""
 ```
 
@@ -690,3 +673,232 @@ spec:
 
 Save your edits, and in a short period of time, you should see these annotations
 applied to the managed Deployments.
+
+### Delete a PostgreSQL Cluster
+
+A PostgreSQL cluster can be deleted by simply deleting the `pgclusters.crunchydata.com` resource.
+
+It is possible to keep both the PostgreSQL data directory as well as the pgBackRest backup repository when using this method by setting the following annotations on the `pgclusters.crunchydata.com` custom resource:
+
+- `keep-backups`: indicates to keep the pgBackRest PVC when deleting the cluster.
+- `keep-data`: indicates to keep the PostgreSQL data PVC when deleting the cluster.
+
+## PostgreSQL Operator Custom Resource Definitions
+
+There are several PostgreSQL Operator Custom Resource Definitions (CRDs) that
+are installed in order for the PostgreSQL Operator to successfully function:
+
+- `pgclusters.crunchydata.com`: Stores information required to manage a
+PostgreSQL cluster. This includes things like the cluster name, what storage and
+resource classes to use, which version of PostgreSQL to run, information about
+how to maintain a high-availability cluster, etc.
+- `pgreplicas.crunchydata.com`: Stores information required to manage the
+replicas within a PostgreSQL cluster. This includes things like the number of
+replicas, what storage and resource classes to use, special affinity rules, etc.
+- `pgtasks.crunchydata.com`: A general purpose CRD that accepts a type of task
+that is needed to run against a cluster (e.g. take a backup) and tracks the
+state of said task through its workflow.
+- `pgpolicies.crunchydata.com`: Stores a reference to a SQL file that can be
+executed against a PostgreSQL cluster. In the past, this was used to manage RLS
+policies on PostgreSQL clusters.
+
+Below is an in-depth look at what each attribute does in a Custom Resource
+Definition, and how each can be used in the creation and update workflow.
+
+### Glossary
+
+- `create`: if an attribute is listed as `create`, it means it can affect what
+happens when a new Custom Resource is created.
+- `update`: if an attribute is listed as `update`, it means it can affect the
+Custom Resource, and by extension the objects it manages, when the attribute is
+updated.
+
+### `pgclusters.crunchydata.com`
+
+The `pgclusters.crunchydata.com` Custom Resource Definition is the fundamental
+definition of a PostgreSQL cluster. Most attributes only affect the deployment
+of a PostgreSQL cluster at the time the PostgreSQL cluster is created. Some
+attributes can be modified during the lifetime of the PostgreSQL cluster to
+make changes, as described below.
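+
+For example, you can review the attributes of an existing cluster and modify
+the ones marked `update` below. This is a sketch that assumes the `hippo`
+cluster in the `pgo` namespace from the examples above:
+
+```
+# view the full specification of the pgclusters.crunchydata.com custom resource
+kubectl -n pgo get pgclusters hippo -o yaml
+
+# edit the custom resource in place; only attributes marked "update" change a
+# running cluster
+kubectl -n pgo edit pgclusters hippo
+```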
+ +#### Specification (`Spec`) + +| Attribute | Action | Description | +|-----------|--------|-------------| +| annotations | `create`, `update` | Specify Kubernetes [Annotations](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/) that can be applied to the different deployments managed by the PostgreSQL Operator (PostgreSQL, pgBackRest, pgBouncer). For more information, please see the "Annotations Specification" below. | +| backrestConfig | `create` | Optional references to pgBackRest configuration files | +| backrestLimits | `create`, `update` | Specify the container resource limits that the pgBackRest repository should use. Follows the [Kubernetes definitions of resource limits](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#resource-requests-and-limits-of-pod-and-container). | +| backrestRepoPath | `create` | Optional reference to the location of the pgBackRest repository. | +| backrestResources | `create`, `update` | Specify the container resource requests that the pgBackRest repository should use. Follows the [Kubernetes definitions of resource requests](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#resource-requests-and-limits-of-pod-and-container). | +| backrestS3Bucket | `create` | An optional parameter that specifies a S3 bucket that pgBackRest should use. | +| backrestS3Endpoint | `create` | An optional parameter that specifies the S3 endpoint pgBackRest should use. | +| backrestS3Region | `create` | An optional parameter that specifies a cloud region that pgBackRest should use. | +| backrestS3URIStyle | `create` | An optional parameter that specifies if pgBackRest should use the `path` or `host` S3 URI style. | +| backrestS3VerifyTLS | `create` | An optional parameter that specifies if pgBackRest should verify the TLS endpoint. | +| BackrestStorage | `create` | A specification that gives information about the storage attributes for the pgBackRest repository, which stores backups and archives, of the PostgreSQL cluster. For details, please see the `Storage Specification` section below. This is required. | +| backrestStorageTypes | `create` | An optional parameter that takes an array of different repositories types that can be used to store pgBackRest backups. Choices are `posix` and `s3`. If nothing is specified, it defaults to `posix`. (`local`, equivalent to `posix`, is available for backwards compatibility).| +| ccpimage | `create` | The name of the PostgreSQL container image to use, e.g. `crunchy-postgres-ha` or `crunchy-postgres-ha-gis`. | +| ccpimageprefix | `create` | If provided, the image prefix (or registry) of the PostgreSQL container image, e.g. `registry.developers.crunchydata.com/crunchydata`. The default is to use the image prefix set in the PostgreSQL Operator configuration. | +| ccpimagetag | `create` | The tag of the PostgreSQL container image to use, e.g. `{{< param ubiBase >}}-{{< param postgresVersion >}}-{{< param operatorVersion >}}`. | +| clustername | `create` | The name of the PostgreSQL cluster, e.g. `hippo`. This is used to group PostgreSQL instances (primary, replicas) together. | +| customconfig | `create` | If specified, references a custom ConfigMap to use when bootstrapping a PostgreSQL cluster. For the shape of this file, please see the section on [Custom Configuration]({{< relref "/advanced/custom-configuration.md" >}}) | +| database | `create` | The name of a database that the PostgreSQL user can log into after the PostgreSQL cluster is created. 
| +| disableAutofail | `create`, `update` | If set to true, disables the high availability capabilities of a PostgreSQL cluster. By default, every cluster can have high availability if there is at least one replica. | +| exporter | `create`,`update` | If `true`, deploys the `crunchy-postgres-exporter` sidecar for metrics collection | +| exporterLimits | `create`, `update` | Specify the container resource limits that the `crunchy-postgres-exporter` sidecar uses when it is deployed with a PostgreSQL instance. Follows the [Kubernetes definitions of resource limits](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#resource-requests-and-limits-of-pod-and-container). | +| exporterport | `create` | If `Exporter` is `true`, then this specifies the port that the metrics sidecar runs on (e.g. `9187`) | +| exporterResources | `create`, `update` | Specify the container resource requests that the `crunchy-postgres-exporter` sidecar uses when it is deployed with a PostgreSQL instance. Follows the [Kubernetes definitions of resource requests](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#resource-requests-and-limits-of-pod-and-container). | +| limits | `create`, `update` | Specify the container resource limits that the PostgreSQL cluster should use. Follows the [Kubernetes definitions of resource limits](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#resource-requests-and-limits-of-pod-and-container). | +| name | `create` | The name of the PostgreSQL instance that is the primary. On creation, this should be set to be the same as `ClusterName`. | +| namespace | `create` | The Kubernetes Namespace that the PostgreSQL cluster is deployed in. | +| nodeAffinity | `create` | Sets the [node affinity rules](/architecture/high-availability/#node-affinity) for the PostgreSQL cluster and associated PostgreSQL instances. Can be overridden on a per-instance (`pgreplicas.crunchydata.com`) basis. Please see the `Node Affinity Specification` section below. | +| pgBadger | `create`,`update` | If `true`, deploys the `crunchy-pgbadger` sidecar for query analysis. | +| pgbadgerport | `create` | If the `PGBadger` label is set, then this specifies the port that the pgBadger sidecar runs on (e.g. `10000`) | +| pgBouncer | `create`, `update` | If specified, defines the attributes to use for the pgBouncer connection pooling deployment that can be used in conjunction with this PostgreSQL cluster. Please see the specification defined below. | +| pgDataSource | `create` | Used to indicate if a PostgreSQL cluster should bootstrap its data from a pgBackRest repository. This uses the PostgreSQL Data Source Specification, described below. | +| pgoimageprefix | `create` | If provided, the image prefix (or registry) of any PostgreSQL Operator images that are used for jobs, e.g. `registry.developers.crunchydata.com/crunchydata`. The default is to use the image prefix set in the PostgreSQL Operator configuration. | +| podAntiAffinity | `create` | A required section. Sets the [pod anti-affinity rules]({{< relref "/architecture/high-availability/_index.md#how-the-crunchy-postgresql-operator-uses-pod-anti-affinity" >}}) for the PostgreSQL cluster and associated deployments. Please see the `Pod Anti-Affinity Specification` section below. | +| policies | `create` | If provided, a comma-separated list referring to `pgpolicies.crunchydata.com.Spec.Name` that should be run once the PostgreSQL primary is first initialized. 
|
+| port | `create` | The port that PostgreSQL will run on, e.g. `5432`. |
+| ReplicaStorage | `create` | A specification that gives information about the storage attributes for any replicas in the PostgreSQL cluster. For details, please see the `Storage Specification` section below. This will likely be changed in the future based on the nature of the high-availability system, but presently it is still required that you set it. It is recommended you use similar settings to that of `PrimaryStorage`. |
+| replicas | `create` | The number of replicas to create after a PostgreSQL primary is first initialized. This only works on create; to scale a cluster after it is initialized, please use the [`pgo scale`]({{< relref "/pgo-client/reference/pgo_scale.md" >}}) command. |
+| resources | `create`, `update` | Specify the container resource requests that the PostgreSQL cluster should use. Follows the [Kubernetes definitions of resource requests](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#resource-requests-and-limits-of-pod-and-container). |
+| serviceType | `create`, `update` | Sets the Kubernetes [Service](https://kubernetes.io/docs/concepts/services-networking/service/) type to use for the cluster. If not set, defaults to `ClusterIP`. |
+| shutdown | `create`, `update` | If set to true, indicates that a PostgreSQL cluster should shutdown. If set to false, indicates that a PostgreSQL cluster should be up and running. |
+| standby | `create`, `update` | If set to true, indicates that the PostgreSQL cluster is a "standby" cluster, i.e. is in read-only mode entirely. Please see [Kubernetes Multi-Cluster Deployments]({{< relref "/architecture/high-availability/multi-cluster-kubernetes.md" >}}) for more information. |
+| syncReplication | `create` | If set to `true`, specifies the PostgreSQL cluster to use [synchronous replication]({{< relref "/architecture/high-availability/_index.md#how-the-crunchy-postgresql-operator-uses-pod-anti-affinity#synchronous-replication-guarding-against-transactions-loss" >}}).|
+| tablespaceMounts | `create`,`update` | Lists any tablespaces that are attached to the PostgreSQL cluster. Tablespaces can be added at a later time by updating the `TablespaceMounts` entry, but they cannot be removed. Stores a map of information, with the key being the name of the tablespace, and the value being a Storage Specification, defined below. |
+| tls | `create` | Defines the attributes for enabling TLS for a PostgreSQL cluster. See TLS Specification below. |
+| tlsOnly | `create` | If set to true, requires client connections to use only TLS to connect to the PostgreSQL database. |
+| tolerations | `create`,`update` | An array of Kubernetes [Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/). Please refer to the [Kubernetes documentation](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) for how to set this field. |
+| user | `create` | The name of the PostgreSQL user that is created when the PostgreSQL cluster is first created. |
+| userlabels | `create` | A set of key-value string pairs that are used as a sort of "catch-all" as well as a way to add custom labels to clusters. This will disappear at some point. |
+
+##### Storage Specification
+
+The storage specification is a spec that defines attributes about the storage to
+be used for a particular function of a PostgreSQL cluster (e.g. a primary
+instance or for the pgBackRest backup repository). The below describes each
+attribute and how it works.
+
+| Attribute | Action | Description |
+|-----------|--------|-------------|
+| accessmode | `create` | The name of the Kubernetes Persistent Volume [Access Mode](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes) to use. |
+| matchLabels | `create` | Only used with `StorageType` of `create`, used to match a particular subset of provisioned Persistent Volumes. |
+| name | `create` | Only needed for `PrimaryStorage` in `pgclusters.crunchydata.com`. Used to identify the name of the PostgreSQL cluster. Should match `ClusterName`. |
+| size | `create` | The size of the [Persistent Volume Claim](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims) (PVC). Must use a Kubernetes resource value, e.g. `20Gi`. |
+| storageclass | `create` | The name of the Kubernetes [StorageClass](https://kubernetes.io/docs/concepts/storage/storage-classes/) to use. |
+| storagetype | `create` | Set to `create` if storage is provisioned (e.g. using `hostpath`). Set to `dynamic` if using a dynamic storage provisioner, e.g. via a `StorageClass`. |
+| supplementalgroups | `create` | If provided, a comma-separated list of group IDs to use in case it is needed to interface with a particular storage system. Typically used with NFS or hostpath storage. |
+
+##### Node Affinity Specification
+
+Sets the [node affinity]({{< relref "/architecture/high-availability/_index.md#node-affinity" >}})
+for the PostgreSQL cluster and associated deployments. Follows the [Kubernetes standard format for setting node affinity](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity), including `preferred` and `required` node affinity.
+
+To set node affinity for a PostgreSQL cluster, you will need to modify the `default` attribute in the node affinity specification. As mentioned above, the values that `default` accepts match what Kubernetes uses for [node affinity](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity).
+
+For a detailed explanation of how node affinity works, please see the [high-availability]({{< relref "/architecture/high-availability/_index.md#node-affinity" >}})
+documentation.
+
+| Attribute | Action | Description |
+|-----------|--------|-------------|
+| default | `create` | The default node affinity to use for all PostgreSQL instances managed in a given PostgreSQL cluster. Can be overridden on a per-instance basis with the `pgreplicas.crunchydata.com` custom resource. |
+
+##### Pod Anti-Affinity Specification
+
+Sets the [pod anti-affinity]({{< relref "/architecture/high-availability/_index.md#how-the-crunchy-postgresql-operator-uses-pod-anti-affinity" >}})
+for the PostgreSQL cluster and associated deployments. Each attribute can
+contain one of the following values:
+
+- `required`
+- `preferred` (which is also the recommended default)
+- `disabled`
+
+For a detailed explanation of how this works, please see the [high-availability]({{< relref "/architecture/high-availability/_index.md#how-the-crunchy-postgresql-operator-uses-pod-anti-affinity" >}})
+documentation.
+
+| Attribute | Action | Description |
+|-----------|--------|-------------|
+| default | `create` | The default pod anti-affinity to use for all Pods managed in a given PostgreSQL cluster. |
+| pgBackRest | `create` | If set to a value that differs from `Default`, specifies the pod anti-affinity to use for just the pgBackRest repository. 
| +| pgBouncer | `create` | If set to a value that differs from `Default`, specifies the pod anti-affinity to use for just the pgBouncer Pods. | + +##### PostgreSQL Data Source Specification + +This specification is used when one wants to bootstrap the data in a PostgreSQL +cluster from a pgBackRest repository. This can be a pgBackRest repository that +is attached to an active PostgreSQL cluster or is kept around to be used for +spawning new PostgreSQL clusters. + +| Attribute | Action | Description | +|-----------|--------|-------------| +| restoreFrom | `create` | The name of a PostgreSQL cluster, active or former, that will be used for bootstrapping the data of a new PostgreSQL cluster. | +| restoreOpts | `create` | Additional pgBackRest [restore options](https://pgbackrest.org/command.html#command-restore) that can be used as part of the bootstrapping operation, for example, point-in-time-recovery options. | + +##### TLS Specification + +The TLS specification makes a reference to the various secrets that are required +to enable TLS in a PostgreSQL cluster. For more information on how these secrets +should be structured, please see [Enabling TLS in a PostgreSQL Cluster]({{< relref "/pgo-client/common-tasks.md#enable-tls" >}}). + +| Attribute | Action | Description | +|-----------|--------|-------------| +| caSecret | `create` | A reference to the name of a Kubernetes Secret that specifies a certificate authority for the PostgreSQL cluster to trust. | +| replicationTLSSecret | `create` | A reference to the name of a Kubernetes TLS Secret that contains a keypair for authenticating the replication user. Must be used with `CASecret` and `TLSSecret`. | +| tlsSecret | `create` | A reference to the name of a Kubernetes TLS Secret that contains a keypair that is used for the PostgreSQL instance to identify itself and perform TLS communications with PostgreSQL clients. Must be used with `CASecret`. | + +##### pgBouncer Specification + +The pgBouncer specification defines how a pgBouncer deployment can be deployed +alongside the PostgreSQL cluster. pgBouncer is a PostgreSQL connection pooler +that can also help manage connection state, and is helpful to deploy alongside +a PostgreSQL cluster to help with failover scenarios too. + +| Attribute | Action | Description | +|-----------|--------|-------------| +| limits | `create`, `update` | Specify the container resource limits that the pgBouncer Pods should use. Follows the [Kubernetes definitions of resource limits](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#resource-requests-and-limits-of-pod-and-container). | +| replicas | `create`, `update` | The number of pgBouncer instances to deploy. Must be set to at least `1` to deploy pgBouncer. Setting to `0` removes an existing pgBouncer deployment for the PostgreSQL cluster. | +| resources | `create`, `update` | Specify the container resource requests that the pgBouncer Pods should use. Follows the [Kubernetes definitions of resource requests](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#resource-requests-and-limits-of-pod-and-container). | +| serviceType | `create`, `update` | Sets the Kubernetes [Service](https://kubernetes.io/docs/concepts/services-networking/service/) type to use for the cluster. If not set, defaults to the `ServiceType` set for the PostgreSQL cluster. 
|
+| tlsSecret | `create` | A reference to the name of a Kubernetes TLS Secret that contains a keypair that is used for the pgBouncer instance to identify itself and perform TLS communications with PostgreSQL clients. Must be used with the parent Spec `TLSSecret` and `CASecret`. |
+
+##### Annotations Specification
+
+The `pgcluster.crunchydata.com` specification contains a block that allows for
+custom [Annotations](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/)
+to be applied to the Deployments that are managed by the PostgreSQL Operator,
+including:
+
+- PostgreSQL
+- pgBackRest
+- pgBouncer
+
+This also includes the option to apply Annotations globally across the three
+different deployment groups.
+
+| Attribute | Action | Description |
+|-----------|--------|-------------|
+| backrest | `create`, `update` | Specify annotations that are only applied to the pgBackRest deployments |
+| global | `create`, `update` | Specify annotations that are applied to the PostgreSQL, pgBackRest, and pgBouncer deployments |
+| pgBouncer | `create`, `update` | Specify annotations that are only applied to the pgBouncer deployments |
+| postgres | `create`, `update` | Specify annotations that are only applied to the PostgreSQL deployments |
+
+### `pgreplicas.crunchydata.com`
+
+The `pgreplicas.crunchydata.com` Custom Resource Definition contains information
+pertaining to the structure of PostgreSQL replicas associated with a PostgreSQL
+cluster. All of the attributes only affect the replica when it is created.
+
+#### Specification (`Spec`)
+
+| Attribute | Action | Description |
+|-----------|--------|-------------|
+| clustername | `create` | The name of the PostgreSQL cluster, e.g. `hippo`. This is used to group PostgreSQL instances (primary, replicas) together. |
+| name | `create` | The name of this PostgreSQL replica. It should be unique within a `ClusterName`. |
+| namespace | `create` | The Kubernetes Namespace that the PostgreSQL cluster is deployed in. |
+| nodeAffinity | `create` | Sets the [node affinity rules]({{< relref "/architecture/high-availability/_index.md#node-affinity" >}}) for this PostgreSQL instance. Follows the [Kubernetes standard format for setting node affinity](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity). |
+| replicastorage | `create` | A specification that gives information about the storage attributes for any replicas in the PostgreSQL cluster. For details, please see the `Storage Specification` section in the `pgclusters.crunchydata.com` description. This will likely be changed in the future based on the nature of the high-availability system, but presently it is still required that you set it. It is recommended you use similar settings to that of `PrimaryStorage`. |
+| serviceType | `create`, `update` | Sets the Kubernetes [Service](https://kubernetes.io/docs/concepts/services-networking/service/) type to use for this particular instance. If not set, defaults to the value in the related `pgclusters.crunchydata.com` custom resource. |
+| userlabels | `create` | A set of key-value string pairs that are used as a sort of "catch-all" as well as a way to add custom labels to clusters. This will disappear at some point. |
+| tolerations | `create`,`update` | An array of Kubernetes [Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/). 
Please refer to the [Kubernetes documentation](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) for how to set this field. | diff --git a/docs/content/installation/_index.md b/docs/content/installation/_index.md index 909561ce46..036a080a4e 100644 --- a/docs/content/installation/_index.md +++ b/docs/content/installation/_index.md @@ -5,10 +5,10 @@ draft: false weight: 40 --- -There are several different ways to install and deploy the [PostgreSQL Operator](https://www.crunchydata.com/developers/download-postgres/containers/postgres-operator) +There are several different ways to install and deploy the [PGO, the Postgres Operator](https://www.crunchydata.com/developers/download-postgres/containers/postgres-operator) based upon your use case. -For the vast majority of use cases, we recommend using the [PostgreSQL Operator Installer]({{< relref "/installation/postgres-operator.md" >}}), +For the vast majority of use cases, we recommend using the [Postgres Operator Installer]({{< relref "/installation/postgres-operator.md" >}}), which uses the `pgo-deployer` container to set up all of the objects required to run the PostgreSQL Operator. diff --git a/docs/content/installation/configuration.md b/docs/content/installation/configuration.md index ce097d2753..2368a2e621 100644 --- a/docs/content/installation/configuration.md +++ b/docs/content/installation/configuration.md @@ -5,9 +5,9 @@ draft: false weight: 40 --- -# PostgreSQL Operator Installer Configuration +# PGO Installer Configuration -When installing the PostgreSQL Operator you have many configuration options, these +When installing PGO, the Postgres Operator you have many configuration options, these options are listed in this section. ## General Configuration @@ -31,7 +31,7 @@ Operator. | `ccp_image_prefix` | registry.developers.crunchydata.com/crunchydata | **Required** | Configures the image prefix used when creating containers from Crunchy Container Suite. | | `ccp_image_pull_secret` | | | Name of a Secret containing credentials for container image registries. | | `ccp_image_pull_secret_manifest` | | | Provide a path to the Secret manifest to be installed in each namespace. (optional) | -| `ccp_image_tag` | {{< param centosBase >}}-{{< param postgresVersion >}}-{{< param operatorVersion >}} | **Required** | Configures the image tag (version) used when creating containers from Crunchy Container Suite. | +| `ccp_image_tag` | {{< param ubiBase >}}-{{< param postgresVersion >}}-{{< param operatorVersion >}} | **Required** | Configures the image tag (version) used when creating containers from Crunchy Container Suite. | | `create_rbac` | true | **Required** | Set to true if the installer should create the RBAC resources required to run the PostgreSQL Operator. | | `crunchy_debug` | false | | Set to configure Operator to use debugging mode. Note: this can cause sensitive data such as passwords to appear in Operator logs. | | `db_name` | | | Set to a value to configure the default database name on all newly created clusters. By default, the PostgreSQL Operator will set it to the name of the cluster that is being created. | @@ -46,7 +46,7 @@ Operator. | `delete_operator_namespace` | false | | Set to configure whether or not the PGO operator namespace (defined using variable `pgo_operator_namespace`) is deleted when uninstalling the PGO. | | `delete_watched_namespaces` | false | | Set to configure whether or not the PGO watched namespaces (defined using variable `namespace`) are deleted when uninstalling the PGO. 
| | `disable_auto_failover` | false | | If set, will disable autofail capabilities by default in any newly created cluster | -| `disable_fsgroup` | false | | Set to `true` for deployments where you do not want to have the default PostgreSQL fsGroup (26) set. The typical usage is in OpenShift environments that have a `restricted` Security Context Constraints. | +| `disable_fsgroup` | | | Set to `true` for deployments where you do not want to have the default PostgreSQL fsGroup (26) set. The typical usage is in OpenShift environments that have a `restricted` Security Context Constraint. If you use the `anyuid` SCC, you would want to set this to `false`. The Postgres Operator will set this value appropriately by default, except when using the `anyuid` SCC. | | `exporterport` | 9187 | **Required** | Set to configure the default port used to connect to postgres exporter. | | `metrics` | false | **Required** | Set to true enable performance metrics on all newly created clusters. This can be disabled by the client. | | `namespace` | pgo | | Set to a comma delimited string of all the namespaces Operator will manage. | @@ -69,7 +69,7 @@ Operator. | `pgo_image_prefix` | registry.developers.crunchydata.com/crunchydata | **Required** | Configures the image prefix used when creating containers for the Crunchy PostgreSQL Operator (apiserver, operator, scheduler..etc). | | `pgo_image_pull_secret` | | | Name of a Secret containing credentials for container image registries. | | `pgo_image_pull_secret_manifest` | | | Provide a path to the Secret manifest to be installed in each namespace. (optional) | -| `pgo_image_tag` | {{< param centosBase >}}-{{< param operatorVersion >}} | **Required** | Configures the image tag used when creating containers for the Crunchy PostgreSQL Operator (apiserver, operator, scheduler..etc) | +| `pgo_image_tag` | {{< param ubiBase >}}-{{< param operatorVersion >}} | **Required** | Configures the image tag used when creating containers for the Crunchy PostgreSQL Operator (apiserver, operator, scheduler..etc) | | `pgo_installation_name` | devtest | **Required** | The name of the PGO installation. | | `pgo_noauth_routes` | | | Configures URL routes with mTLS and HTTP BasicAuth disabled. | | `pgo_operator_namespace` | pgo | **Required** | Set to configure the namespace where Operator will be deployed. | @@ -129,30 +129,28 @@ unique ID for each required storage configuration. You can specify the default storage to use for PostgreSQL, pgBackRest, and other elements that require storage that can outlast the lifetime of a Pod. While the -PostgreSQL Operator defaults to using `hostpathstorage` to work with -environments that are typically used to test, we recommend using one of the -other storage classes in production deployments. +PostgreSQL Operator defaults to using `default` to work with the default storage +class available in your environment, you should verify that your cluster actually provides a default storage class. | Name | Default | Required | Description | |------|---------|----------|-------------| -| `backrest_storage` | hostpathstorage | **Required** | Set the value of the storage configuration to use for the pgbackrest shared repository deployment created when a user specifies pgbackrest to be enabled on a cluster. | -| `backup_storage` | hostpathstorage | **Required** | Set the value of the storage configuration to use for backups, including the storage for pgbackrest repo volumes. 
| -| `primary_storage` | hostpathstorage | **Required** | Set to configure which storage definition to use when creating volumes used by PostgreSQL primaries on all newly created clusters. | -| `replica_storage` | hostpathstorage | **Required** | Set to configure which storage definition to use when creating volumes used by PostgreSQL replicas on all newly created clusters. | +| `backrest_storage` | default | **Required** | Set the value of the storage configuration to use for the pgBackRest repository. | +| `backup_storage` | default | **Required** | Set the value of the storage configuration to use for backups generated by `pg_dump`. | +| `primary_storage` | default | **Required** | Set to configure which storage definition to use when creating volumes used by PostgreSQL primaries on all newly created clusters. | +| `replica_storage` | default | **Required** | Set to configure which storage definition to use when creating volumes used by PostgreSQL replicas on all newly created clusters. | | `wal_storage` | | | Set to configure which storage definition to use when creating volumes used for PostgreSQL Write-Ahead Log. | #### Example Defaults ```yaml -backrest_storage: 'nfsstorage' -backup_storage: 'nfsstorage' -primary_storage: 'nfsstorage' -replica_storage: 'nfsstorage' +backrest_storage: default +backup_storage: default +primary_storage: default +replica_storage: default ``` -With the configuration shown above, the `nfsstorage` storage configuration would -be used by default for the various containers created for a PG cluster -(i.e. containers for the primary DB, replica DB's, backups and/or `pgBackRest`). +With the configuration shown above, the default storage class available in the +deployment environment is used. ### Considerations for Multi-Zone Cloud Environments diff --git a/docs/content/installation/metrics/metrics-configuration.md b/docs/content/installation/metrics/metrics-configuration.md index 7d343480cf..cc3e905aa4 100644 --- a/docs/content/installation/metrics/metrics-configuration.md +++ b/docs/content/installation/metrics/metrics-configuration.md @@ -25,7 +25,7 @@ These variables affect the general configuration of PostgreSQL Operator Monitori | `create_rbac` | true | **Required** | Set to true if the installer should create the RBAC resources required to run the PostgreSQL Operator Monitoring infrastructure. | | `db_port` | 5432 | **Required** | Set to configure the PostgreSQL port used by all PostgreSQL clusters. | | `delete_metrics_namespace` | false | | Set to configure whether or not the metrics namespace (defined using variable `metrics_namespace`) is deleted when uninstalling the monitoring infrastructure. | -| `disable_fsgroup` | false | | Set to `true` for deployments where you do not want to have the default PostgreSQL fsGroup (26) set. The typical usage is in OpenShift environments that have a `restricted` Security Context Constraints. | +| `disable_fsgroup` | false | | Set to `true` for deployments where you do not want to have the default PostgreSQL fsGroup (26) set. The typical usage is in OpenShift environments that have a `restricted` Security Context Constraint. If you use the `anyuid` SCC, you would want to set this to `false`. The Postgres Operator will set this value appropriately by default, except when using the `anyuid` SCC. | | `grafana_admin_password` | admin | **Required** | Set to configure the login password for the Grafana administrator. 
| | `grafana_admin_username` | admin | **Required** | Set to configure the login username for the Grafana administrator. | | `grafana_install` | true | **Required** | Set to true to install Grafana to visualize metrics. | @@ -108,10 +108,10 @@ and tag as needed to use the RedHat certified containers: | `alertmanager_image_tag` | v0.21.0 | **Required** | Configures the image tag to use for the Alertmanager container. | | `grafana_image_prefix` | grafana | **Required** | Configures the image prefix to use for the Grafana container.| | `grafana_image_name` | grafana | **Required** | Configures the image name to use for the Grafana container. | -| `grafana_image_tag` | 6.7.4 | **Required** | Configures the image tag to use for the Grafana container. | +| `grafana_image_tag` | 6.7.5 | **Required** | Configures the image tag to use for the Grafana container. | | `prometheus_image_prefix` | prom | **Required** | Configures the image prefix to use for the Prometheus container. | | `prometheus_image_name` | promtheus | **Required** | Configures the image name to use for the Prometheus container. | -| `prometheus_image_tag` | v2.20.0 | **Required** | Configures the image tag to use for the Prometheus container. | +| `prometheus_image_tag` | v2.24.0 | **Required** | Configures the image tag to use for the Prometheus container. | Additionally, these same settings can be utilized as needed to support custom image names, tags, and additional container registries. @@ -124,7 +124,7 @@ PostgreSQL Operator Monitoring infrastructure: | Name | Default | Required | Description | |------|---------|----------|-------------| -| `pgo_image_prefix` | registry.developers.crunchydata.com/crunchydata | **Required** | Configures the image prefix used by the `pgo-deployer` container | -| `pgo_image_tag` | {{< param centosBase >}}-{{< param operatorVersion >}} | **Required** | Configures the image tag used by the `pgo-deployer` container | +| `pgo_image_prefix` | registry.developers.crunchydata.com/crunchydata | **Required** | Configures the image prefix used by the `pgo-deployer` container | +| `pgo_image_tag` | {{< param ubiBase >}}-{{< param operatorVersion >}} | **Required** | Configures the image tag used by the `pgo-deployer` container | -[k8s-service-type]: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types \ No newline at end of file +[k8s-service-type]: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types diff --git a/docs/content/installation/metrics/other/ansible/_index.md b/docs/content/installation/metrics/other/ansible/_index.md index cede0bb875..e48c6801dd 100644 --- a/docs/content/installation/metrics/other/ansible/_index.md +++ b/docs/content/installation/metrics/other/ansible/_index.md @@ -5,17 +5,17 @@ draft: false weight: 10 --- -# Crunchy Data PostgreSQL Operator Monitoring Playbooks +# PGO: Postgres Operator Monitoring Playbooks -The Crunchy Data PostgreSQL Operator Monitoring Playbooks contain [Ansible](https://www.ansible.com/) +The PGO (Postgres Operator from Crunchy Data) Monitoring Playbooks contain [Ansible](https://www.ansible.com/) roles for installing and managing the [Crunchy Data PostgreSQL Operator Monitoring infrastructure]({{< relref "/installation/other/ansible/installing-operator.md" >}}). 
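As a rough sketch of how these Monitoring Playbooks are typically invoked, the commands below clone the tagged release and run the metrics playbook. The `installers/metrics/ansible` path, the inventory location, and the `install-metrics` tag are assumptions drawn from the installer layout described in this guide, so verify them against your checkout before running:

```shell
# Sketch only: clone the tagged release and run the monitoring playbook.
# The directory layout, inventory path, and --tags value are assumptions;
# confirm them against the installer documentation for your release.
git clone -b v{{< param operatorVersion >}} https://github.com/CrunchyData/postgres-operator.git
cd postgres-operator/installers/metrics/ansible
ansible-playbook -i /path/to/inventory.yaml --tags=install-metrics main.yml
```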
## Features The playbooks provided allow users to: -* install PostgreSQL Operator Monitoring on Kubernetes and OpenShift -* install PostgreSQL Operator from a Linux, Mac or Windows (Ubuntu subsystem) host +* install PGO Monitoring on Kubernetes and OpenShift +* install PGO from a Linux, Mac or Windows (Ubuntu subsystem) host * support a variety of deployment models ## Resources diff --git a/docs/content/installation/metrics/other/ansible/metrics-prerequisites.md b/docs/content/installation/metrics/other/ansible/metrics-prerequisites.md index 1e9d31164d..116559ea30 100644 --- a/docs/content/installation/metrics/other/ansible/metrics-prerequisites.md +++ b/docs/content/installation/metrics/other/ansible/metrics-prerequisites.md @@ -62,7 +62,6 @@ if you are being using them for your environment. Both sets of variables cannot be used at the same time. The unused variables should be left commented out or removed. {{% /notice %}} - | Name | Default | Required | Description | |-----------------------------------|-------------|----------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | `kubernetes_context` | | **Required**, if deploying to Kubernetes |When deploying to Kubernetes, set to configure the context name of the kubeconfig to be used for authentication. | @@ -83,10 +82,15 @@ kubectl config current-context ## Configuring - `values.yaml` The `values.yaml` file contains all of the configuration parameters -for deploying the PostgreSQL Operator Monitoring infrastructure. +for deploying the PostgreSQL Operator Monitoring infrastructure. The [example file](https://github.com/CrunchyData/postgres-operator/blob/v{{< param operatorVersion >}}/installers/metrics/ansible/values.yaml) contains defaults that should work in most Kubernetes environments, but it may require some customization. +Note that in OpenShift and CodeReady Containers you will need to set the +`disable_fsgroup` attribute to `true` if you are using the +`restricted` Security Context Constraint (SCC). If you are using the `anyuid` +SCC, you will need to set `disable_fsgroup` to `false`. + For a detailed description of each configuration parameter, please read the [PostgreSQL Operator Installer Metrics Configuration Reference](<{{< relref "/installation/metrics/metrics-configuration.md">}}>) diff --git a/docs/content/installation/metrics/other/helm-metrics.md b/docs/content/installation/metrics/other/helm-metrics.md index fb94918003..e09a1ae5c8 100644 --- a/docs/content/installation/metrics/other/helm-metrics.md +++ b/docs/content/installation/metrics/other/helm-metrics.md @@ -57,10 +57,15 @@ file will be used to populate the configuation options in the ConfigMap. ### Configuration - `values.yaml` The `values.yaml` file contains all of the configuration parameters for deploying -the PostgreSQL Operator Monitoring infrastructure. +the PostgreSQL Operator Monitoring infrastructure. The [values.yaml file](https://github.com/CrunchyData/postgres-operator/blob/master/installers/metrics/helm/values.yaml) contains the defaults that should work in most Kubernetes environments, but it may require some customization. +Note that in OpenShift and CodeReady Containers you will need to set the +`disable_fsgroup` attribute to `true` if you are using the +`restricted` Security Context Constraint (SCC). If you are using the `anyuid` +SCC, you will need to set `disable_fsgroup` to `false`. 
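For instance, assuming the Helm chart exposes `disable_fsgroup` as a top-level value in `values.yaml` (as the configuration reference above suggests), a minimal sketch of overriding it at install time without editing the file might look like:

```shell
# Sketch only: the release name, namespace, and chart path are placeholders,
# and --set-string assumes disable_fsgroup is a top-level chart value.
helm install postgres-operator-metrics -n pgo ./installers/metrics/helm \
  --set-string disable_fsgroup=true   # use false instead under the anyuid SCC
```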
+ For a detailed description of each configuration parameter, please read the [PostgreSQL Operator Monitoring Installer Configuration Reference](<{{< relref "/installation/metrics/metrics-configuration.md">}}>) @@ -81,11 +86,11 @@ upgrade and uninstall the PostgreSQL Operator. ## Upgrade and Uninstall -Once install has be completed using Helm, it will also be used to upgrade and +Once the install has been completed using Helm, it will also be used to upgrade and uninstall your PostgreSQL Operator. {{% notice tip %}} -The `name` and `namespace` in the following sections should match the options +The `name` and `namespace` in the following sections should match the options provided at install. {{% /notice %}} @@ -111,7 +116,7 @@ helm uninstall <name> -n <namespace> ## Debugging -When the `pgo-deployer` job does not complete successfully, the resources that +When the `pgo-deployer` job does not complete successfully, the resources that are created and normally cleaned up by Helm will be left in your Kubernetes cluster. This will allow you to use the failed job and its logs to debug the issue. The following command will show the logs for the `pgo-deployer` diff --git a/docs/content/installation/metrics/postgres-operator-metrics.md b/docs/content/installation/metrics/postgres-operator-metrics.md index a077862ba9..2440ebfa66 100644 --- a/docs/content/installation/metrics/postgres-operator-metrics.md +++ b/docs/content/installation/metrics/postgres-operator-metrics.md @@ -18,6 +18,11 @@ kubectl create namespace pgo kubectl apply -f https://raw.githubusercontent.com/CrunchyData/postgres-operator/v{{< param operatorVersion >}}/installers/metrics/kubectl/postgres-operator-metrics.yml ``` +Note that in OpenShift and CodeReady Containers you will need to set the +`disable_fsgroup` attribute to `true` if you are using the +`restricted` Security Context Constraint (SCC). If you are using the `anyuid` +SCC, you will need to set `disable_fsgroup` to `false`. + However, we still advise that you read onward to see how to properly configure the PostgreSQL Operator Monitoring infrastructure. @@ -53,13 +58,13 @@ environmental requirements. By default, the `pgo-deployer` uses a ServiceAccount called `pgo-metrics-deployer-sa` that has a ClusterRoleBinding (`pgo-metrics-deployer-crb`) with several ClusterRole permissions. This ClusterRole is needed for the initial configuration and deployment -of the various applications comprising the monitoring infrastructure. This includes permissions +of the various applications comprising the monitoring infrastructure. This includes permissions to create: * RBAC for use by Prometheus and/or Grafana * The metrics namespace -The required list of privileges are available in the +The required list of privileges is available in the [postgres-operator-metrics.yml](https://raw.githubusercontent.com/CrunchyData/postgres-operator/v{{< param operatorVersion >}}/installers/metrics/kubectl/postgres-operator-metrics.yml) file: @@ -95,6 +100,11 @@ for deploying PostgreSQL Operator Monitoring. The [example file](https://github. contains defaults that should work in most Kubernetes environments, but it may require some customization. +Note that in OpenShift and CodeReady Containers you will need to set the +`disable_fsgroup` attribute to `true` if you are using the +`restricted` Security Context Constraint (SCC). If you are using the `anyuid` +SCC, you will need to set `disable_fsgroup` to `false`. 
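One hedged way to apply that with the kubectl-based installer, assuming the downloaded manifest carries the value exactly as `disable_fsgroup: "false"` (inspect your copy first), is to flip it before applying:

```shell
# Sketch: download the installer manifest, toggle disable_fsgroup, then apply.
# The sed pattern assumes the value appears as disable_fsgroup: "false";
# adjust it if your manifest differs.
curl -LO https://raw.githubusercontent.com/CrunchyData/postgres-operator/v{{< param operatorVersion >}}/installers/metrics/kubectl/postgres-operator-metrics.yml
sed -i 's/disable_fsgroup: "false"/disable_fsgroup: "true"/' postgres-operator-metrics.yml
kubectl apply -f postgres-operator-metrics.yml
```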
+ For a detailed description of each configuration parameter, please read the [PostgreSQL Operator Monitoring Installer Configuration Reference](<{{< relref "/installation/metrics/metrics-configuration.md">}}>) @@ -103,7 +113,7 @@ For a detailed description of each configuration parameter, please read the The deploy job can be used to perform different deployment actions for the PostgreSQL Operator Monitoring infrastructure. When you run the job it will install the monitoring infrastructure by default but you can change the deployment action to -uninstall or update. The `DEPLOY_ACTION` environment variable in the `postgres-operator-metrics.yml` +uninstall or update. The `DEPLOY_ACTION` environment variable in the `postgres-operator-metrics.yml` file can be set to `install-metrics`, `update-metrics`, and `uninstall-metrics`. ### Image Pull Secrets diff --git a/docs/content/installation/other/_index.md b/docs/content/installation/other/_index.md index 54722b3e61..1dc5fd84b8 100644 --- a/docs/content/installation/other/_index.md +++ b/docs/content/installation/other/_index.md @@ -6,6 +6,6 @@ weight: 50 --- Though the years, we have built up several other methods for installing the -PostgreSQL Operator. The next few sections provide some alternative ways of +PGO. The next few sections provide some alternative ways of deploying the PostgreSQL Operator. Some of these methods are deprecated and may be removed in a future release. diff --git a/docs/content/installation/other/ansible/_index.md b/docs/content/installation/other/ansible/_index.md index 0cd09a034d..69a647511e 100644 --- a/docs/content/installation/other/ansible/_index.md +++ b/docs/content/installation/other/ansible/_index.md @@ -5,17 +5,17 @@ draft: false weight: 100 --- -# Crunchy Data PostgreSQL Operator Playbooks +# PGO: Postgres Operator Playbooks -The Crunchy Data PostgreSQL Operator Playbooks contain [Ansible](https://www.ansible.com/) +The PGO (Postgres Operator from Crunchy Data) Playbooks contain [Ansible](https://www.ansible.com/) roles for installing and managing the [Crunchy Data PostgreSQL Operator]({{< relref "/installation/other/ansible/installing-operator.md" >}}). ## Features The playbooks provided allow users to: -* install PostgreSQL Operator on Kubernetes and OpenShift -* install PostgreSQL Operator from a Linux, Mac or Windows (Ubuntu subsystem) host +* install PGO on Kubernetes and OpenShift +* install PGO from a Linux, Mac or Windows (Ubuntu subsystem) host * generate TLS certificates required by the PostgreSQL Operator * support a variety of deployment models diff --git a/docs/content/installation/other/ansible/installing-operator.md b/docs/content/installation/other/ansible/installing-operator.md index 8cc82d1448..520732ce72 100644 --- a/docs/content/installation/other/ansible/installing-operator.md +++ b/docs/content/installation/other/ansible/installing-operator.md @@ -56,38 +56,53 @@ oc get deployments -n <namespace> oc get pods -n <namespace> ``` -## Configure Environment Variables - -After the Crunchy PostgreSQL Operator has successfully been installed we will need -to configure local environment variables before using the `pgo` client. +## Install the `pgo` Client {{% notice info %}} - If TLS authentication was disabled during installation, please see the [TLS Configuration Page] ({{< relref "Configuration/tls.md" >}}) for additional configuration information. 
- {{% / notice %}} -To configure the environment variables used by `pgo` run the following command: +During or after the installation of PGO, the Postgres Operator, download the `pgo` client setup script. This will help set up your local environment for using the Postgres Operator: -Note: `<namespace>` should be replaced with the namespace the Crunchy PostgreSQL -Operator was deployed to. +``` +curl https://raw.githubusercontent.com/CrunchyData/postgres-operator/v{{< param operatorVersion >}}/installers/kubectl/client-setup.sh > client-setup.sh +chmod +x client-setup.sh +``` -```bash -cat <<EOF >> ~/.bashrc -export PGOUSER="${HOME?}/.pgo/<namespace>/pgouser" -export PGO_CA_CERT="${HOME?}/.pgo/<namespace>/client.crt" -export PGO_CLIENT_CERT="${HOME?}/.pgo/<namespace>/client.crt" -export PGO_CLIENT_KEY="${HOME?}/.pgo/<namespace>/client.key" +When the Postgres Operator is done installing, run the client setup script: + +``` +./client-setup.sh +``` + +This will download the `pgo` client and provide instructions for how to easily use it in your environment. It will prompt you to set some environment variables for your session, which you can do with the following commands: + +``` +export PGOUSER="${HOME?}/.pgo/pgo/pgouser" +export PGO_CA_CERT="${HOME?}/.pgo/pgo/client.crt" +export PGO_CLIENT_CERT="${HOME?}/.pgo/pgo/client.crt" +export PGO_CLIENT_KEY="${HOME?}/.pgo/pgo/client.key" export PGO_APISERVER_URL='https://127.0.0.1:8443' -EOF +export PGO_NAMESPACE=pgo ``` -Apply those changes to the current session by running: +If you wish to permanently add these variables to your environment, you can run the following: + +``` +cat <<EOF >> ~/.bashrc +export PGOUSER="${HOME?}/.pgo/pgo/pgouser" +export PGO_CA_CERT="${HOME?}/.pgo/pgo/client.crt" +export PGO_CLIENT_CERT="${HOME?}/.pgo/pgo/client.crt" +export PGO_CLIENT_KEY="${HOME?}/.pgo/pgo/client.key" +export PGO_APISERVER_URL='https://127.0.0.1:8443' +export PGO_NAMESPACE=pgo +EOF -```bash source ~/.bashrc ``` +**NOTE**: For macOS users, you must use `~/.bash_profile` instead of `~/.bashrc` + ## Verify `pgo` Connection In a separate terminal we need to setup a port forward to the Crunchy PostgreSQL diff --git a/docs/content/installation/other/bash.md b/docs/content/installation/other/bash.md index be9963da7a..6d494182d0 100644 --- a/docs/content/installation/other/bash.md +++ b/docs/content/installation/other/bash.md @@ -5,18 +5,18 @@ draft: false weight: 100 --- -A full installation of the Operator includes the following steps: +A full installation of PGO includes the following steps: - - get the Operator project + - get the PGO project - configure your environment variables - - configure Operator templates + - configure PGO templates - create security resources - deploy the operator - - install pgo CLI (end user command tool) + - install `pgo` client (end user command tool) -Operator end-users are only required to install the pgo CLI client on their host and can skip the server-side installation steps. pgo CLI clients are provided for Linux, Mac, and Windows clients. +PGO end-users are only required to install the `pgo` client on their host and can skip the server-side installation steps. `pgo` clients are provided for Linux, Mac, and Windows clients. -The Operator can be deployed by multiple methods including: +PGO can be deployed by multiple methods including: * default installation * Ansible playbook installation @@ -25,7 +25,7 @@ The Operator can be deployed by multiple methods including: ## Default Installation - Get Project -The Operator project is hosted on GitHub. 
You can get a copy using `git clone`: +The PGO source code is made available on GitHub. You can get a copy using `git clone`: git clone -b v{{< param operatorVersion >}} https://github.com/CrunchyData/postgres-operator.git cd postgres-operator @@ -53,9 +53,9 @@ for Kubernetes events. This value is set as follows: This means namespaces called *pgouser1* and *pgouser2* will be created as part of the default installation. -{{% notice warning %}}In Kubernetes versions prior to 1.12 (including Openshift up through 3.11), there is a limitation that requires an extra step during installation for the operator to function properly with watched namespaces. This limitation does not exist when using Kubernetes 1.12+. When a list of namespaces are provided through the NAMESPACE environment variable, the setupnamespaces.sh script handles the limitation properly in both the bash and ansible installation. +{{% notice warning %}}In Kubernetes versions prior to 1.12 (including Openshift up through 3.11), there is a limitation that requires an extra step during installation for PGO to function properly with watched namespaces. This limitation does not exist when using Kubernetes 1.12+. When a list of namespaces are provided through the NAMESPACE environment variable, the setupnamespaces.sh script handles the limitation properly in both the bash and ansible installation. -However, if the user wishes to add a new watched namespace after installation, where the user would normally use pgo create namespace to add the new namespace, they should instead run the add-targeted-namespace.sh script or they may give themselves cluster-admin privileges instead of having to run setupnamespaces.sh script. Again, this is only required when running on a Kubernetes distribution whose version is below 1.12. In Kubernetes version 1.12+ the pgo create namespace command works as expected. +However, if the user wishes to add a new watched namespace after installation, where the user would normally use `pgo create namespace` to add the new namespace, they should instead run the add-targeted-namespace.sh script, or they may give themselves cluster-admin privileges instead of having to run the setupnamespaces.sh script. Again, this is only required when running on a Kubernetes distribution whose version is below 1.12. In Kubernetes version 1.12+ the `pgo create namespace` command works as expected. {{% /notice %}} @@ -77,13 +77,13 @@ Create the Operator namespaces using the Makefile target: The [Design](/design) section of this documentation talks further about the use of namespaces within the Operator. -## Default Installation - Configure Operator Templates +## Default Installation - Configure PGO Templates -Within the Operator [*PGO_CONF_DIR*](/developer-setup/) directory are several configuration files and templates used by the Operator to determine the various resources that it deploys on your Kubernetes cluster, specifically the PostgreSQL clusters it deploys. +Within PGO's [*PGO_CONF_DIR*](/developer-setup/) directory are several configuration files and templates used by PGO to determine the various resources that it deploys on your Kubernetes cluster, specifically the PostgreSQL clusters it deploys. -When you install the Operator you must make choices as to what kind of storage the Operator has to work with for example. Storage varies with each installation. As an installer, you would modify these configuration templates used by the Operator to customize its behavior. 
+When you install PGO, you must make choices as to what kind of storage the Operator has to work with, for example. Storage varies with each installation. As an installer, you would modify these configuration templates used by the Operator to customize its behavior. -**Note**: when you want to make changes to these Operator templates and configuration files after your initial installation, you will need to re-deploy the Operator in order for it to pick up any future configuration changes. +**Note**: when you want to make changes to these PGO templates and configuration files after your initial installation, you will need to re-deploy the Operator in order for it to pick up any future configuration changes. Here are some common examples of configuration changes most installers would make: @@ -104,12 +104,10 @@ Listed above are the *pgo.yaml* sections related to storage choices. *PrimarySt This sort of configuration allows for a PostgreSQL primary and replica to use different storage if you want. Other storage settings like *AccessMode*, *Size*, *StorageType*, and *StorageClass* further define the storage configuration. Currently, NFS, HostPath, and Storage Classes are supported in the configuration. -As part of the Operator installation, you will need to adjust these storage settings to suit your deployment requirements. For users wanting to try +As part of PGO installation, you will need to adjust these storage settings to suit your deployment requirements. For users wanting to try out the Operator on Google Kubernetes Engine you would make the following change to the storage configuration in pgo.yaml: - - For NFS Storage, it is assumed that there are sufficient Persistent Volumes (PV) created for the Operator to use when it creates Persistent Volume Claims (PVC). The creation of Persistent Volumes is something a Kubernetes cluster-admin user would typically provide before installing the Operator. There is an example script which can be used to create NFS Persistent Volumes located here: ./pv/create-nfs-pv.sh @@ -129,11 +127,11 @@ Operator. Other settings in *pgo.yaml* are described in the [pgo.yaml Configuration](/configuration/pgo-yaml-configuration) section of the documentation. -## Operator Security +## PGO Security -The Operator implements its own RBAC (Role Based Access Controls) for authenticating Operator users access to the Operator REST API. +PGO implements its own RBAC (Role Based Access Controls) for authenticating Operator users' access to the PGO REST API. -A default admin user is created when the operator is deployed. Create a .pgouser in your home directory and insert the text from below: +A default admin user is created when PGO is deployed. Create a .pgouser file in your home directory and insert the text from below: ``` admin:examplepassword @@ -151,7 +149,7 @@ To create a unique administrator user on deployment of the operator edit this file: $PGOROOT/deploy/install-bootstrap-creds.sh ``` -After installation users can create optional Operator users as follows: +After installation users can create optional PGO users as follows: ``` pgo create pgouser someuser --pgouser-namespaces="pgouser1,pgouser2" --pgouser-password=somepassword --pgouser-roles="somerole,someotherrole" @@ -160,13 +158,13 @@ pgo create pgouser someuser --pgouser-namespaces="pgouser1,pgouser2" --pgouser-p Note, you can also store the pgouser file in alternate locations, see the Security documentation for details. -Operator security is discussed in the Security section [Security](/security) of the documentation. 
+PGO security is further discussed in the [Security]({{< relref "security/_index.md" >}}) section of the documentation. Adjust these settings to meet your local requirements. ## Default Installation - Create Kubernetes RBAC Controls -The Operator installation requires Kubernetes administrators to create Resources required by the Operator. These resources are only allowed to be created by a cluster-admin user. To install on Google Cloud, you will need a user +PGO installation requires Kubernetes administrators to create Resources required by PGO. These resources are only allowed to be created by a cluster-admin user. To install on Google Cloud, you will need a user account with cluster-admin privileges. If you own the GKE cluster you are installing on, you can add cluster-admin role to your account as follows: @@ -179,9 +177,9 @@ Tor create the Kubernetes RBAC used by the Operator, run the following as a clus make installrbac -This set of Resources is created a single time unless a new Operator +This set of Resources is created a single time unless a new PGO release requires these Resources to be recreated. Note that when you -run *make installrbac* the set of keys used by the Operator REST API and +run *make installrbac* the set of keys used by the PGO REST API and also the pgbackrest ssh keys are generated. Verify the Operator Custom Resource Definitions are created as follows: @@ -193,14 +191,15 @@ You should see the *pgclusters* CRD among the listed CRD resource types. See the Security documentation for a description of the various RBAC resources created and used by the Operator. -## Default Installation - Deploy the Operator +## Default Installation - Deploy PGO + At this point, you as a normal Kubernetes user should be able to deploy the Operator. To do this, run the following Makefile target: make deployoperator -This will cause any existing Operator to be removed first, then the configuration to be bundled into a ConfigMap, then the Operator Deployment to be created. +This will cause any existing PGO installation to be removed first, then the configuration to be bundled into a ConfigMap, then the Operator Deployment to be created. -This will create a postgres-operator Deployment and a postgres-operator Service.Operator administrators needing to make changes to the Operator +This will create a postgres-operator Deployment and a postgres-operator Service. Operator administrators needing to make changes to the PGO configuration would run this make target to pick up any changes to pgo.yaml, pgo users/roles, or the Operator templates. @@ -211,17 +210,20 @@ created using the default installation by running the following: make cleannamespaces -This will permanently delete each namespace the Operator installation +This will permanently delete each namespace the PGO installation created previously. -## pgo CLI Installation -Most users will work with the Operator using the *pgo* CLI tool. That tool is downloaded from the GitHub Releases page for the Operator (https://github.com/crunchydata/postgres-operator/releases). Crunchy Enterprise Customer can download the pgo binaries from https://access.crunchydata.com/ on the downloads page. +## `pgo` client Installation -The *pgo* client is provided in Mac, Windows, and Linux binary formats, +Most users will work with the Operator using the `pgo` client. That tool is downloaded from the GitHub Releases page for the Operator (https://github.com/crunchydata/postgres-operator/releases). 
Crunchy Data customers can download the `pgo` binaries from https://access.crunchydata.com/ on the downloads page. + +The `pgo` client is provided in Mac, Windows, and Linux binary formats; download the appropriate client to your local laptop or workstation to work with a remote Operator. +You can also use the `pgo-client` container. + {{% notice info %}} If TLS authentication was disabled during installation, please see the [TLS Configuration Page] ({{< relref "Configuration/tls.md" >}}) for additional configuration information. @@ -239,9 +241,9 @@ Prior to using *pgo*, users testing the Operator on a single host can specify th pgo version ``` -That URL address needs to be reachable from your local *pgo* client host. Your Kubernetes administrator will likely need to create a network route, ingress, or LoadBalancer service to expose the Operator REST API to applications outside of the Kubernetes cluster. Your Kubernetes administrator might also allow you to run the Kubernetes port-forward command, contact your administrator for details. +That URL address needs to be reachable from your local `pgo` client host. Your Kubernetes administrator will likely need to create a network route, ingress, or LoadBalancer service to expose the PGO REST API to applications outside of the Kubernetes cluster. Your Kubernetes administrator might also allow you to run the Kubernetes port-forward command; contact your administrator for details. -Next, the *pgo* client needs to reference the keys used to secure the Operator REST API: +Next, the `pgo` client needs to reference the keys used to secure the PGO REST API: ``` export PGO_CA_CERT=$PGOROOT/conf/postgres-operator/server.crt @@ -253,7 +255,7 @@ You can also specify these keys on the command line as follows: pgo version --pgo-ca-cert=$PGOROOT/conf/postgres-operator/server.crt --pgo-client-cert=$PGOROOT/conf/postgres-operator/server.crt --pgo-client-key=$PGOROOT/conf/postgres-operator/server.key -{{% notice tip %}} if you are running the Operator on Google Cloud, you would open up another terminal and run *kubectl port-forward ...* to forward the Operator pod port 8443 to your localhost where you can access the Operator API from your local workstation. +{{% notice tip %}} if you are running PGO on Google Cloud, you would open up another terminal and run *kubectl port-forward ...* to forward the Postgres Operator pod port 8443 to your localhost where you can access the PGO API from your local workstation. {{% /notice %}} At this point, you can test connectivity between your laptop or workstation and the Postgres Operator deployed on a Kubernetes cluster as follows: @@ -264,7 +266,7 @@ You should get back a valid response showing the client and server version numbe ## Verify the Installation -Now that you have deployed the Operator, you can verify that it is running correctly. +Now that you have deployed PGO, you can verify that it is running correctly. You should see a pod running that contains the Operator: @@ -275,10 +277,10 @@ You should see a pod running that contains the Operator: That pod should show 3 of 3 containers in *running* state and that the operator is installed into the *pgo* namespace. -The sample environment script, examples/env.sh, if used creates some bash functions that you can use to view the Operator logs. This is useful in case you find one of the Operator containers not in a running status. +The sample environment script, examples/env.sh, if used, creates some bash functions that you can use to view the Postgres Operator logs. 
This is useful in case you find one of the PGO containers not in a running status. -Using the pgo CLI, you can verify the versions of the client and server match as follows: pgo version -This also tests connectivity between your pgo client host and the Operator server. +Using the `pgo` client, you can verify the versions of the client and server match as follows: pgo version +This also tests connectivity between your `pgo` client host and Postgres Operator container. diff --git a/docs/content/installation/other/google-cloud-marketplace.md b/docs/content/installation/other/google-cloud-marketplace.md index 64fa52a684..78885dc7af 100644 --- a/docs/content/installation/other/google-cloud-marketplace.md +++ b/docs/content/installation/other/google-cloud-marketplace.md @@ -5,7 +5,7 @@ draft: false weight: 200 --- -The PostgreSQL Operator is installed as part of [Crunchy PostgreSQL for GKE][gcm-listing] +PGO, the PostgreSQL Operator from Crunchy Data, is installed as part of [Crunchy PostgreSQL for GKE][gcm-listing] that is available in the Google Cloud Marketplace. [gcm-listing]: https://console.cloud.google.com/marketplace/details/crunchydata/crunchy-postgresql-operator @@ -16,7 +16,6 @@ that is available in the Google Cloud Marketplace. Install [Crunchy PostgreSQL for GKE][gcm-listing] to a Google Kubernetes Engine cluster using Google Cloud Marketplace. - ## Step 2: Verify Installation Install `kubectl` using the `gcloud components` command of the [Google Cloud SDK][sdk-install] or by following the [Kubernetes documentation][kubectl-install]. [kubectl-install]: https://kubernetes.io/docs/tasks/tools/install-kubectl/ [sdk-install]: https://cloud.google.com/sdk/docs/install -Using the `gcloud` utility, ensure you are logged into the GKE cluster in which you installed the +Using the `gcloud` utility, ensure you are logged into the GKE cluster in which you installed PGO, the PostgreSQL Operator, and see that it is running in the namespace in which you installed it. For example, in the `pgo` namespace: ``` kubectl -n pgo get po NAME READY STATUS RESTARTS AGE pod/postgres-operator-56d6ccb97-tmz7m 4/4 Running 0 2m ``` -## Step 3: Install the PostgreSQL Operator User Keys +## Step 3: Install the PGO User Keys You will need to get TLS keys used to secure the Operator REST API. Again, in the `pgo` namespace: ``` kubectl -n pgo get secret pgo.tls -o 'go-template={{ index .data "tls.key" | base64decode }}' ``` -## Step 4: Setup PostgreSQL Operator User +## Step 4: Setup PGO User -The PostgreSQL Operator implements its own role-based access control (RBAC) system for authenticating and authorization PostgreSQL Operator users access to its REST API. A default PostgreSQL Operator user (aka a "pgouser") is created as part of the marketplace installation (these credentials are set during the marketplace deployment workflow). +PGO implements its own role-based access control (RBAC) system for authenticating and authorizing PostgreSQL Operator users' access to its REST API. A default PostgreSQL Operator user (aka a "pgouser") is created as part of the marketplace installation (these credentials are set during the marketplace deployment workflow). Create the pgouser file in `${HOME?}/.pgo/<namespace>/pgouser` and insert the user and password you created on deployment of the PostgreSQL Operator via GCP Marketplace. 
For example, if you set up a user with the username of `username` and a password of `hippo`: @@ -67,7 +66,7 @@ username:hippo ## Step 5: Setup Environment variables -The PostgreSQL Operator Client uses several environmental variables to make it easier for interfacing with the PostgreSQL Operator. +The `pgo` client uses several environmental variables to make it easier to interface with PGO, the Postgres Operator. Set the environmental variables to use the key / certificate pair that you pulled in Step 3 when the PostgreSQL Operator was deployed via the marketplace. Using the previous examples, You can set up environment variables with the following command: @@ -98,13 +97,13 @@ source ~/.bashrc **NOTE**: For macOS users, you must use `~/.bash_profile` instead of `~/.bashrc` -## Step 6: Install the PostgreSQL Operator Client `pgo` +## Step 6: Install the `pgo` Client -The [`pgo` client](/pgo-client/) provides a helpful command-line interface to perform key operations on a PostgreSQL Operator, such as creating a PostgreSQL cluster. +The [`pgo` client](/pgo-client/) provides a helpful command-line interface to perform key operations on a PGO Deployment, such as creating a PostgreSQL cluster. The `pgo` client can be downloaded from GitHub [Releases](https://github.com/crunchydata/postgres-operator/releases) (subscribers can download it from the [Crunchy Data Customer Portal](https://access.crunchydata.com)). -Note that the `pgo` client's version must match the version of the PostgreSQL Operator that you have deployed. For example, if you have deployed version {{< param operatorVersion >}} of the PostgreSQL Operator, you must use the `pgo` for {{< param operatorVersion >}}. +Note that the `pgo` client's version must match the deployed version of PGO. For example, if you have deployed version {{< param operatorVersion >}} of the PostgreSQL Operator, you must use the `pgo` for {{< param operatorVersion >}}. Once you have download the `pgo` client, change the permissions on the file to be executable if need be as shown below: ```shell chmod +x pgo ``` -## Step 7: Connect to the PostgreSQL Operator +## Step 7: Connect to PGO -Finally, let's see if we can connect to the PostgreSQL Operator from the `pgo` client. In order to communicate with the PostgreSQL Operator API server, you will first need to set up a [port forward](https://kubernetes.io/docs/tasks/access-application-cluster/port-forward-access-application-cluster/) to your local environment. +Finally, let's see if we can connect to the Postgres Operator from the `pgo` client. In order to communicate with the PGO API server, you will first need to set up a [port forward](https://kubernetes.io/docs/tasks/access-application-cluster/port-forward-access-application-cluster/) to your local environment. In a new console window, run the following command to set up a port forward: @@ -137,7 +136,7 @@ pgo-apiserver version {{< param operatorVersion >}} ## Step 8: Create a Namespace -We are almost there! You can optionally add a namespace that can be managed by the PostgreSQL Operator to watch and to deploy a PostgreSQL cluster into. +We are almost there! You can optionally add a namespace for PGO to watch and to deploy a PostgreSQL cluster into. 
```shell pgo create namespace wateringhole @@ -194,4 +193,3 @@ cluster : hippo ``` The `pgo test` command provides you the basic information you need to connect to your PostgreSQL cluster from within your Kubernetes environment. For more detailed information, you can use `pgo show cluster -n wateringhole hippo`. - diff --git a/docs/content/installation/other/helm.md b/docs/content/installation/other/helm.md index b4bab8ff26..43da7fd553 100644 --- a/docs/content/installation/other/helm.md +++ b/docs/content/installation/other/helm.md @@ -5,12 +5,13 @@ draft: false weight: 100 --- -# The PostgreSQL Operator Helm Chart +# PGO: The Postgres Operator Helm Chart ## Overview -The PostgreSQL Operator comes with a container called `pgo-deployer` which -handles a variety of lifecycle actions for the PostgreSQL Operator, including: +PGO, the Postgres Operator from Crunchy Data, comes with a +container called `pgo-deployer` which handles a variety of +lifecycle actions for the PostgreSQL Operator, including: - Installation - Upgrading @@ -178,11 +179,11 @@ pgo-apiserver version {{< param operatorVersion >}} ## Upgrade and Uninstall -Once install has be completed using Helm, it will also be used to upgrade and +Once the install has been completed using Helm, it will also be used to upgrade and uninstall your PostgreSQL Operator. {{% notice tip %}} -The `name` and `namespace` in the following sections should match the options +The `name` and `namespace` in the following sections should match the options provided at install. {{% /notice %}} @@ -208,7 +209,7 @@ helm uninstall <name> -n <namespace> ## Debugging -When the `pgo-deployer` job does not complete successfully, the resources that +When the `pgo-deployer` job does not complete successfully, the resources that are created and normally cleaned up by Helm will be left in your Kubernetes cluster. This will allow you to use the failed job and its logs to debug the issue. The following command will show the logs for the `pgo-deployer` diff --git a/docs/content/installation/other/operator-hub.md b/docs/content/installation/other/operator-hub.md index 9b077ef073..38db177892 100644 --- a/docs/content/installation/other/operator-hub.md +++ b/docs/content/installation/other/operator-hub.md @@ -6,7 +6,7 @@ weight: 200 --- If your Kubernetes cluster is already running the [Operator Lifecycle Manager][OLM], -the PostgreSQL Operator can be installed as part of [Crunchy PostgreSQL for Kubernetes][hub-listing] +then PGO, the Postgres Operator from Crunchy Data, can be installed as part of [Crunchy PostgreSQL for Kubernetes][hub-listing] that is available in OperatorHub.io. [hub-listing]: https://operatorhub.io/operator/postgresql @@ -15,64 +15,23 @@ that is available in OperatorHub.io. ## Before You Begin -There are a few manual steps that the cluster administrator must perform prior to installing the PostgreSQL Operator. -At the very least, it must be provided with an initial configuration. +There are some optional Secrets you can add before installing PGO into your cluster. -First, make sure OLM and the OperatorHub.io catalog are installed by running -`kubectl get CatalogSources --all-namespaces`. You should see something similar to the following: +### Secrets (optional) -``` -NAMESPACE NAME DISPLAY TYPE PUBLISHER -olm operatorhubio-catalog Community Operators grpc OperatorHub.io -``` - -Take note of the name and namespace above, you will need them later on. - -Next, select a namespace in which to install the PostgreSQL Operator. PostgreSQL clusters will also be deployed here. 
-If it does not exist, create it now. - -``` -export PGO_OPERATOR_NAMESPACE=pgo -kubectl create namespace "$PGO_OPERATOR_NAMESPACE" -``` - -Next, clone the PostgreSQL Operator repository locally. - -``` -git clone -b v{{< param operatorVersion >}} https://github.com/CrunchyData/postgres-operator.git -cd postgres-operator -``` - -### PostgreSQL Operator Configuration - -Edit `conf/postgres-operator/pgo.yaml` to configure the deployment. Look over all of the options and make any -changes necessary for your environment. A full description of each option is available in the -[`pgo.yaml` configuration guide]({{< relref "configuration/pgo-yaml-configuration.md" >}}). - -When the file is ready, upload the entire directory to the `pgo-config` ConfigMap. - -``` -kubectl -n "$PGO_OPERATOR_NAMESPACE" create configmap pgo-config \ - --from-file=./conf/postgres-operator -``` - -### Secrets -Configure pgBackRest for your environment. If you do not plan to use AWS S3 to store backups, you can omit -the `aws-s3` keys below. +If you plan to use AWS S3 to store backups and would like to have the keys available for every backup, you can create a Secret as described below: ``` kubectl -n "$PGO_OPERATOR_NAMESPACE" create secret generic pgo-backrest-repo-config \ - --from-file=./installers/ansible/roles/pgo-operator/files/pgo-backrest-repo/config \ - --from-file=./installers/ansible/roles/pgo-operator/files/pgo-backrest-repo/sshd_config \ - --from-file=./installers/ansible/roles/pgo-operator/files/pgo-backrest-repo/aws-s3-ca.crt \ --from-literal=aws-s3-key="<your-aws-s3-key>" \ --from-literal=aws-s3-key-secret="<your-aws-s3-key-secret>" +kubectl -n "$PGO_OPERATOR_NAMESPACE" label secret pgo-backrest-repo-config \ + vendor=crunchydata ``` ### Certificates (optional) -The PostgreSQL Operator has an API that uses TLS to communicate securely with clients. If you have +PGO has an API that uses TLS to communicate securely with clients. If you have a certificate bundle validated by your organization, you can install it now. If not, the API will automatically generate and use a self-signed certificate. @@ -82,9 +41,6 @@ kubectl -n "$PGO_OPERATOR_NAMESPACE" create secret tls pgo.tls \ --key=/path/to/server.key ``` -Once these resources are in place, the PostgreSQL Operator can be installed into the cluster. - - ## Installation Create an `OperatorGroup` and a `Subscription` in your chosen namespace. @@ -117,7 +73,7 @@ YAML ## After You Install -Once the PostgreSQL Operator is installed in your Kubernetes cluster, you will need to do a few things +Once PGO is installed in your Kubernetes cluster, you will need to do a few things to use the [PostgreSQL Operator Client]({{< relref "/pgo-client/_index.md" >}}). Install the first set of client credentials and download the `pgo` binary and client certificates. @@ -127,7 +83,7 @@ PGO_CMD=kubectl ./deploy/install-bootstrap-creds.sh PGO_CMD=kubectl ./installers/kubectl/client-setup.sh ``` -The client needs to be able to reach the PostgreSQL Operator API from outside the Kubernetes cluster. +The client needs to be able to reach the PGO API from outside the Kubernetes cluster. Create an external service or forward a port locally. 
``` @@ -152,4 +108,3 @@ pgo version # pgo client version {{< param operatorVersion >}} # pgo-apiserver version {{< param operatorVersion >}} ``` - diff --git a/docs/content/installation/pgo-client.md b/docs/content/installation/pgo-client.md index 69dae759e1..6c584168df 100644 --- a/docs/content/installation/pgo-client.md +++ b/docs/content/installation/pgo-client.md @@ -1,5 +1,5 @@ --- -title: "Install `pgo` Client" +title: "Install \"pgo\" Client" date: draft: false weight: 30 @@ -8,23 +8,22 @@ weight: 30 # Install the PostgreSQL Operator (`pgo`) Client The following will install and configure the `pgo` client on all systems. For the -purpose of these instructions it's assumed that the Crunchy PostgreSQL Operator -is already deployed. +purpose of these instructions it's assumed that PGO, the Postgres Operator from Crunchy +Data, is already deployed. ## Prerequisites * For Kubernetes deployments: [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) configured to communicate with Kubernetes * For OpenShift deployments: [oc](https://docs.openshift.com/container-platform/3.11/cli_reference/get_started_cli.html) configured to communicate with OpenShift -To authenticate with the Crunchy PostgreSQL Operator API: +To authenticate with the PGO API: * Client CA Certificate * Client TLS Certificate * Client Key * `pgouser` file containing `<username>:<password>` -All of the requirements above should be obtained from an administrator who installed the Crunchy -PostgreSQL Operator. +All of the requirements above should be obtained from an administrator who installed PGO. ## Linux and macOS @@ -288,5 +287,4 @@ properly by simply running the following: pgo version ``` -If the above command outputs versions of both the client and API server, the Crunchy PostgreSQL -Operator client has been installed successfully. +If the above command outputs versions of both the client and API server, the `pgo` client has been installed successfully. diff --git a/docs/content/installation/postgres-operator.md b/docs/content/installation/postgres-operator.md index 0cbd542dd5..0a79ef9019 100644 --- a/docs/content/installation/postgres-operator.md +++ b/docs/content/installation/postgres-operator.md @@ -1,11 +1,11 @@ --- -title: Install the PostgreSQL Operator +title: Install PGO, the Postgres Operator date: draft: false weight: 20 --- -# The PostgreSQL Operator Installer +# PGO: Postgres Operator Installer ## Quickstart @@ -23,8 +23,8 @@ the PostgreSQL Operator. ## Overview -The PostgreSQL Operator comes with a container called `pgo-deployer` which -handles a variety of lifecycle actions for the PostgreSQL Operator, including: +PGO comes with a container called `pgo-deployer` which +handles a variety of lifecycle actions for the Postgres Operator, including: - Installation - Upgrading @@ -52,7 +52,7 @@ environmental requirements. By default, the `pgo-deployer` uses a ServiceAccount called `pgo-deployer-sa` that has a ClusterRoleBinding (`pgo-deployer-crb`) with several ClusterRole permissions. This is required to create the [Custom Resource Definitions](https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/) -that power the PostgreSQL Operator. While the PostgreSQL Operator itself can be +that power PGO. While the Postgres Operator itself can be scoped to a specific namespace, you will need to have `cluster-admin` for the initial deployment, or privileges that allow you to install Custom Resource Definitions. 
The required list of privileges are available in the [postgres-operator.yml](https://raw.githubusercontent.com/CrunchyData/postgres-operator/v{{< param operatorVersion >}}/installers/kubectl/postgres-operator.yml) file: @@ -82,7 +82,7 @@ For example, to create the `pgo` namespace: kubectl create namespace pgo ``` -The PostgreSQL Operator has the ability to manage PostgreSQL clusters across +The Postgres Operator has the ability to manage PostgreSQL clusters across multiple Kubernetes [Namespaces](https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/), including the ability to add and remove Namespaces that it watches. Doing so does require the PostgreSQL Operator to have elevated privileges, and as such, @@ -120,7 +120,7 @@ PostgreSQL Operator cannot create the RBAC itself. ## Configuration - `postgres-operator.yml` The `postgres-operator.yml` file contains all of the configuration parameters -for deploying the PostgreSQL Operator. The [example file](https://github.com/CrunchyData/postgres-operator/blob/v{{< param operatorVersion >}}/installers/kubectl/postgres-operator.yml) +for deploying PGO. The [example file](https://github.com/CrunchyData/postgres-operator/blob/v{{< param operatorVersion >}}/installers/kubectl/postgres-operator.yml) contains defaults that should work in most Kubernetes environments, but it may require some customization. @@ -138,7 +138,7 @@ set to `install`, `update`, and `uninstall`. ### Image Pull Secrets -If you are pulling the PostgreSQL Operator images from a private registry, you +If you are pulling PGO images from a private registry, you will need to setup an [imagePullSecret](https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/) with access to the registry. The image pull secret will need to be added to the @@ -174,7 +174,7 @@ oc secrets link --for=pull --namespace=}}) for instructions on how to install the PostgreSQL Operator Monitoring infrastructure. diff --git a/docs/content/installation/prerequisites.md b/docs/content/installation/prerequisites.md index 2df54859d9..0a70756057 100644 --- a/docs/content/installation/prerequisites.md +++ b/docs/content/installation/prerequisites.md @@ -7,11 +7,11 @@ weight: 10 # Prerequisites -The following is required prior to installing PostgreSQL Operator. +The following is required prior to installing PGO. ## Environment -The PostgreSQL Operator is tested in the following environments: +PGO is tested in the following environments: * Kubernetes v1.13+ * Red Hat OpenShift v3.11+ @@ -22,7 +22,7 @@ The PostgreSQL Operator is tested in the following environments: #### IBM Cloud Pak Data -If you install the PostgreSQL Operator, which comes with Crunchy +If you install PGO, which comes with Crunchy PostgreSQL for Kubernetes, on IBM Cloud Pak Data, please note the following additional requirements: @@ -33,14 +33,14 @@ additional requirements: * Minimum Memory Requirements: 120MB * Minimum Storage Requirements: 5MB -**Note**: PostgreSQL clusters deployed by the PostgreSQL Operator with +**Note**: PostgreSQL clusters deployed by PGO with Crunchy PostgreSQL for Kubernetes are workload dependent. As such, users should allocate enough resources for their PostgreSQL clusters. ## Client Interfaces -The PostgreSQL Operator installer will install the [`pgo` client]({{< relref "/pgo-client/_index.md" >}}) interface -to help with using the PostgreSQL Operator. 
diff --git a/docs/content/installation/prerequisites.md b/docs/content/installation/prerequisites.md index 2df54859d9..0a70756057 100644 --- a/docs/content/installation/prerequisites.md +++ b/docs/content/installation/prerequisites.md @@ -7,11 +7,11 @@ weight: 10 # Prerequisites -The following is required prior to installing PostgreSQL Operator. +The following is required prior to installing PGO. ## Environment -The PostgreSQL Operator is tested in the following environments: +PGO is tested in the following environments: * Kubernetes v1.13+ * Red Hat OpenShift v3.11+ @@ -22,7 +22,7 @@ The PostgreSQL Operator is tested in the following environments: #### IBM Cloud Pak Data -If you install the PostgreSQL Operator, which comes with Crunchy +If you install PGO, which comes with Crunchy PostgreSQL for Kubernetes, on IBM Cloud Pak Data, please note the following additional requirements: @@ -33,14 +33,14 @@ additional requirements: * Minimum Memory Requirements: 120MB * Minimum Storage Requirements: 5MB -**Note**: PostgreSQL clusters deployed by the PostgreSQL Operator with +**Note**: PostgreSQL clusters deployed by PGO with Crunchy PostgreSQL for Kubernetes are workload dependent. As such, users should allocate enough resources for their PostgreSQL clusters. ## Client Interfaces -The PostgreSQL Operator installer will install the [`pgo` client]({{< relref "/pgo-client/_index.md" >}}) interface -to help with using the PostgreSQL Operator. However, it is also recommend that +The Postgres Operator installer will install the [`pgo` client]({{< relref "/pgo-client/_index.md" >}}) interface +to help with using PGO. However, it is also recommended that you have access to [`kubectl`](https://kubernetes.io/docs/tasks/tools/install-kubectl/) or [`oc`](https://www.okd.io/download.html) and are able to communicate with the Kubernetes or OpenShift cluster that you are working with. @@ -63,7 +63,7 @@ access to these ports. ### Application Ports -The PostgreSQL Operator deploys different services to support a production +PGO deploys different services to support a production PostgreSQL environment. Below is a list of the applications and their default Service ports. diff --git a/docs/content/pgo-client/_index.md b/docs/content/pgo-client/_index.md index f1c6149c3f..bc33712e18 100644 --- a/docs/content/pgo-client/_index.md +++ b/docs/content/pgo-client/_index.md @@ -6,11 +6,11 @@ weight: 50 --- The PostgreSQL Operator Client, aka `pgo`, is the most convenient way to -interact with the PostgreSQL Operator. `pgo` provides many convenience methods +interact with the Postgres Operator. `pgo` provides many convenience methods for creating, managing, and deleting PostgreSQL clusters through a series of simple commands. The `pgo` client interfaces with the API that is provided by the PostgreSQL Operator and can leverage the RBAC and TLS systems that are -provided by the PostgreSQL Operator +provided by PGO: the Postgres Operator. ![Architecture](/Operator-Architecture.png) @@ -154,7 +154,6 @@ client. | Name | Description | | :-- | :-- | | `EXCLUDE_OS_TRUST` | Exclude CA certs from OS default trust store. | -| `GENERATE_BASH_COMPLETION` | If set, will allow `pgo` to leverage "bash completion" to help complete commands as they are typed. | | `PGO_APISERVER_URL` | The URL for the PostgreSQL Operator apiserver that will process the request from the pgo client. Note that the URL should **not** end in a `/`. | | `PGO_CA_CERT` | The CA certificate file path for authenticating to the PostgreSQL Operator apiserver. | | `PGO_CLIENT_CERT` | The client certificate file path for authenticating to the PostgreSQL Operator apiserver. |
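The environment variables in the table above are how the `pgo` client finds and authenticates to the API server. A minimal sketch of wiring them up in a shell profile; the URL and file paths are illustrative assumptions, not values mandated by these docs:

```
# Illustrative pgo client configuration; adjust the host and paths for your install
export PGO_APISERVER_URL='https://127.0.0.1:8443'
export PGO_CA_CERT="$HOME/.pgo/pgo/client.crt"
export PGO_CLIENT_CERT="$HOME/.pgo/pgo/client.crt"
export PGO_CLIENT_KEY="$HOME/.pgo/pgo/client.key"
```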
diff --git a/docs/content/pgo-client/common-tasks.md b/docs/content/pgo-client/common-tasks.md index 50bdd46e72..27680bd97b 100644 --- a/docs/content/pgo-client/common-tasks.md +++ b/docs/content/pgo-client/common-tasks.md @@ -107,7 +107,7 @@ which yields output similar to: BasicAuth: "" Cluster: CCPImagePrefix: crunchydata - CCPImageTag: {{< param centosBase >}}-{{< param postgresVersion >}}-{{< param operatorVersion >}} + CCPImageTag: {{< param ubiBase >}}-{{< param postgresVersion >}}-{{< param operatorVersion >}} Policies: "" Metrics: false Badger: false @@ -128,15 +128,13 @@ Cluster: BackrestS3URIStyle: "" BackrestS3VerifyTLS: true DisableAutofail: false - PgmonitorPassword: "" - EnableCrunchyadm: false DisableReplicaStartFailReinit: false PodAntiAffinity: preferred SyncReplication: false Pgo: Audit: false PGOImagePrefix: crunchydata - PGOImageTag: {{< param centosBase >}}-{{< param operatorVersion >}} + PGOImageTag: {{< param ubiBase >}}-{{< param operatorVersion >}} PrimaryStorage: nfsstorage BackupStorage: nfsstorage ReplicaStorage: nfsstorage @@ -253,7 +251,7 @@ example below, the cluster will use PostgreSQL {{< param postgresVersion >}} and ```shell pgo create cluster hagiscluster \ --ccp-image=crunchy-postgres-gis-ha \ - --ccp-image-tag={{< param centosBase >}}-{{< param postgresVersion >}}-{{< param postgisVersion >}}-{{< param operatorVersion >}} + --ccp-image-tag={{< param ubiBase >}}-{{< param postgresVersion >}}-{{< param postgisVersion >}}-{{< param operatorVersion >}} ``` #### Create a PostgreSQL Cluster with a Tablespace @@ -378,7 +376,7 @@ pgo show cluster hacluster which will yield output similar to: ``` -cluster : hacluster (crunchy-postgres-ha:{{< param centosBase >}}-{{< param postgresVersion >}}-{{< param operatorVersion >}}) +cluster : hacluster (crunchy-postgres-ha:{{< param ubiBase >}}-{{< param postgresVersion >}}-{{< param operatorVersion >}}) pod : hacluster-6dc6cfcfb9-f9knq (Running) on node01 (1/1) (primary) pvc : hacluster resources : CPU Limit= Memory Limit=, CPU Request= Memory Request= @@ -386,7 +384,7 @@ cluster : hacluster (crunchy-postgres-ha:{{< param centosBase >}}-{{< param post deployment : hacluster deployment : hacluster-backrest-shared-repo service : hacluster - ClusterIP (10.102.20.42) - labels : pg-pod-anti-affinity= archive-timeout=60 crunchy-pgbadger=false crunchy-postgres-exporter=false deployment-name=hacluster pg-cluster=hacluster crunchy-pgha-scope=hacluster autofail=true pgo-backrest=true pgo-version={{< param operatorVersion >}} current-primary=hacluster name=hacluster pgouser=admin workflowid=ae714d12-f5d0-4fa9-910f-21944b41dec8 + labels : archive-timeout=60 deployment-name=hacluster pg-cluster=hacluster crunchy-pgha-scope=hacluster pgo-version={{< param operatorVersion >}} current-primary=hacluster name=hacluster pgouser=admin workflowid=ae714d12-f5d0-4fa9-910f-21944b41dec8 ``` ### Deleting a Cluster @@ -699,7 +697,7 @@ high availability on the PostgreSQL cluster manually. You can re-enable high availability by executing the following command: ``` -pgo update cluster hacluster --autofail=true +pgo update cluster hacluster --enable-autofail ``` ### Logical Backups (`pg_dump` / `pg_dumpall`)
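The logical backup section beginning here pairs with the `pgo backup` flags documented later in this patch (`--backup-type` and `-d`/`--database` in `pgo_backup.md`). A minimal sketch, assuming a cluster named `hacluster`:

```
# Take a pg_dump-based logical backup of the "postgres" database
pgo backup hacluster --backup-type=pgdump -d postgres
```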
@@ -818,13 +816,26 @@ pgo failover --query hacluster The PostgreSQL Operator is set up with an automated failover system based on distributed consensus, but there may be times where you wish to have your -cluster manually failover. If you wish to have your cluster manually failover, -first, query your cluster to determine which failover targets are available. -The query command also provides information that may help your decision, such as -replication lag: +cluster manually failover. There are two ways to issue a manual failover to +your PostgreSQL cluster: + +1. Allow the PostgreSQL Operator to select the best replica candidate to +failover to +2. Select your own replica candidate to failover to. + +To have the PostgreSQL Operator select the best replica candidate for failover, +all you need to do is execute the following command: + +``` +pgo failover hacluster +``` + +If you wish to have your cluster manually failover, you must first query your +cluster to determine which failover targets are available. The query command +also provides information that may help your decision, such as replication lag: ```shell -pgo failover --query hacluster +pgo failover hacluster --query ``` Once you have selected the replica that is best for you to failover to, you can @@ -835,7 +846,9 @@ pgo failover hacluster --target=hacluster-abcd ``` where `hacluster-abcd` is the name of the PostgreSQL instance that you want to -promote to become the new primary +promote to become the new primary. + +Both methods perform the failover immediately upon execution. #### Destroying a Replica @@ -1264,7 +1277,7 @@ specifications: ```shell pgo create cluster hippo --pgbouncer --replica-count=2 \ - --pgbackrest-storage-type=local,s3 \ + --pgbackrest-storage-type=posix,s3 \ --pgbackrest-s3-key= \ --pgbackrest-s3-key-secret= \ --pgbackrest-s3-bucket=watering-hole \ @@ -1298,6 +1311,14 @@ pgo create cluster hippo-standby --standby --replica-count=2 \ --password=opensourcehippo ``` +If you are unsure of your user credentials from the original `hippo` cluster, +you can retrieve them using the [`pgo show user`]({{< relref "/pgo-client/reference/pgo_show_user.md" >}}) +command with the `--show-system-accounts` flag: + +``` +pgo show user hippo --show-system-accounts +``` + The standby cluster will take a few moments to bootstrap, but it is now set up! ### Promoting a Standby Cluster @@ -1477,22 +1498,7 @@ You can view policies as follows: ### Connection Pooling via pgBouncer -To add a pgbouncer Deployment to your Postgres cluster, enter: - - pgo create cluster hacluster --pgbouncer -n pgouser1 - -You can add pgbouncer after a Postgres cluster is created as follows: - - pgo create pgbouncer hacluster - pgo create pgbouncer --selector=name=hacluster - -You can also specify a pgbouncer password as follows: - - pgo create cluster hacluster --pgbouncer --pgbouncer-pass=somepass -n pgouser1 - -You can remove a pgbouncer from a cluster as follows: - - pgo delete pgbouncer hacluster -n pgouser1 +Please see the [tutorial on pgBouncer]({{< relref "tutorial/pgbouncer.md" >}}). ### Query Analysis via pgBadger diff --git a/docs/content/pgo-client/reference/pgo_apply.md b/docs/content/pgo-client/reference/pgo_apply.md index 403d6c9d47..8cb65da368 100644 --- a/docs/content/pgo-client/reference/pgo_apply.md +++ b/docs/content/pgo-client/reference/pgo_apply.md @@ -28,7 +28,7 @@ pgo apply [flags] ### Options inherited from parent commands ``` - --apiserver-url string The URL for the PostgreSQL Operator apiserver that will process the request from the pgo client. + --apiserver-url string The URL for the PostgreSQL Operator apiserver that will process the request from the pgo client. Note that the URL should **not** end in a '/'.
--debug Enable additional output for debugging. --disable-tls Disable TLS authentication to the Postgres Operator. --exclude-os-trust Exclude CA certs from OS default trust store @@ -42,4 +42,4 @@ pgo apply [flags] * [pgo](/pgo-client/reference/pgo/) - The pgo command line interface. -###### Auto generated by spf13/cobra on 1-Oct-2020 +###### Auto generated by spf13/cobra on 14-Jan-2021 diff --git a/docs/content/pgo-client/reference/pgo_backup.md b/docs/content/pgo-client/reference/pgo_backup.md index 0e4c65a530..bca8d77396 100644 --- a/docs/content/pgo-client/reference/pgo_backup.md +++ b/docs/content/pgo-client/reference/pgo_backup.md @@ -22,7 +22,7 @@ pgo backup [flags] --backup-type string The backup type to perform. Default is pgbackrest. Valid backup types are pgbackrest and pgdump. (default "pgbackrest") -d, --database string The name of the database pgdump will backup. (default "postgres") -h, --help help for backup - --pgbackrest-storage-type string The type of storage to use when scheduling pgBackRest backups. Either "local", "s3" or both, comma separated. (default "local") + --pgbackrest-storage-type string The type of storage to use when scheduling pgBackRest backups. Either "posix", "s3" or both, comma separated. (default "posix") --pvc-name string The PVC name to use for the backup instead of the default. -s, --selector string The selector to use for cluster filtering. ``` @@ -30,7 +30,7 @@ pgo backup [flags] ### Options inherited from parent commands ``` - --apiserver-url string The URL for the PostgreSQL Operator apiserver that will process the request from the pgo client. + --apiserver-url string The URL for the PostgreSQL Operator apiserver that will process the request from the pgo client. Note that the URL should **not** end in a '/'. --debug Enable additional output for debugging. --disable-tls Disable TLS authentication to the Postgres Operator. --exclude-os-trust Exclude CA certs from OS default trust store @@ -44,4 +44,4 @@ pgo backup [flags] * [pgo](/pgo-client/reference/pgo/) - The pgo command line interface. -###### Auto generated by spf13/cobra on 1-Oct-2020 +###### Auto generated by spf13/cobra on 14-Jan-2021 diff --git a/docs/content/pgo-client/reference/pgo_cat.md b/docs/content/pgo-client/reference/pgo_cat.md index cef3887e31..0b4a13747f 100644 --- a/docs/content/pgo-client/reference/pgo_cat.md +++ b/docs/content/pgo-client/reference/pgo_cat.md @@ -24,7 +24,7 @@ pgo cat [flags] ### Options inherited from parent commands ``` - --apiserver-url string The URL for the PostgreSQL Operator apiserver that will process the request from the pgo client. + --apiserver-url string The URL for the PostgreSQL Operator apiserver that will process the request from the pgo client. Note that the URL should **not** end in a '/'. --debug Enable additional output for debugging. --disable-tls Disable TLS authentication to the Postgres Operator. --exclude-os-trust Exclude CA certs from OS default trust store @@ -38,4 +38,4 @@ pgo cat [flags] * [pgo](/pgo-client/reference/pgo/) - The pgo command line interface. 
-###### Auto generated by spf13/cobra on 1-Oct-2020 +###### Auto generated by spf13/cobra on 14-Jan-2021 diff --git a/docs/content/pgo-client/reference/pgo_clone.md b/docs/content/pgo-client/reference/pgo_clone.md deleted file mode 100644 index 6f07741010..0000000000 --- a/docs/content/pgo-client/reference/pgo_clone.md +++ /dev/null @@ -1,45 +0,0 @@ ---- -title: "pgo clone" ---- -## pgo clone - -Copies the primary database of an existing cluster to a new cluster - -### Synopsis - -Clone makes a copy of an existing PostgreSQL cluster managed by the Operator and creates a new PostgreSQL cluster managed by the Operator, with the data from the old cluster. - - pgo clone oldcluster newcluster - -``` -pgo clone [flags] -``` - -### Options - -``` - --enable-metrics If sets, enables metrics collection on the newly cloned cluster - -h, --help help for clone - --pgbackrest-pvc-size string The size of the PVC capacity for the pgBackRest repository. Overrides the value set in the storage class. This is ignored if the storage type of "local" is not used. Must follow the standard Kubernetes format, e.g. "10.1Gi" - --pgbackrest-storage-source string The data source for the clone when both "local" and "s3" are enabled in the source cluster. Either "local", "s3" or both, comma separated. (default "local") - --pvc-size string The size of the PVC capacity for primary and replica PostgreSQL instances. Overrides the value set in the storage class. Must follow the standard Kubernetes format, e.g. "10.1Gi" -``` - -### Options inherited from parent commands - -``` - --apiserver-url string The URL for the PostgreSQL Operator apiserver that will process the request from the pgo client. - --debug Enable additional output for debugging. - --disable-tls Disable TLS authentication to the Postgres Operator. - --exclude-os-trust Exclude CA certs from OS default trust store - -n, --namespace string The namespace to use for pgo requests. - --pgo-ca-cert string The CA Certificate file path for authenticating to the PostgreSQL Operator apiserver. - --pgo-client-cert string The Client Certificate file path for authenticating to the PostgreSQL Operator apiserver. - --pgo-client-key string The Client Key file path for authenticating to the PostgreSQL Operator apiserver. -``` - -### SEE ALSO - -* [pgo](/pgo-client/reference/pgo/) - The pgo command line interface. - -###### Auto generated by spf13/cobra on 2-Jul-2020 diff --git a/docs/content/pgo-client/reference/pgo_create.md b/docs/content/pgo-client/reference/pgo_create.md index 14cc07b5d0..2bd589dd40 100644 --- a/docs/content/pgo-client/reference/pgo_create.md +++ b/docs/content/pgo-client/reference/pgo_create.md @@ -30,7 +30,7 @@ pgo create [flags] ### Options inherited from parent commands ``` - --apiserver-url string The URL for the PostgreSQL Operator apiserver that will process the request from the pgo client. + --apiserver-url string The URL for the PostgreSQL Operator apiserver that will process the request from the pgo client. Note that the URL should **not** end in a '/'. --debug Enable additional output for debugging. --disable-tls Disable TLS authentication to the Postgres Operator. 
--exclude-os-trust Exclude CA certs from OS default trust store @@ -53,4 +53,4 @@ pgo create [flags] * [pgo create schedule](/pgo-client/reference/pgo_create_schedule/) - Create a cron-like scheduled task * [pgo create user](/pgo-client/reference/pgo_create_user/) - Create a PostgreSQL user -###### Auto generated by spf13/cobra on 1-Oct-2020 +###### Auto generated by spf13/cobra on 14-Jan-2021 diff --git a/docs/content/pgo-client/reference/pgo_create_cluster.md b/docs/content/pgo-client/reference/pgo_create_cluster.md index 265b3c5517..4fa7532a5b 100644 --- a/docs/content/pgo-client/reference/pgo_create_cluster.md +++ b/docs/content/pgo-client/reference/pgo_create_cluster.md @@ -45,10 +45,11 @@ pgo create cluster [flags] --exporter-memory string Set the amount of memory to request for the Crunchy Postgres Exporter sidecar container. Defaults to server value (24Mi). --exporter-memory-limit string Set the amount of memory to limit for the Crunchy Postgres Exporter sidecar container. -h, --help help for cluster - -l, --labels string The labels to apply to this cluster. + --label strings Add labels to apply to the PostgreSQL cluster, e.g. "key=value", "prefix/key=value". Can specify flag multiple times. --memory string Set the amount of RAM to request, e.g. 1GiB. Overrides the default server value. --memory-limit string Set the amount of RAM to limit, e.g. 1GiB. --metrics Adds the crunchy-postgres-exporter container to the database pod. + --node-affinity-type string Sets the type of node affinity to use. Can be either preferred (default) or required. Must be used with --node-label --node-label string The node label (key=value) to use in placing the primary database. If not set, any node is used. --password string The password to use for standard user account created during cluster initialization. --password-length int If no password is supplied, sets the length of the automatically generated password. Defaults to the value set on the server. @@ -59,7 +60,7 @@ pgo create cluster [flags] --pgbackrest-custom-config string The name of a ConfigMap containing pgBackRest configuration files. --pgbackrest-memory string Set the amount of memory to request for the pgBackRest repository. Defaults to server value (48Mi). --pgbackrest-memory-limit string Set the amount of memory to limit for the pgBackRest repository. - --pgbackrest-pvc-size string The size of the PVC capacity for the pgBackRest repository. Overrides the value set in the storage class. This is ignored if the storage type of "local" is not used. Must follow the standard Kubernetes format, e.g. "10.1Gi" + --pgbackrest-pvc-size string The size of the PVC capacity for the pgBackRest repository. Overrides the value set in the storage class. This is ignored if the storage type of "posix" is not used. Must follow the standard Kubernetes format, e.g. "10.1Gi" --pgbackrest-repo-path string The pgBackRest repository path that should be utilized instead of the default. Required for standby clusters to define the location of an existing pgBackRest repository. --pgbackrest-s3-bucket string The AWS S3 bucket that should be utilized for the cluster when the "s3" storage type is enabled for pgBackRest. @@ -71,7 +72,7 @@ pgo create cluster [flags] --pgbackrest-s3-uri-style string Specifies whether "host" or "path" style URIs will be used when connecting to S3. --pgbackrest-s3-verify-tls This sets if pgBackRest should verify the TLS certificate when connecting to S3. To disable, use "--pgbackrest-s3-verify-tls=false". 
(default true) --pgbackrest-storage-config string The name of the storage config in pgo.yaml to use for the pgBackRest local repository. - --pgbackrest-storage-type string The type of storage to use with pgBackRest. Either "local", "s3" or both, comma separated. (default "local") + --pgbackrest-storage-type string The type of storage to use with pgBackRest. Either "posix", "s3" or both, comma separated. (default "posix") --pgbadger Adds the crunchy-pgbadger container to the database pod. --pgbouncer Adds a crunchy-pgbouncer deployment to the cluster. --pgbouncer-cpu string Set the number of millicores to request for CPU for pgBouncer. Defaults to being unset. @@ -79,6 +80,8 @@ pgo create cluster [flags] --pgbouncer-memory string Set the amount of memory to request for pgBouncer. Defaults to server value (24Mi). --pgbouncer-memory-limit string Set the amount of memory to limit for pgBouncer. --pgbouncer-replicas int32 Set the total number of pgBouncer instances to deploy. If not set, defaults to 1. + --pgbouncer-service-type string The Service type to use for pgBouncer. Defaults to the Service type of the PostgreSQL cluster. + --pgbouncer-tls-secret string The name of the secret that contains the TLS keypair to use for enabling pgBouncer to accept TLS connections. Must also set server-tls-secret and server-ca-secret. --pgo-image-prefix string The PGOImagePrefix to use for cluster creation. If specified, overrides the global configuration. --pod-anti-affinity string Specifies the type of anti-affinity that should be utilized when applying default pod anti-affinity rules to PG clusters (default "preferred") --pod-anti-affinity-pgbackrest string Set the Pod anti-affinity rules specifically for the pgBackRest repository. Defaults to the default cluster pod anti-affinity (i.e. "preferred"), or the value set by --pod-anti-affinity @@ -108,6 +111,9 @@ pgo create cluster [flags] --tablespace=name=ts1:storageconfig=nfsstorage:pvcsize=10Gi --tls-only If true, forces all PostgreSQL connections to be over TLS. Must also set "server-tls-secret" and "server-ca-secret" + --toleration strings Set Pod tolerations for each PostgreSQL instance in a cluster. + The general format is "key=value:Effect" + For example, to add an Exists and an Equals toleration: "--toleration=ssd:NoSchedule,zone=east:NoSchedule" -u, --username string The username to use for creating the PostgreSQL user with standard permissions. Defaults to the value in the PostgreSQL Operator configuration. --wal-storage-config string The name of a storage configuration in pgo.yaml to use for PostgreSQL's write-ahead log (WAL). --wal-storage-size string The size of the capacity for WAL storage, which overrides any value in the storage configuration. Follows the Kubernetes quantity format. @@ -116,7 +122,7 @@ pgo create cluster [flags] ### Options inherited from parent commands ``` - --apiserver-url string The URL for the PostgreSQL Operator apiserver that will process the request from the pgo client. + --apiserver-url string The URL for the PostgreSQL Operator apiserver that will process the request from the pgo client. Note that the URL should **not** end in a '/'. --debug Enable additional output for debugging. --disable-tls Disable TLS authentication to the Postgres Operator. 
--exclude-os-trust Exclude CA certs from OS default trust store @@ -130,4 +136,4 @@ pgo create cluster [flags] * [pgo create](/pgo-client/reference/pgo_create/) - Create a Postgres Operator resource -###### Auto generated by spf13/cobra on 1-Oct-2020 +###### Auto generated by spf13/cobra on 18-Jan-2021 diff --git a/docs/content/pgo-client/reference/pgo_create_namespace.md b/docs/content/pgo-client/reference/pgo_create_namespace.md index 90894e2b77..bf544aba72 100644 --- a/docs/content/pgo-client/reference/pgo_create_namespace.md +++ b/docs/content/pgo-client/reference/pgo_create_namespace.md @@ -28,7 +28,7 @@ pgo create namespace [flags] ### Options inherited from parent commands ``` - --apiserver-url string The URL for the PostgreSQL Operator apiserver that will process the request from the pgo client. + --apiserver-url string The URL for the PostgreSQL Operator apiserver that will process the request from the pgo client. Note that the URL should **not** end in a '/'. --debug Enable additional output for debugging. --disable-tls Disable TLS authentication to the Postgres Operator. --exclude-os-trust Exclude CA certs from OS default trust store @@ -42,4 +42,4 @@ pgo create namespace [flags] * [pgo create](/pgo-client/reference/pgo_create/) - Create a Postgres Operator resource -###### Auto generated by spf13/cobra on 1-Oct-2020 +###### Auto generated by spf13/cobra on 14-Jan-2021 diff --git a/docs/content/pgo-client/reference/pgo_create_pgadmin.md b/docs/content/pgo-client/reference/pgo_create_pgadmin.md index 1e0c43d578..0dd744cbce 100644 --- a/docs/content/pgo-client/reference/pgo_create_pgadmin.md +++ b/docs/content/pgo-client/reference/pgo_create_pgadmin.md @@ -25,7 +25,7 @@ pgo create pgadmin [flags] ### Options inherited from parent commands ``` - --apiserver-url string The URL for the PostgreSQL Operator apiserver that will process the request from the pgo client. + --apiserver-url string The URL for the PostgreSQL Operator apiserver that will process the request from the pgo client. Note that the URL should **not** end in a '/'. --debug Enable additional output for debugging. --disable-tls Disable TLS authentication to the Postgres Operator. --exclude-os-trust Exclude CA certs from OS default trust store @@ -39,4 +39,4 @@ pgo create pgadmin [flags] * [pgo create](/pgo-client/reference/pgo_create/) - Create a Postgres Operator resource -###### Auto generated by spf13/cobra on 1-Oct-2020 +###### Auto generated by spf13/cobra on 14-Jan-2021 diff --git a/docs/content/pgo-client/reference/pgo_create_pgbouncer.md b/docs/content/pgo-client/reference/pgo_create_pgbouncer.md index ad406e60e0..c0e10c6b41 100644 --- a/docs/content/pgo-client/reference/pgo_create_pgbouncer.md +++ b/docs/content/pgo-client/reference/pgo_create_pgbouncer.md @@ -25,12 +25,14 @@ pgo create pgbouncer [flags] --memory-limit string Set the amount of memory to limit for pgBouncer. --replicas int32 Set the total number of pgBouncer instances to deploy. If not set, defaults to 1. -s, --selector string The selector to use for cluster filtering. + --service-type string The Service type to use for pgBouncer. Defaults to the Service type of the PostgreSQL cluster. + --tls-secret string The name of the secret that contains the TLS keypair to use for enabling pgBouncer to accept TLS connections. The PostgreSQL cluster must have TLS enabled. ``` ### Options inherited from parent commands ``` - --apiserver-url string The URL for the PostgreSQL Operator apiserver that will process the request from the pgo client. 
+ --apiserver-url string The URL for the PostgreSQL Operator apiserver that will process the request from the pgo client. Note that the URL should **not** end in a '/'. --debug Enable additional output for debugging. --disable-tls Disable TLS authentication to the Postgres Operator. --exclude-os-trust Exclude CA certs from OS default trust store @@ -44,4 +46,4 @@ pgo create pgbouncer [flags] * [pgo create](/pgo-client/reference/pgo_create/) - Create a Postgres Operator resource -###### Auto generated by spf13/cobra on 1-Oct-2020 +###### Auto generated by spf13/cobra on 14-Jan-2021 diff --git a/docs/content/pgo-client/reference/pgo_create_pgorole.md b/docs/content/pgo-client/reference/pgo_create_pgorole.md index 50bcc66915..5320296bb4 100644 --- a/docs/content/pgo-client/reference/pgo_create_pgorole.md +++ b/docs/content/pgo-client/reference/pgo_create_pgorole.md @@ -25,7 +25,7 @@ pgo create pgorole [flags] ### Options inherited from parent commands ``` - --apiserver-url string The URL for the PostgreSQL Operator apiserver that will process the request from the pgo client. + --apiserver-url string The URL for the PostgreSQL Operator apiserver that will process the request from the pgo client. Note that the URL should **not** end in a '/'. --debug Enable additional output for debugging. --disable-tls Disable TLS authentication to the Postgres Operator. --exclude-os-trust Exclude CA certs from OS default trust store @@ -39,4 +39,4 @@ pgo create pgorole [flags] * [pgo create](/pgo-client/reference/pgo_create/) - Create a Postgres Operator resource -###### Auto generated by spf13/cobra on 1-Oct-2020 +###### Auto generated by spf13/cobra on 14-Jan-2021 diff --git a/docs/content/pgo-client/reference/pgo_create_pgouser.md b/docs/content/pgo-client/reference/pgo_create_pgouser.md index 35513ea915..aa5d39b5eb 100644 --- a/docs/content/pgo-client/reference/pgo_create_pgouser.md +++ b/docs/content/pgo-client/reference/pgo_create_pgouser.md @@ -28,7 +28,7 @@ pgo create pgouser [flags] ### Options inherited from parent commands ``` - --apiserver-url string The URL for the PostgreSQL Operator apiserver that will process the request from the pgo client. + --apiserver-url string The URL for the PostgreSQL Operator apiserver that will process the request from the pgo client. Note that the URL should **not** end in a '/'. --debug Enable additional output for debugging. --disable-tls Disable TLS authentication to the Postgres Operator. --exclude-os-trust Exclude CA certs from OS default trust store @@ -42,4 +42,4 @@ pgo create pgouser [flags] * [pgo create](/pgo-client/reference/pgo_create/) - Create a Postgres Operator resource -###### Auto generated by spf13/cobra on 1-Oct-2020 +###### Auto generated by spf13/cobra on 14-Jan-2021 diff --git a/docs/content/pgo-client/reference/pgo_create_policy.md b/docs/content/pgo-client/reference/pgo_create_policy.md index ac17b059f6..0bce879654 100644 --- a/docs/content/pgo-client/reference/pgo_create_policy.md +++ b/docs/content/pgo-client/reference/pgo_create_policy.md @@ -20,13 +20,12 @@ pgo create policy [flags] ``` -h, --help help for policy -i, --in-file string The policy file path to use for adding a policy. - -u, --url string The url to use for adding a policy. ``` ### Options inherited from parent commands ``` - --apiserver-url string The URL for the PostgreSQL Operator apiserver that will process the request from the pgo client. + --apiserver-url string The URL for the PostgreSQL Operator apiserver that will process the request from the pgo client. 
Note that the URL should **not** end in a '/'. --debug Enable additional output for debugging. --disable-tls Disable TLS authentication to the Postgres Operator. --exclude-os-trust Exclude CA certs from OS default trust store @@ -40,4 +39,4 @@ pgo create policy [flags] * [pgo create](/pgo-client/reference/pgo_create/) - Create a Postgres Operator resource -###### Auto generated by spf13/cobra on 1-Oct-2020 +###### Auto generated by spf13/cobra on 14-Jan-2021 diff --git a/docs/content/pgo-client/reference/pgo_create_schedule.md b/docs/content/pgo-client/reference/pgo_create_schedule.md index 4aeb07fe88..d303b28488 100644 --- a/docs/content/pgo-client/reference/pgo_create_schedule.md +++ b/docs/content/pgo-client/reference/pgo_create_schedule.md @@ -22,7 +22,7 @@ pgo create schedule [flags] --database string The database to run the SQL policy against. -h, --help help for schedule --pgbackrest-backup-type string The type of pgBackRest backup to schedule (full, diff or incr). - --pgbackrest-storage-type string The type of storage to use when scheduling pgBackRest backups. Either "local", "s3" or both, comma separated. (default "local") + --pgbackrest-storage-type string The type of storage to use when scheduling pgBackRest backups. Either "posix", "s3" or both, comma separated. (default "posix") --policy string The policy to use for SQL schedules. --schedule string The schedule assigned to the cron task. --schedule-opts string The custom options passed to the create schedule API. @@ -34,7 +34,7 @@ pgo create schedule [flags] ### Options inherited from parent commands ``` - --apiserver-url string The URL for the PostgreSQL Operator apiserver that will process the request from the pgo client. + --apiserver-url string The URL for the PostgreSQL Operator apiserver that will process the request from the pgo client. Note that the URL should **not** end in a '/'. --debug Enable additional output for debugging. --disable-tls Disable TLS authentication to the Postgres Operator. --exclude-os-trust Exclude CA certs from OS default trust store @@ -48,4 +48,4 @@ pgo create schedule [flags] * [pgo create](/pgo-client/reference/pgo_create/) - Create a Postgres Operator resource -###### Auto generated by spf13/cobra on 1-Oct-2020 +###### Auto generated by spf13/cobra on 14-Jan-2021 diff --git a/docs/content/pgo-client/reference/pgo_create_user.md b/docs/content/pgo-client/reference/pgo_create_user.md index cd38c71059..106b27a59f 100644 --- a/docs/content/pgo-client/reference/pgo_create_user.md +++ b/docs/content/pgo-client/reference/pgo_create_user.md @@ -36,7 +36,7 @@ pgo create user [flags] ### Options inherited from parent commands ``` - --apiserver-url string The URL for the PostgreSQL Operator apiserver that will process the request from the pgo client. + --apiserver-url string The URL for the PostgreSQL Operator apiserver that will process the request from the pgo client. Note that the URL should **not** end in a '/'. --debug Enable additional output for debugging. --disable-tls Disable TLS authentication to the Postgres Operator. 
--exclude-os-trust Exclude CA certs from OS default trust store @@ -50,4 +50,4 @@ pgo create user [flags] * [pgo create](/pgo-client/reference/pgo_create/) - Create a Postgres Operator resource -###### Auto generated by spf13/cobra on 1-Oct-2020 +###### Auto generated by spf13/cobra on 14-Jan-2021 diff --git a/docs/content/pgo-client/reference/pgo_delete.md b/docs/content/pgo-client/reference/pgo_delete.md index b2233b9c50..1eb47af507 100644 --- a/docs/content/pgo-client/reference/pgo_delete.md +++ b/docs/content/pgo-client/reference/pgo_delete.md @@ -9,7 +9,7 @@ Delete an Operator resource The delete command allows you to delete an Operator resource. For example: - pgo delete backup mycluster + pgo delete backup mycluster --target=backup-name pgo delete cluster mycluster pgo delete cluster mycluster --delete-data pgo delete cluster mycluster --delete-data --delete-backups @@ -39,7 +39,7 @@ pgo delete [flags] ### Options inherited from parent commands ``` - --apiserver-url string The URL for the PostgreSQL Operator apiserver that will process the request from the pgo client. + --apiserver-url string The URL for the PostgreSQL Operator apiserver that will process the request from the pgo client. Note that the URL should **not** end in a '/'. --debug Enable additional output for debugging. --disable-tls Disable TLS authentication to the Postgres Operator. --exclude-os-trust Exclude CA certs from OS default trust store @@ -64,4 +64,4 @@ pgo delete [flags] * [pgo delete schedule](/pgo-client/reference/pgo_delete_schedule/) - Delete a schedule * [pgo delete user](/pgo-client/reference/pgo_delete_user/) - Delete a user -###### Auto generated by spf13/cobra on 1-Oct-2020 +###### Auto generated by spf13/cobra on 14-Jan-2021 diff --git a/docs/content/pgo-client/reference/pgo_delete_backup.md b/docs/content/pgo-client/reference/pgo_delete_backup.md index 22bf95e3c9..c708d8e12b 100644 --- a/docs/content/pgo-client/reference/pgo_delete_backup.md +++ b/docs/content/pgo-client/reference/pgo_delete_backup.md @@ -7,9 +7,9 @@ Delete a backup ### Synopsis -Delete a backup. For example: +Delete a backup from pgBackRest. Requires a target backup. For example: - pgo delete backup mydatabase + pgo delete backup clustername --target=20201220-171801F ``` pgo delete backup [flags] @@ -18,13 +18,15 @@ pgo delete backup [flags] ### Options ``` - -h, --help help for backup + -h, --help help for backup + --no-prompt No command line confirmation. + --target string The backup to expire, e.g. "20201220-171801F". Use "pgo show backup" to determine the target. ``` ### Options inherited from parent commands ``` - --apiserver-url string The URL for the PostgreSQL Operator apiserver that will process the request from the pgo client. + --apiserver-url string The URL for the PostgreSQL Operator apiserver that will process the request from the pgo client. Note that the URL should **not** end in a '/'. --debug Enable additional output for debugging. --disable-tls Disable TLS authentication to the Postgres Operator. 
--exclude-os-trust Exclude CA certs from OS default trust store @@ -38,4 +40,4 @@ pgo delete backup [flags] * [pgo delete](/pgo-client/reference/pgo_delete/) - Delete an Operator resource -###### Auto generated by spf13/cobra on 1-Oct-2020 +###### Auto generated by spf13/cobra on 14-Jan-2021 diff --git a/docs/content/pgo-client/reference/pgo_delete_cluster.md b/docs/content/pgo-client/reference/pgo_delete_cluster.md index bf550cf53e..0243dc8c3c 100644 --- a/docs/content/pgo-client/reference/pgo_delete_cluster.md +++ b/docs/content/pgo-client/reference/pgo_delete_cluster.md @@ -30,7 +30,7 @@ pgo delete cluster [flags] ### Options inherited from parent commands ``` - --apiserver-url string The URL for the PostgreSQL Operator apiserver that will process the request from the pgo client. + --apiserver-url string The URL for the PostgreSQL Operator apiserver that will process the request from the pgo client. Note that the URL should **not** end in a '/'. --debug Enable additional output for debugging. --disable-tls Disable TLS authentication to the Postgres Operator. --exclude-os-trust Exclude CA certs from OS default trust store @@ -44,4 +44,4 @@ pgo delete cluster [flags] * [pgo delete](/pgo-client/reference/pgo_delete/) - Delete an Operator resource -###### Auto generated by spf13/cobra on 1-Oct-2020 +###### Auto generated by spf13/cobra on 14-Jan-2021 diff --git a/docs/content/pgo-client/reference/pgo_delete_label.md b/docs/content/pgo-client/reference/pgo_delete_label.md index b8ad151b73..dbd22e4bf6 100644 --- a/docs/content/pgo-client/reference/pgo_delete_label.md +++ b/docs/content/pgo-client/reference/pgo_delete_label.md @@ -21,14 +21,14 @@ pgo delete label [flags] ``` -h, --help help for label - --label string The label to delete for any selected or specified clusters. + --label strings Delete labels to apply to the PostgreSQL cluster, e.g. "key=value", "prefix/key=value". Can specify flag multiple times. -s, --selector string The selector to use for cluster filtering. ``` ### Options inherited from parent commands ``` - --apiserver-url string The URL for the PostgreSQL Operator apiserver that will process the request from the pgo client. + --apiserver-url string The URL for the PostgreSQL Operator apiserver that will process the request from the pgo client. Note that the URL should **not** end in a '/'. --debug Enable additional output for debugging. --disable-tls Disable TLS authentication to the Postgres Operator. --exclude-os-trust Exclude CA certs from OS default trust store @@ -42,4 +42,4 @@ pgo delete label [flags] * [pgo delete](/pgo-client/reference/pgo_delete/) - Delete an Operator resource -###### Auto generated by spf13/cobra on 1-Oct-2020 +###### Auto generated by spf13/cobra on 18-Jan-2021 diff --git a/docs/content/pgo-client/reference/pgo_delete_namespace.md b/docs/content/pgo-client/reference/pgo_delete_namespace.md index 63e9fa95db..a339bf0218 100644 --- a/docs/content/pgo-client/reference/pgo_delete_namespace.md +++ b/docs/content/pgo-client/reference/pgo_delete_namespace.md @@ -25,7 +25,7 @@ pgo delete namespace [flags] ### Options inherited from parent commands ``` - --apiserver-url string The URL for the PostgreSQL Operator apiserver that will process the request from the pgo client. + --apiserver-url string The URL for the PostgreSQL Operator apiserver that will process the request from the pgo client. Note that the URL should **not** end in a '/'. --debug Enable additional output for debugging. --disable-tls Disable TLS authentication to the Postgres Operator. 
--exclude-os-trust Exclude CA certs from OS default trust store @@ -39,4 +39,4 @@ pgo delete namespace [flags] * [pgo delete](/pgo-client/reference/pgo_delete/) - Delete an Operator resource -###### Auto generated by spf13/cobra on 1-Oct-2020 +###### Auto generated by spf13/cobra on 14-Jan-2021 diff --git a/docs/content/pgo-client/reference/pgo_delete_pgadmin.md b/docs/content/pgo-client/reference/pgo_delete_pgadmin.md index d48bacd9d0..5f778c2eb5 100644 --- a/docs/content/pgo-client/reference/pgo_delete_pgadmin.md +++ b/docs/content/pgo-client/reference/pgo_delete_pgadmin.md @@ -26,7 +26,7 @@ pgo delete pgadmin [flags] ### Options inherited from parent commands ``` - --apiserver-url string The URL for the PostgreSQL Operator apiserver that will process the request from the pgo client. + --apiserver-url string The URL for the PostgreSQL Operator apiserver that will process the request from the pgo client. Note that the URL should **not** end in a '/'. --debug Enable additional output for debugging. --disable-tls Disable TLS authentication to the Postgres Operator. --exclude-os-trust Exclude CA certs from OS default trust store @@ -40,4 +40,4 @@ pgo delete pgadmin [flags] * [pgo delete](/pgo-client/reference/pgo_delete/) - Delete an Operator resource -###### Auto generated by spf13/cobra on 1-Oct-2020 +###### Auto generated by spf13/cobra on 14-Jan-2021 diff --git a/docs/content/pgo-client/reference/pgo_delete_pgbouncer.md b/docs/content/pgo-client/reference/pgo_delete_pgbouncer.md index bcf71def78..b1524b1a78 100644 --- a/docs/content/pgo-client/reference/pgo_delete_pgbouncer.md +++ b/docs/content/pgo-client/reference/pgo_delete_pgbouncer.md @@ -27,7 +27,7 @@ pgo delete pgbouncer [flags] ### Options inherited from parent commands ``` - --apiserver-url string The URL for the PostgreSQL Operator apiserver that will process the request from the pgo client. + --apiserver-url string The URL for the PostgreSQL Operator apiserver that will process the request from the pgo client. Note that the URL should **not** end in a '/'. --debug Enable additional output for debugging. --disable-tls Disable TLS authentication to the Postgres Operator. --exclude-os-trust Exclude CA certs from OS default trust store @@ -41,4 +41,4 @@ pgo delete pgbouncer [flags] * [pgo delete](/pgo-client/reference/pgo_delete/) - Delete an Operator resource -###### Auto generated by spf13/cobra on 1-Oct-2020 +###### Auto generated by spf13/cobra on 14-Jan-2021 diff --git a/docs/content/pgo-client/reference/pgo_delete_pgorole.md b/docs/content/pgo-client/reference/pgo_delete_pgorole.md index f67359235d..682baf156e 100644 --- a/docs/content/pgo-client/reference/pgo_delete_pgorole.md +++ b/docs/content/pgo-client/reference/pgo_delete_pgorole.md @@ -26,7 +26,7 @@ pgo delete pgorole [flags] ### Options inherited from parent commands ``` - --apiserver-url string The URL for the PostgreSQL Operator apiserver that will process the request from the pgo client. + --apiserver-url string The URL for the PostgreSQL Operator apiserver that will process the request from the pgo client. Note that the URL should **not** end in a '/'. --debug Enable additional output for debugging. --disable-tls Disable TLS authentication to the Postgres Operator. 
--exclude-os-trust Exclude CA certs from OS default trust store @@ -40,4 +40,4 @@ pgo delete pgorole [flags] * [pgo delete](/pgo-client/reference/pgo_delete/) - Delete an Operator resource -###### Auto generated by spf13/cobra on 1-Oct-2020 +###### Auto generated by spf13/cobra on 14-Jan-2021 diff --git a/docs/content/pgo-client/reference/pgo_delete_pgouser.md b/docs/content/pgo-client/reference/pgo_delete_pgouser.md index 0a4bba911f..2bddabd0e6 100644 --- a/docs/content/pgo-client/reference/pgo_delete_pgouser.md +++ b/docs/content/pgo-client/reference/pgo_delete_pgouser.md @@ -26,7 +26,7 @@ pgo delete pgouser [flags] ### Options inherited from parent commands ``` - --apiserver-url string The URL for the PostgreSQL Operator apiserver that will process the request from the pgo client. + --apiserver-url string The URL for the PostgreSQL Operator apiserver that will process the request from the pgo client. Note that the URL should **not** end in a '/'. --debug Enable additional output for debugging. --disable-tls Disable TLS authentication to the Postgres Operator. --exclude-os-trust Exclude CA certs from OS default trust store @@ -40,4 +40,4 @@ pgo delete pgouser [flags] * [pgo delete](/pgo-client/reference/pgo_delete/) - Delete an Operator resource -###### Auto generated by spf13/cobra on 1-Oct-2020 +###### Auto generated by spf13/cobra on 14-Jan-2021 diff --git a/docs/content/pgo-client/reference/pgo_delete_policy.md b/docs/content/pgo-client/reference/pgo_delete_policy.md index cf40d26835..5f565e764b 100644 --- a/docs/content/pgo-client/reference/pgo_delete_policy.md +++ b/docs/content/pgo-client/reference/pgo_delete_policy.md @@ -26,7 +26,7 @@ pgo delete policy [flags] ### Options inherited from parent commands ``` - --apiserver-url string The URL for the PostgreSQL Operator apiserver that will process the request from the pgo client. + --apiserver-url string The URL for the PostgreSQL Operator apiserver that will process the request from the pgo client. Note that the URL should **not** end in a '/'. --debug Enable additional output for debugging. --disable-tls Disable TLS authentication to the Postgres Operator. --exclude-os-trust Exclude CA certs from OS default trust store @@ -40,4 +40,4 @@ pgo delete policy [flags] * [pgo delete](/pgo-client/reference/pgo_delete/) - Delete an Operator resource -###### Auto generated by spf13/cobra on 1-Oct-2020 +###### Auto generated by spf13/cobra on 14-Jan-2021 diff --git a/docs/content/pgo-client/reference/pgo_delete_schedule.md b/docs/content/pgo-client/reference/pgo_delete_schedule.md index b7de536bbd..600a797d11 100644 --- a/docs/content/pgo-client/reference/pgo_delete_schedule.md +++ b/docs/content/pgo-client/reference/pgo_delete_schedule.md @@ -29,7 +29,7 @@ pgo delete schedule [flags] ### Options inherited from parent commands ``` - --apiserver-url string The URL for the PostgreSQL Operator apiserver that will process the request from the pgo client. + --apiserver-url string The URL for the PostgreSQL Operator apiserver that will process the request from the pgo client. Note that the URL should **not** end in a '/'. --debug Enable additional output for debugging. --disable-tls Disable TLS authentication to the Postgres Operator. 
--exclude-os-trust Exclude CA certs from OS default trust store @@ -43,4 +43,4 @@ pgo delete schedule [flags] * [pgo delete](/pgo-client/reference/pgo_delete/) - Delete an Operator resource -###### Auto generated by spf13/cobra on 1-Oct-2020 +###### Auto generated by spf13/cobra on 14-Jan-2021 diff --git a/docs/content/pgo-client/reference/pgo_delete_user.md b/docs/content/pgo-client/reference/pgo_delete_user.md index ea4f7f75ae..48cef7d07c 100644 --- a/docs/content/pgo-client/reference/pgo_delete_user.md +++ b/docs/content/pgo-client/reference/pgo_delete_user.md @@ -29,7 +29,7 @@ pgo delete user [flags] ### Options inherited from parent commands ``` - --apiserver-url string The URL for the PostgreSQL Operator apiserver that will process the request from the pgo client. + --apiserver-url string The URL for the PostgreSQL Operator apiserver that will process the request from the pgo client. Note that the URL should **not** end in a '/'. --debug Enable additional output for debugging. --disable-tls Disable TLS authentication to the Postgres Operator. --exclude-os-trust Exclude CA certs from OS default trust store @@ -43,4 +43,4 @@ pgo delete user [flags] * [pgo delete](/pgo-client/reference/pgo_delete/) - Delete an Operator resource -###### Auto generated by spf13/cobra on 1-Oct-2020 +###### Auto generated by spf13/cobra on 14-Jan-2021 diff --git a/docs/content/pgo-client/reference/pgo_df.md b/docs/content/pgo-client/reference/pgo_df.md index 3a744dfbe9..7b81786c3a 100644 --- a/docs/content/pgo-client/reference/pgo_df.md +++ b/docs/content/pgo-client/reference/pgo_df.md @@ -29,7 +29,7 @@ pgo df [flags] ### Options inherited from parent commands ``` - --apiserver-url string The URL for the PostgreSQL Operator apiserver that will process the request from the pgo client. + --apiserver-url string The URL for the PostgreSQL Operator apiserver that will process the request from the pgo client. Note that the URL should **not** end in a '/'. --debug Enable additional output for debugging. --disable-tls Disable TLS authentication to the Postgres Operator. --exclude-os-trust Exclude CA certs from OS default trust store @@ -43,4 +43,4 @@ pgo df [flags] * [pgo](/pgo-client/reference/pgo/) - The pgo command line interface. -###### Auto generated by spf13/cobra on 1-Oct-2020 +###### Auto generated by spf13/cobra on 14-Jan-2021 diff --git a/docs/content/pgo-client/reference/pgo_failover.md b/docs/content/pgo-client/reference/pgo_failover.md index d60cefd417..deca90ecc7 100644 --- a/docs/content/pgo-client/reference/pgo_failover.md +++ b/docs/content/pgo-client/reference/pgo_failover.md @@ -9,7 +9,12 @@ Performs a manual failover Performs a manual failover. For example: - pgo failover mycluster + # have the operator select the best target candidate + pgo failover hippo + # get a list of target candidates + pgo failover hippo --query + # failover to a specific target candidate + pgo failover hippo --target=hippo-abcd ``` pgo failover [flags] @@ -18,6 +23,7 @@ pgo failover [flags] ### Options ``` + --force Force the failover to occur, regardless of the health of the target instance. Must be used with "--target". -h, --help help for failover --no-prompt No command line confirmation. --query Prints the list of failover candidates. @@ -27,7 +33,7 @@ pgo failover [flags] ### Options inherited from parent commands ``` - --apiserver-url string The URL for the PostgreSQL Operator apiserver that will process the request from the pgo client. 
+ --apiserver-url string The URL for the PostgreSQL Operator apiserver that will process the request from the pgo client. Note that the URL should **not** end in a '/'. --debug Enable additional output for debugging. --disable-tls Disable TLS authentication to the Postgres Operator. --exclude-os-trust Exclude CA certs from OS default trust store @@ -41,4 +47,4 @@ pgo failover [flags] * [pgo](/pgo-client/reference/pgo/) - The pgo command line interface. -###### Auto generated by spf13/cobra on 1-Oct-2020 +###### Auto generated by spf13/cobra on 14-Jan-2021 diff --git a/docs/content/pgo-client/reference/pgo_label.md b/docs/content/pgo-client/reference/pgo_label.md index 14f6486ad7..143b217ec6 100644 --- a/docs/content/pgo-client/reference/pgo_label.md +++ b/docs/content/pgo-client/reference/pgo_label.md @@ -23,14 +23,14 @@ pgo label [flags] ``` --dry-run Shows the clusters that the label would be applied to, without labelling them. -h, --help help for label - --label string The new label to apply for any selected or specified clusters. + --label strings Add labels to apply to the PostgreSQL cluster, e.g. "key=value", "prefix/key=value". Can specify flag multiple times. -s, --selector string The selector to use for cluster filtering. ``` ### Options inherited from parent commands ``` - --apiserver-url string The URL for the PostgreSQL Operator apiserver that will process the request from the pgo client. + --apiserver-url string The URL for the PostgreSQL Operator apiserver that will process the request from the pgo client. Note that the URL should **not** end in a '/'. --debug Enable additional output for debugging. --disable-tls Disable TLS authentication to the Postgres Operator. --exclude-os-trust Exclude CA certs from OS default trust store @@ -44,4 +44,4 @@ pgo label [flags] * [pgo](/pgo-client/reference/pgo/) - The pgo command line interface. -###### Auto generated by spf13/cobra on 1-Oct-2020 +###### Auto generated by spf13/cobra on 18-Jan-2021 diff --git a/docs/content/pgo-client/reference/pgo_reload.md b/docs/content/pgo-client/reference/pgo_reload.md index ebc8dc2e1a..f6191cfe17 100644 --- a/docs/content/pgo-client/reference/pgo_reload.md +++ b/docs/content/pgo-client/reference/pgo_reload.md @@ -26,7 +26,7 @@ pgo reload [flags] ### Options inherited from parent commands ``` - --apiserver-url string The URL for the PostgreSQL Operator apiserver that will process the request from the pgo client. + --apiserver-url string The URL for the PostgreSQL Operator apiserver that will process the request from the pgo client. Note that the URL should **not** end in a '/'. --debug Enable additional output for debugging. --disable-tls Disable TLS authentication to the Postgres Operator. --exclude-os-trust Exclude CA certs from OS default trust store @@ -40,4 +40,4 @@ pgo reload [flags] * [pgo](/pgo-client/reference/pgo/) - The pgo command line interface. -###### Auto generated by spf13/cobra on 1-Oct-2020 +###### Auto generated by spf13/cobra on 14-Jan-2021 diff --git a/docs/content/pgo-client/reference/pgo_restart.md b/docs/content/pgo-client/reference/pgo_restart.md index dc0517f1db..0a1b3e3f4c 100644 --- a/docs/content/pgo-client/reference/pgo_restart.md +++ b/docs/content/pgo-client/reference/pgo_restart.md @@ -12,6 +12,9 @@ Restarts one or more PostgreSQL databases within a PostgreSQL cluster. 
For example, to restart the primary and all replicas: pgo restart mycluster + To restart the primary and all replicas using a rolling update strategy: + pgo restart mycluster --rolling + Or target a specific instance within the cluster: pgo restart mycluster --target=mycluster-abcd @@ -29,13 +32,14 @@ pgo restart [flags] --no-prompt No command line confirmation. -o, --output string The output format. Supported types are: "json" --query Prints the list of instances that can be restarted. + --rolling Performs a rolling restart. Cannot be used with other flags. --target stringArray The instance that will be restarted. ``` ### Options inherited from parent commands ``` - --apiserver-url string The URL for the PostgreSQL Operator apiserver that will process the request from the pgo client. + --apiserver-url string The URL for the PostgreSQL Operator apiserver that will process the request from the pgo client. Note that the URL should **not** end in a '/'. --debug Enable additional output for debugging. --disable-tls Disable TLS authentication to the Postgres Operator. --exclude-os-trust Exclude CA certs from OS default trust store @@ -49,4 +53,4 @@ pgo restart [flags] * [pgo](/pgo-client/reference/pgo/) - The pgo command line interface. -###### Auto generated by spf13/cobra on 1-Oct-2020 +###### Auto generated by spf13/cobra on 14-Jan-2021 diff --git a/docs/content/pgo-client/reference/pgo_restore.md b/docs/content/pgo-client/reference/pgo_restore.md index 2d8561c64d..4894154e73 100644 --- a/docs/content/pgo-client/reference/pgo_restore.md +++ b/docs/content/pgo-client/reference/pgo_restore.md @@ -23,8 +23,9 @@ pgo restore [flags] --backup-type string The type of backup to restore from, default is pgbackrest. Valid types are pgbackrest or pgdump. -h, --help help for restore --no-prompt No command line confirmation. + --node-affinity-type string Sets the type of node affinity to use. Can be either preferred (default) or required. Must be used with --node-label --node-label string The node label (key=value) to use when scheduling the restore job, and in the case of a pgBackRest restore, also the new (i.e. restored) primary deployment. If not set, any node is used. - --pgbackrest-storage-type string The type of storage to use for a pgBackRest restore. Either "local", "s3". (default "local") + --pgbackrest-storage-type string The type of storage to use for a pgBackRest restore. Either "posix", "s3". (default "posix") -d, --pgdump-database string The name of the database pgdump will restore. (default "postgres") --pitr-target string The PITR target, being a PostgreSQL timestamp such as '2018-08-13 11:25:42.582117-04'. ``` @@ -32,7 +33,7 @@ pgo restore [flags] ### Options inherited from parent commands ``` - --apiserver-url string The URL for the PostgreSQL Operator apiserver that will process the request from the pgo client. + --apiserver-url string The URL for the PostgreSQL Operator apiserver that will process the request from the pgo client. Note that the URL should **not** end in a '/'. --debug Enable additional output for debugging. --disable-tls Disable TLS authentication to the Postgres Operator. --exclude-os-trust Exclude CA certs from OS default trust store @@ -46,4 +47,4 @@ pgo restore [flags] * [pgo](/pgo-client/reference/pgo/) - The pgo command line interface. 
-###### Auto generated by spf13/cobra on 1-Oct-2020 +###### Auto generated by spf13/cobra on 14-Jan-2021 diff --git a/docs/content/pgo-client/reference/pgo_scale.md b/docs/content/pgo-client/reference/pgo_scale.md index 684d506cc8..edcd61aab5 100644 --- a/docs/content/pgo-client/reference/pgo_scale.md +++ b/docs/content/pgo-client/reference/pgo_scale.md @@ -18,19 +18,23 @@ pgo scale [flags] ### Options ``` - --ccp-image-tag string The CCPImageTag to use for cluster creation. If specified, overrides the .pgo.yaml setting. - -h, --help help for scale - --no-prompt No command line confirmation. - --node-label string The node label (key) to use in placing the replica database. If not set, any node is used. - --replica-count int The replica count to apply to the clusters. (default 1) - --service-type string The service type to use in the replica Service. If not set, the default in pgo.yaml will be used. - --storage-config string The name of a Storage config in pgo.yaml to use for the replica storage. + --ccp-image-tag string The CCPImageTag to use for cluster creation. If specified, overrides the .pgo.yaml setting. + -h, --help help for scale + --no-prompt No command line confirmation. + --node-affinity-type string Sets the type of node affinity to use. Can be either preferred (default) or required. Must be used with --node-label + --node-label string The node label (key) to use in placing the replica database. If not set, any node is used. + --replica-count int The replica count to apply to the clusters. (default 1) + --service-type string The service type to use in the replica Service. If not set, the default in pgo.yaml will be used. + --storage-config string The name of a Storage config in pgo.yaml to use for the replica storage. + --toleration strings Set Pod tolerations for each PostgreSQL instance in a cluster. + The general format is "key=value:Effect" + For example, to add an Exists and an Equals toleration: "--toleration=ssd:NoSchedule,zone=east:NoSchedule" ``` ### Options inherited from parent commands ``` - --apiserver-url string The URL for the PostgreSQL Operator apiserver that will process the request from the pgo client. + --apiserver-url string The URL for the PostgreSQL Operator apiserver that will process the request from the pgo client. Note that the URL should **not** end in a '/'. --debug Enable additional output for debugging. --disable-tls Disable TLS authentication to the Postgres Operator. --exclude-os-trust Exclude CA certs from OS default trust store @@ -44,4 +48,4 @@ pgo scale [flags] * [pgo](/pgo-client/reference/pgo/) - The pgo command line interface. -###### Auto generated by spf13/cobra on 1-Oct-2020 +###### Auto generated by spf13/cobra on 14-Jan-2021 diff --git a/docs/content/pgo-client/reference/pgo_scaledown.md b/docs/content/pgo-client/reference/pgo_scaledown.md index deef6123d9..157e24ba12 100644 --- a/docs/content/pgo-client/reference/pgo_scaledown.md +++ b/docs/content/pgo-client/reference/pgo_scaledown.md @@ -32,7 +32,7 @@ pgo scaledown [flags] ### Options inherited from parent commands ``` - --apiserver-url string The URL for the PostgreSQL Operator apiserver that will process the request from the pgo client. + --apiserver-url string The URL for the PostgreSQL Operator apiserver that will process the request from the pgo client. Note that the URL should **not** end in a '/'. --debug Enable additional output for debugging. --disable-tls Disable TLS authentication to the Postgres Operator. 
--exclude-os-trust Exclude CA certs from OS default trust store @@ -46,4 +46,4 @@ pgo scaledown [flags] * [pgo](/pgo-client/reference/pgo/) - The pgo command line interface. -###### Auto generated by spf13/cobra on 1-Oct-2020 +###### Auto generated by spf13/cobra on 14-Jan-2021 diff --git a/docs/content/pgo-client/reference/pgo_show.md b/docs/content/pgo-client/reference/pgo_show.md index b71032f999..5a4e47350e 100644 --- a/docs/content/pgo-client/reference/pgo_show.md +++ b/docs/content/pgo-client/reference/pgo_show.md @@ -33,7 +33,7 @@ pgo show [flags] ### Options inherited from parent commands ``` - --apiserver-url string The URL for the PostgreSQL Operator apiserver that will process the request from the pgo client. + --apiserver-url string The URL for the PostgreSQL Operator apiserver that will process the request from the pgo client. Note that the URL should **not** end in a '/'. --debug Enable additional output for debugging. --disable-tls Disable TLS authentication to the Postgres Operator. --exclude-os-trust Exclude CA certs from OS default trust store @@ -60,4 +60,4 @@ pgo show [flags] * [pgo show user](/pgo-client/reference/pgo_show_user/) - Show user information * [pgo show workflow](/pgo-client/reference/pgo_show_workflow/) - Show workflow information -###### Auto generated by spf13/cobra on 1-Oct-2020 +###### Auto generated by spf13/cobra on 14-Jan-2021 diff --git a/docs/content/pgo-client/reference/pgo_show_backup.md b/docs/content/pgo-client/reference/pgo_show_backup.md index a15c426d54..adbb331666 100644 --- a/docs/content/pgo-client/reference/pgo_show_backup.md +++ b/docs/content/pgo-client/reference/pgo_show_backup.md @@ -25,7 +25,7 @@ pgo show backup [flags] ### Options inherited from parent commands ``` - --apiserver-url string The URL for the PostgreSQL Operator apiserver that will process the request from the pgo client. + --apiserver-url string The URL for the PostgreSQL Operator apiserver that will process the request from the pgo client. Note that the URL should **not** end in a '/'. --debug Enable additional output for debugging. --disable-tls Disable TLS authentication to the Postgres Operator. --exclude-os-trust Exclude CA certs from OS default trust store @@ -39,4 +39,4 @@ pgo show backup [flags] * [pgo show](/pgo-client/reference/pgo_show/) - Show the description of a cluster -###### Auto generated by spf13/cobra on 1-Oct-2020 +###### Auto generated by spf13/cobra on 14-Jan-2021 diff --git a/docs/content/pgo-client/reference/pgo_show_cluster.md b/docs/content/pgo-client/reference/pgo_show_cluster.md index 291d3b6ff6..b21ad78830 100644 --- a/docs/content/pgo-client/reference/pgo_show_cluster.md +++ b/docs/content/pgo-client/reference/pgo_show_cluster.md @@ -29,7 +29,7 @@ pgo show cluster [flags] ### Options inherited from parent commands ``` - --apiserver-url string The URL for the PostgreSQL Operator apiserver that will process the request from the pgo client. + --apiserver-url string The URL for the PostgreSQL Operator apiserver that will process the request from the pgo client. Note that the URL should **not** end in a '/'. --debug Enable additional output for debugging. --disable-tls Disable TLS authentication to the Postgres Operator. 
--exclude-os-trust Exclude CA certs from OS default trust store @@ -43,4 +43,4 @@ pgo show cluster [flags] * [pgo show](/pgo-client/reference/pgo_show/) - Show the description of a cluster -###### Auto generated by spf13/cobra on 1-Oct-2020 +###### Auto generated by spf13/cobra on 14-Jan-2021 diff --git a/docs/content/pgo-client/reference/pgo_show_config.md b/docs/content/pgo-client/reference/pgo_show_config.md index ae3cb75059..65e8efea4f 100644 --- a/docs/content/pgo-client/reference/pgo_show_config.md +++ b/docs/content/pgo-client/reference/pgo_show_config.md @@ -24,7 +24,7 @@ pgo show config [flags] ### Options inherited from parent commands ``` - --apiserver-url string The URL for the PostgreSQL Operator apiserver that will process the request from the pgo client. + --apiserver-url string The URL for the PostgreSQL Operator apiserver that will process the request from the pgo client. Note that the URL should **not** end in a '/'. --debug Enable additional output for debugging. --disable-tls Disable TLS authentication to the Postgres Operator. --exclude-os-trust Exclude CA certs from OS default trust store @@ -38,4 +38,4 @@ pgo show config [flags] * [pgo show](/pgo-client/reference/pgo_show/) - Show the description of a cluster -###### Auto generated by spf13/cobra on 1-Oct-2020 +###### Auto generated by spf13/cobra on 14-Jan-2021 diff --git a/docs/content/pgo-client/reference/pgo_show_namespace.md b/docs/content/pgo-client/reference/pgo_show_namespace.md index 9794a12bac..40ce90c983 100644 --- a/docs/content/pgo-client/reference/pgo_show_namespace.md +++ b/docs/content/pgo-client/reference/pgo_show_namespace.md @@ -25,7 +25,7 @@ pgo show namespace [flags] ### Options inherited from parent commands ``` - --apiserver-url string The URL for the PostgreSQL Operator apiserver that will process the request from the pgo client. + --apiserver-url string The URL for the PostgreSQL Operator apiserver that will process the request from the pgo client. Note that the URL should **not** end in a '/'. --debug Enable additional output for debugging. --disable-tls Disable TLS authentication to the Postgres Operator. --exclude-os-trust Exclude CA certs from OS default trust store @@ -39,4 +39,4 @@ pgo show namespace [flags] * [pgo show](/pgo-client/reference/pgo_show/) - Show the description of a cluster -###### Auto generated by spf13/cobra on 1-Oct-2020 +###### Auto generated by spf13/cobra on 14-Jan-2021 diff --git a/docs/content/pgo-client/reference/pgo_show_pgadmin.md b/docs/content/pgo-client/reference/pgo_show_pgadmin.md index 73574045aa..f4ebb5d617 100644 --- a/docs/content/pgo-client/reference/pgo_show_pgadmin.md +++ b/docs/content/pgo-client/reference/pgo_show_pgadmin.md @@ -28,7 +28,7 @@ pgo show pgadmin [flags] ### Options inherited from parent commands ``` - --apiserver-url string The URL for the PostgreSQL Operator apiserver that will process the request from the pgo client. + --apiserver-url string The URL for the PostgreSQL Operator apiserver that will process the request from the pgo client. Note that the URL should **not** end in a '/'. --debug Enable additional output for debugging. --disable-tls Disable TLS authentication to the Postgres Operator. 
--exclude-os-trust Exclude CA certs from OS default trust store @@ -42,4 +42,4 @@ pgo show pgadmin [flags] * [pgo show](/pgo-client/reference/pgo_show/) - Show the description of a cluster -###### Auto generated by spf13/cobra on 1-Oct-2020 +###### Auto generated by spf13/cobra on 14-Jan-2021 diff --git a/docs/content/pgo-client/reference/pgo_show_pgbouncer.md b/docs/content/pgo-client/reference/pgo_show_pgbouncer.md index 0a977097a8..707782ccd5 100644 --- a/docs/content/pgo-client/reference/pgo_show_pgbouncer.md +++ b/docs/content/pgo-client/reference/pgo_show_pgbouncer.md @@ -28,7 +28,7 @@ pgo show pgbouncer [flags] ### Options inherited from parent commands ``` - --apiserver-url string The URL for the PostgreSQL Operator apiserver that will process the request from the pgo client. + --apiserver-url string The URL for the PostgreSQL Operator apiserver that will process the request from the pgo client. Note that the URL should **not** end in a '/'. --debug Enable additional output for debugging. --disable-tls Disable TLS authentication to the Postgres Operator. --exclude-os-trust Exclude CA certs from OS default trust store @@ -42,4 +42,4 @@ pgo show pgbouncer [flags] * [pgo show](/pgo-client/reference/pgo_show/) - Show the description of a cluster -###### Auto generated by spf13/cobra on 1-Oct-2020 +###### Auto generated by spf13/cobra on 14-Jan-2021 diff --git a/docs/content/pgo-client/reference/pgo_show_pgorole.md b/docs/content/pgo-client/reference/pgo_show_pgorole.md index f8241d4e33..ca967aaeb6 100644 --- a/docs/content/pgo-client/reference/pgo_show_pgorole.md +++ b/docs/content/pgo-client/reference/pgo_show_pgorole.md @@ -25,7 +25,7 @@ pgo show pgorole [flags] ### Options inherited from parent commands ``` - --apiserver-url string The URL for the PostgreSQL Operator apiserver that will process the request from the pgo client. + --apiserver-url string The URL for the PostgreSQL Operator apiserver that will process the request from the pgo client. Note that the URL should **not** end in a '/'. --debug Enable additional output for debugging. --disable-tls Disable TLS authentication to the Postgres Operator. --exclude-os-trust Exclude CA certs from OS default trust store @@ -39,4 +39,4 @@ pgo show pgorole [flags] * [pgo show](/pgo-client/reference/pgo_show/) - Show the description of a cluster -###### Auto generated by spf13/cobra on 1-Oct-2020 +###### Auto generated by spf13/cobra on 14-Jan-2021 diff --git a/docs/content/pgo-client/reference/pgo_show_pgouser.md b/docs/content/pgo-client/reference/pgo_show_pgouser.md index 4881d2f1fb..1ad60b4303 100644 --- a/docs/content/pgo-client/reference/pgo_show_pgouser.md +++ b/docs/content/pgo-client/reference/pgo_show_pgouser.md @@ -25,7 +25,7 @@ pgo show pgouser [flags] ### Options inherited from parent commands ``` - --apiserver-url string The URL for the PostgreSQL Operator apiserver that will process the request from the pgo client. + --apiserver-url string The URL for the PostgreSQL Operator apiserver that will process the request from the pgo client. Note that the URL should **not** end in a '/'. --debug Enable additional output for debugging. --disable-tls Disable TLS authentication to the Postgres Operator. 
--exclude-os-trust Exclude CA certs from OS default trust store @@ -39,4 +39,4 @@ pgo show pgouser [flags] * [pgo show](/pgo-client/reference/pgo_show/) - Show the description of a cluster -###### Auto generated by spf13/cobra on 1-Oct-2020 +###### Auto generated by spf13/cobra on 14-Jan-2021 diff --git a/docs/content/pgo-client/reference/pgo_show_policy.md b/docs/content/pgo-client/reference/pgo_show_policy.md index ddeaedbd09..6303392491 100644 --- a/docs/content/pgo-client/reference/pgo_show_policy.md +++ b/docs/content/pgo-client/reference/pgo_show_policy.md @@ -26,7 +26,7 @@ pgo show policy [flags] ### Options inherited from parent commands ``` - --apiserver-url string The URL for the PostgreSQL Operator apiserver that will process the request from the pgo client. + --apiserver-url string The URL for the PostgreSQL Operator apiserver that will process the request from the pgo client. Note that the URL should **not** end in a '/'. --debug Enable additional output for debugging. --disable-tls Disable TLS authentication to the Postgres Operator. --exclude-os-trust Exclude CA certs from OS default trust store @@ -40,4 +40,4 @@ pgo show policy [flags] * [pgo show](/pgo-client/reference/pgo_show/) - Show the description of a cluster -###### Auto generated by spf13/cobra on 1-Oct-2020 +###### Auto generated by spf13/cobra on 14-Jan-2021 diff --git a/docs/content/pgo-client/reference/pgo_show_pvc.md b/docs/content/pgo-client/reference/pgo_show_pvc.md index ea8312dd36..16d7d3f457 100644 --- a/docs/content/pgo-client/reference/pgo_show_pvc.md +++ b/docs/content/pgo-client/reference/pgo_show_pvc.md @@ -26,7 +26,7 @@ pgo show pvc [flags] ### Options inherited from parent commands ``` - --apiserver-url string The URL for the PostgreSQL Operator apiserver that will process the request from the pgo client. + --apiserver-url string The URL for the PostgreSQL Operator apiserver that will process the request from the pgo client. Note that the URL should **not** end in a '/'. --debug Enable additional output for debugging. --disable-tls Disable TLS authentication to the Postgres Operator. --exclude-os-trust Exclude CA certs from OS default trust store @@ -40,4 +40,4 @@ pgo show pvc [flags] * [pgo show](/pgo-client/reference/pgo_show/) - Show the description of a cluster -###### Auto generated by spf13/cobra on 1-Oct-2020 +###### Auto generated by spf13/cobra on 14-Jan-2021 diff --git a/docs/content/pgo-client/reference/pgo_show_schedule.md b/docs/content/pgo-client/reference/pgo_show_schedule.md index 7d39ac4cff..eb9033fc1c 100644 --- a/docs/content/pgo-client/reference/pgo_show_schedule.md +++ b/docs/content/pgo-client/reference/pgo_show_schedule.md @@ -29,7 +29,7 @@ pgo show schedule [flags] ### Options inherited from parent commands ``` - --apiserver-url string The URL for the PostgreSQL Operator apiserver that will process the request from the pgo client. + --apiserver-url string The URL for the PostgreSQL Operator apiserver that will process the request from the pgo client. Note that the URL should **not** end in a '/'. --debug Enable additional output for debugging. --disable-tls Disable TLS authentication to the Postgres Operator. 
--exclude-os-trust Exclude CA certs from OS default trust store @@ -43,4 +43,4 @@ pgo show schedule [flags] * [pgo show](/pgo-client/reference/pgo_show/) - Show the description of a cluster -###### Auto generated by spf13/cobra on 1-Oct-2020 +###### Auto generated by spf13/cobra on 14-Jan-2021 diff --git a/docs/content/pgo-client/reference/pgo_show_user.md b/docs/content/pgo-client/reference/pgo_show_user.md index 7dcdfe2b31..653fda1ede 100644 --- a/docs/content/pgo-client/reference/pgo_show_user.md +++ b/docs/content/pgo-client/reference/pgo_show_user.md @@ -31,7 +31,7 @@ pgo show user [flags] ### Options inherited from parent commands ``` - --apiserver-url string The URL for the PostgreSQL Operator apiserver that will process the request from the pgo client. + --apiserver-url string The URL for the PostgreSQL Operator apiserver that will process the request from the pgo client. Note that the URL should **not** end in a '/'. --debug Enable additional output for debugging. --disable-tls Disable TLS authentication to the Postgres Operator. --exclude-os-trust Exclude CA certs from OS default trust store @@ -45,4 +45,4 @@ pgo show user [flags] * [pgo show](/pgo-client/reference/pgo_show/) - Show the description of a cluster -###### Auto generated by spf13/cobra on 1-Oct-2020 +###### Auto generated by spf13/cobra on 14-Jan-2021 diff --git a/docs/content/pgo-client/reference/pgo_show_workflow.md b/docs/content/pgo-client/reference/pgo_show_workflow.md index 28d5f11666..722ad453ea 100644 --- a/docs/content/pgo-client/reference/pgo_show_workflow.md +++ b/docs/content/pgo-client/reference/pgo_show_workflow.md @@ -24,7 +24,7 @@ pgo show workflow [flags] ### Options inherited from parent commands ``` - --apiserver-url string The URL for the PostgreSQL Operator apiserver that will process the request from the pgo client. + --apiserver-url string The URL for the PostgreSQL Operator apiserver that will process the request from the pgo client. Note that the URL should **not** end in a '/'. --debug Enable additional output for debugging. --disable-tls Disable TLS authentication to the Postgres Operator. --exclude-os-trust Exclude CA certs from OS default trust store @@ -38,4 +38,4 @@ pgo show workflow [flags] * [pgo show](/pgo-client/reference/pgo_show/) - Show the description of a cluster -###### Auto generated by spf13/cobra on 1-Oct-2020 +###### Auto generated by spf13/cobra on 14-Jan-2021 diff --git a/docs/content/pgo-client/reference/pgo_status.md b/docs/content/pgo-client/reference/pgo_status.md index 21a4a84464..f25a662bd1 100644 --- a/docs/content/pgo-client/reference/pgo_status.md +++ b/docs/content/pgo-client/reference/pgo_status.md @@ -25,7 +25,7 @@ pgo status [flags] ### Options inherited from parent commands ``` - --apiserver-url string The URL for the PostgreSQL Operator apiserver that will process the request from the pgo client. + --apiserver-url string The URL for the PostgreSQL Operator apiserver that will process the request from the pgo client. Note that the URL should **not** end in a '/'. --debug Enable additional output for debugging. --disable-tls Disable TLS authentication to the Postgres Operator. --exclude-os-trust Exclude CA certs from OS default trust store @@ -39,4 +39,4 @@ pgo status [flags] * [pgo](/pgo-client/reference/pgo/) - The pgo command line interface. 
-###### Auto generated by spf13/cobra on 1-Oct-2020 +###### Auto generated by spf13/cobra on 14-Jan-2021 diff --git a/docs/content/pgo-client/reference/pgo_test.md b/docs/content/pgo-client/reference/pgo_test.md index 36671b5f6e..690efc389d 100644 --- a/docs/content/pgo-client/reference/pgo_test.md +++ b/docs/content/pgo-client/reference/pgo_test.md @@ -29,7 +29,7 @@ pgo test [flags] ### Options inherited from parent commands ``` - --apiserver-url string The URL for the PostgreSQL Operator apiserver that will process the request from the pgo client. + --apiserver-url string The URL for the PostgreSQL Operator apiserver that will process the request from the pgo client. Note that the URL should **not** end in a '/'. --debug Enable additional output for debugging. --disable-tls Disable TLS authentication to the Postgres Operator. --exclude-os-trust Exclude CA certs from OS default trust store @@ -43,4 +43,4 @@ pgo test [flags] * [pgo](/pgo-client/reference/pgo/) - The pgo command line interface. -###### Auto generated by spf13/cobra on 1-Oct-2020 +###### Auto generated by spf13/cobra on 14-Jan-2021 diff --git a/docs/content/pgo-client/reference/pgo_update.md b/docs/content/pgo-client/reference/pgo_update.md index 669c841701..d54fe5dcce 100644 --- a/docs/content/pgo-client/reference/pgo_update.md +++ b/docs/content/pgo-client/reference/pgo_update.md @@ -9,8 +9,8 @@ Update a pgouser, pgorole, or cluster The update command allows you to update a pgouser, pgorole, or cluster. For example: - pgo update cluster --selector=name=mycluster --autofail=false - pgo update cluster --all --autofail=true + pgo update cluster --selector=name=mycluster --disable-autofail + pgo update cluster --all --enable-autofail pgo update namespace mynamespace pgo update pgbouncer mycluster --rotate-password pgo update pgorole somerole --pgorole-permission="Cat" @@ -33,7 +33,7 @@ pgo update [flags] ### Options inherited from parent commands ``` - --apiserver-url string The URL for the PostgreSQL Operator apiserver that will process the request from the pgo client. + --apiserver-url string The URL for the PostgreSQL Operator apiserver that will process the request from the pgo client. Note that the URL should **not** end in a '/'. --debug Enable additional output for debugging. --disable-tls Disable TLS authentication to the Postgres Operator. --exclude-os-trust Exclude CA certs from OS default trust store @@ -53,4 +53,4 @@ pgo update [flags] * [pgo update pgouser](/pgo-client/reference/pgo_update_pgouser/) - Update a pgouser * [pgo update user](/pgo-client/reference/pgo_update_user/) - Update a PostgreSQL user -###### Auto generated by spf13/cobra on 1-Oct-2020 +###### Auto generated by spf13/cobra on 14-Jan-2021 diff --git a/docs/content/pgo-client/reference/pgo_update_cluster.md b/docs/content/pgo-client/reference/pgo_update_cluster.md index 007c34b0fc..ba9b04e683 100644 --- a/docs/content/pgo-client/reference/pgo_update_cluster.md +++ b/docs/content/pgo-client/reference/pgo_update_cluster.md @@ -9,7 +9,7 @@ Update a PostgreSQL cluster Update a PostgreSQL cluster. For example: - pgo update cluster mycluster --autofail=false + pgo update cluster mycluster --disable-autofail pgo update cluster mycluster myothercluster --disable-autofail pgo update cluster --selector=name=mycluster --disable-autofail pgo update cluster --all --enable-autofail @@ -38,12 +38,17 @@ pgo update cluster [flags] --cpu string Set the number of millicores to request for the CPU, e.g. "100m" or "0.1". 
--cpu-limit string Set the number of millicores to limit for the CPU, e.g. "100m" or "0.1". --disable-autofail Disables autofail capabilities in the cluster. + --disable-metrics Disable the metrics collection sidecar. May cause brief downtime. + --disable-pgbadger Disable the pgBadger sidecar. May cause brief downtime. --enable-autofail Enables autofail capabilities in the cluster. + --enable-metrics Enable the metrics collection sidecar. May cause brief downtime. + --enable-pgbadger Enable the pgBadger sidecar. May cause brief downtime. --enable-standby Enables standby mode in the cluster(s) specified. --exporter-cpu string Set the number of millicores to request for CPU for the Crunchy Postgres Exporter sidecar container, e.g. "100m" or "0.1". --exporter-cpu-limit string Set the number of millicores to limit for CPU for the Crunchy Postgres Exporter sidecar container, e.g. "100m" or "0.1". --exporter-memory string Set the amount of memory to request for the Crunchy Postgres Exporter sidecar container. --exporter-memory-limit string Set the amount of memory to limit for the Crunchy Postgres Exporter sidecar container. + --exporter-rotate-password Used to rotate the password for the metrics collection agent. -h, --help help for cluster --memory string Set the amount of RAM to request, e.g. 1GiB. --memory-limit string Set the amount of RAM to limit, e.g. 1GiB. @@ -54,6 +59,7 @@ pgo update cluster [flags] --pgbackrest-memory-limit string Set the amount of memory to limit for the pgBackRest repository. --promote-standby Disables standby mode (if enabled) and promotes the cluster(s) specified. -s, --selector string The selector to use for cluster filtering. + --service-type string The Service type to use for the PostgreSQL cluster. If not set, the pgo.yaml default will be used. --shutdown Shutdown the database cluster if it is currently running. --startup Restart the database cluster if it is currently shutdown. --tablespace strings Add a PostgreSQL tablespace on the cluster, e.g. "name=ts1:storageconfig=nfsstorage". The format is a key/value map that is delimited by "=" and separated by ":". The following parameters are available: @@ -65,12 +71,17 @@ pgo update cluster [flags] For example, to create a tablespace with the NFS storage configuration with a PVC of size 10GiB: --tablespace=name=ts1:storageconfig=nfsstorage:pvcsize=10Gi + --toleration strings Set Pod tolerations for each PostgreSQL instance in a cluster. + The general format is "key=value:Effect" + For example, to add an Exists and an Equals toleration: "--toleration=ssd:NoSchedule,zone=east:NoSchedule" + A toleration can be removed by adding a "-" to the end, for example: + --toleration=ssd:NoSchedule- ``` ### Options inherited from parent commands ``` - --apiserver-url string The URL for the PostgreSQL Operator apiserver that will process the request from the pgo client. + --apiserver-url string The URL for the PostgreSQL Operator apiserver that will process the request from the pgo client. Note that the URL should **not** end in a '/'. --debug Enable additional output for debugging. --disable-tls Disable TLS authentication to the Postgres Operator.
--exclude-os-trust Exclude CA certs from OS default trust store @@ -84,4 +95,4 @@ pgo update cluster [flags] * [pgo update](/pgo-client/reference/pgo_update/) - Update a pgouser, pgorole, or cluster -###### Auto generated by spf13/cobra on 1-Oct-2020 +###### Auto generated by spf13/cobra on 14-Jan-2021 diff --git a/docs/content/pgo-client/reference/pgo_update_namespace.md b/docs/content/pgo-client/reference/pgo_update_namespace.md index 396cb9d30b..d176bd9dd4 100644 --- a/docs/content/pgo-client/reference/pgo_update_namespace.md +++ b/docs/content/pgo-client/reference/pgo_update_namespace.md @@ -23,7 +23,7 @@ pgo update namespace [flags] ### Options inherited from parent commands ``` - --apiserver-url string The URL for the PostgreSQL Operator apiserver that will process the request from the pgo client. + --apiserver-url string The URL for the PostgreSQL Operator apiserver that will process the request from the pgo client. Note that the URL should **not** end in a '/'. --debug Enable additional output for debugging. --disable-tls Disable TLS authentication to the Postgres Operator. --exclude-os-trust Exclude CA certs from OS default trust store @@ -37,4 +37,4 @@ pgo update namespace [flags] * [pgo update](/pgo-client/reference/pgo_update/) - Update a pgouser, pgorole, or cluster -###### Auto generated by spf13/cobra on 1-Oct-2020 +###### Auto generated by spf13/cobra on 14-Jan-2021 diff --git a/docs/content/pgo-client/reference/pgo_update_pgbouncer.md b/docs/content/pgo-client/reference/pgo_update_pgbouncer.md index ec51137fd2..b97a9f60c4 100644 --- a/docs/content/pgo-client/reference/pgo_update_pgbouncer.md +++ b/docs/content/pgo-client/reference/pgo_update_pgbouncer.md @@ -30,12 +30,13 @@ pgo update pgbouncer [flags] --replicas int32 Set the total number of pgBouncer instances to deploy. If not set, defaults to 1. --rotate-password Used to rotate the pgBouncer service account password. Can cause interruption of service. -s, --selector string The selector to use for cluster filtering. + --service-type string The Service type to use for pgBouncer. ``` ### Options inherited from parent commands ``` - --apiserver-url string The URL for the PostgreSQL Operator apiserver that will process the request from the pgo client. + --apiserver-url string The URL for the PostgreSQL Operator apiserver that will process the request from the pgo client. Note that the URL should **not** end in a '/'. --debug Enable additional output for debugging. --disable-tls Disable TLS authentication to the Postgres Operator. --exclude-os-trust Exclude CA certs from OS default trust store @@ -49,4 +50,4 @@ pgo update pgbouncer [flags] * [pgo update](/pgo-client/reference/pgo_update/) - Update a pgouser, pgorole, or cluster -###### Auto generated by spf13/cobra on 1-Oct-2020 +###### Auto generated by spf13/cobra on 14-Jan-2021 diff --git a/docs/content/pgo-client/reference/pgo_update_pgorole.md b/docs/content/pgo-client/reference/pgo_update_pgorole.md index 3c1706b76a..448a4673dc 100644 --- a/docs/content/pgo-client/reference/pgo_update_pgorole.md +++ b/docs/content/pgo-client/reference/pgo_update_pgorole.md @@ -25,7 +25,7 @@ pgo update pgorole [flags] ### Options inherited from parent commands ``` - --apiserver-url string The URL for the PostgreSQL Operator apiserver that will process the request from the pgo client. + --apiserver-url string The URL for the PostgreSQL Operator apiserver that will process the request from the pgo client. Note that the URL should **not** end in a '/'. 
--debug Enable additional output for debugging. --disable-tls Disable TLS authentication to the Postgres Operator. --exclude-os-trust Exclude CA certs from OS default trust store @@ -39,4 +39,4 @@ pgo update pgorole [flags] * [pgo update](/pgo-client/reference/pgo_update/) - Update a pgouser, pgorole, or cluster -###### Auto generated by spf13/cobra on 1-Oct-2020 +###### Auto generated by spf13/cobra on 14-Jan-2021 diff --git a/docs/content/pgo-client/reference/pgo_update_pgouser.md b/docs/content/pgo-client/reference/pgo_update_pgouser.md index 2991939cd7..a460b11ece 100644 --- a/docs/content/pgo-client/reference/pgo_update_pgouser.md +++ b/docs/content/pgo-client/reference/pgo_update_pgouser.md @@ -30,7 +30,7 @@ pgo update pgouser [flags] ### Options inherited from parent commands ``` - --apiserver-url string The URL for the PostgreSQL Operator apiserver that will process the request from the pgo client. + --apiserver-url string The URL for the PostgreSQL Operator apiserver that will process the request from the pgo client. Note that the URL should **not** end in a '/'. --debug Enable additional output for debugging. --disable-tls Disable TLS authentication to the Postgres Operator. --exclude-os-trust Exclude CA certs from OS default trust store @@ -44,4 +44,4 @@ pgo update pgouser [flags] * [pgo update](/pgo-client/reference/pgo_update/) - Update a pgouser, pgorole, or cluster -###### Auto generated by spf13/cobra on 1-Oct-2020 +###### Auto generated by spf13/cobra on 14-Jan-2021 diff --git a/docs/content/pgo-client/reference/pgo_update_user.md b/docs/content/pgo-client/reference/pgo_update_user.md index 25c18b73da..5678720621 100644 --- a/docs/content/pgo-client/reference/pgo_update_user.md +++ b/docs/content/pgo-client/reference/pgo_update_user.md @@ -32,27 +32,28 @@ pgo update user [flags] ### Options ``` - --all all clusters. - --disable-login Disables a PostgreSQL user from being able to log into the PostgreSQL cluster. - --enable-login Enables a PostgreSQL user to be able to log into the PostgreSQL cluster. - --expire-user Performs expiring a user if set to true. - --expired int Updates passwords that will expire in X days using an autogenerated password. - -h, --help help for user - -o, --output string The output format. Supported types are: "json" - --password string Specifies the user password when updating a user password or creating a new user. If --rotate-password is set as well, --password takes precedence. - --password-length int If no password is supplied, sets the length of the automatically generated password. Defaults to the value set on the server. - --password-type string The type of password hashing to use.Choices are: (md5, scram-sha-256). This only takes effect if the password is being changed. (default "md5") - --rotate-password Rotates the user's password with an automatically generated password. The length of the password is determine by either --password-length or the value set on the server, in that order. - -s, --selector string The selector to use for cluster filtering. - --username string Updates the postgres user on selective clusters. - --valid-always Sets a password to never expire based on expiration time. Takes precedence over --valid-days - --valid-days int Sets the number of days that a password is valid. Defaults to the server value. + --all all clusters. + --disable-login Disables a PostgreSQL user from being able to log into the PostgreSQL cluster. + --enable-login Enables a PostgreSQL user to be able to log into the PostgreSQL cluster. 
+ --expire-user Performs expiring a user if set to true. + --expired int Updates passwords that will expire in X days using an autogenerated password. + -h, --help help for user + -o, --output string The output format. Supported types are: "json" + --password string Specifies the user password when updating a user password or creating a new user. If --rotate-password is set as well, --password takes precedence. + --password-length int If no password is supplied, sets the length of the automatically generated password. Defaults to the value set on the server. + --password-type string The type of password hashing to use. Choices are: (md5, scram-sha-256). This only takes effect if the password is being changed. (default "md5") + --rotate-password Rotates the user's password with an automatically generated password. The length of the password is determined by either --password-length or the value set on the server, in that order. + -s, --selector string The selector to use for cluster filtering. + --set-system-account-password Allows for a system account password to be set. + --username string Updates the postgres user on selective clusters. + --valid-always Sets a password to never expire based on expiration time. Takes precedence over --valid-days + --valid-days int Sets the number of days that a password is valid. Defaults to the server value. ``` ### Options inherited from parent commands ``` - --apiserver-url string The URL for the PostgreSQL Operator apiserver that will process the request from the pgo client. + --apiserver-url string The URL for the PostgreSQL Operator apiserver that will process the request from the pgo client. Note that the URL should **not** end in a '/'. --debug Enable additional output for debugging. --disable-tls Disable TLS authentication to the Postgres Operator. --exclude-os-trust Exclude CA certs from OS default trust store @@ -66,4 +67,4 @@ pgo update user [flags] * [pgo update](/pgo-client/reference/pgo_update/) - Update a pgouser, pgorole, or cluster -###### Auto generated by spf13/cobra on 1-Oct-2020 +###### Auto generated by spf13/cobra on 14-Jan-2021 diff --git a/docs/content/pgo-client/reference/pgo_upgrade.md b/docs/content/pgo-client/reference/pgo_upgrade.md index 78d787f6f0..7ed0c642c9 100644 --- a/docs/content/pgo-client/reference/pgo_upgrade.md +++ b/docs/content/pgo-client/reference/pgo_upgrade.md @@ -7,10 +7,10 @@ Perform a cluster upgrade. ### Synopsis -UPGRADE allows you to perform a comprehensive PGCluster upgrade - (for use after performing a Postgres Operator upgrade). +UPGRADE allows you to perform a comprehensive PGCluster upgrade + (for use after performing a Postgres Operator upgrade). For example: - + pgo upgrade mycluster Upgrades the cluster for use with the upgraded Postgres Operator version. @@ -24,12 +24,13 @@ pgo upgrade [flags] --ccp-image-tag string The image tag to use for cluster creation. If specified, it overrides the default configuration setting and disables tag validation checking. -h, --help help for upgrade --ignore-validation Disables version checking against the image tags when performing a cluster upgrade. + --no-prompt No command line confirmation. ``` ### Options inherited from parent commands ``` - --apiserver-url string The URL for the PostgreSQL Operator apiserver that will process the request from the pgo client. + --apiserver-url string The URL for the PostgreSQL Operator apiserver that will process the request from the pgo client. Note that the URL should **not** end in a '/'.
--debug Enable additional output for debugging. --disable-tls Disable TLS authentication to the Postgres Operator. --exclude-os-trust Exclude CA certs from OS default trust store @@ -43,4 +44,4 @@ pgo upgrade [flags] * [pgo](/pgo-client/reference/pgo/) - The pgo command line interface. -###### Auto generated by spf13/cobra on 1-Oct-2020 +###### Auto generated by spf13/cobra on 14-Jan-2021 diff --git a/docs/content/pgo-client/reference/pgo_version.md b/docs/content/pgo-client/reference/pgo_version.md index 5bf407bc73..cdadd2cf95 100644 --- a/docs/content/pgo-client/reference/pgo_version.md +++ b/docs/content/pgo-client/reference/pgo_version.md @@ -25,7 +25,7 @@ pgo version [flags] ### Options inherited from parent commands ``` - --apiserver-url string The URL for the PostgreSQL Operator apiserver that will process the request from the pgo client. + --apiserver-url string The URL for the PostgreSQL Operator apiserver that will process the request from the pgo client. Note that the URL should **not** end in a '/'. --debug Enable additional output for debugging. --disable-tls Disable TLS authentication to the Postgres Operator. --exclude-os-trust Exclude CA certs from OS default trust store @@ -39,4 +39,4 @@ pgo version [flags] * [pgo](/pgo-client/reference/pgo/) - The pgo command line interface. -###### Auto generated by spf13/cobra on 1-Oct-2020 +###### Auto generated by spf13/cobra on 14-Jan-2021 diff --git a/docs/content/pgo-client/reference/pgo_watch.md b/docs/content/pgo-client/reference/pgo_watch.md index 0f3e721545..35416c3b1a 100644 --- a/docs/content/pgo-client/reference/pgo_watch.md +++ b/docs/content/pgo-client/reference/pgo_watch.md @@ -25,7 +25,7 @@ pgo watch [flags] ### Options inherited from parent commands ``` - --apiserver-url string The URL for the PostgreSQL Operator apiserver that will process the request from the pgo client. + --apiserver-url string The URL for the PostgreSQL Operator apiserver that will process the request from the pgo client. Note that the URL should **not** end in a '/'. --debug Enable additional output for debugging. --disable-tls Disable TLS authentication to the Postgres Operator. --exclude-os-trust Exclude CA certs from OS default trust store @@ -39,4 +39,4 @@ pgo watch [flags] * [pgo](/pgo-client/reference/pgo/) - The pgo command line interface. -###### Auto generated by spf13/cobra on 1-Oct-2020 +###### Auto generated by spf13/cobra on 14-Jan-2021 diff --git a/docs/content/quickstart/_index.md b/docs/content/quickstart/_index.md index dd29d467be..a74f532e24 100644 --- a/docs/content/quickstart/_index.md +++ b/docs/content/quickstart/_index.md @@ -5,26 +5,27 @@ draft: false weight: 10 --- -# PostgreSQL Operator Quickstart +# PGO: PostgreSQL Operator Quickstart -Can't wait to try out the PostgreSQL Operator? Let us show you the quickest possible path to getting up and running. +Can't wait to try out PGO, the Postgres Operator from Crunchy Data? Let us show +you the quickest possible path to getting up and running. 
-There are two paths to quickly get you up and running with the PostgreSQL Operator: +There are two paths to quickly get you up and running with PGO: -- [Installation via the PostgreSQL Operator Installer](#postgresql-operator-installer) +- [Installation via the Postgres Operator Installer](#postgresql-operator-installer) - Installation via a Marketplace - Installation via [Operator Lifecycle Manager]({{< relref "/installation/other/operator-hub.md" >}}) - Installation via [Google Cloud Marketplace]({{< relref "/installation/other/google-cloud-marketplace.md" >}}) Marketplaces can help you get more quickly started in your environment as they provide a mostly automated process, but there are a few steps you will need to take to ensure you can fully utilize your PostgreSQL Operator environment. You can find out more information about how to get started with one of those installers in the [Installation]({{< relref "/installation/_index.md" >}}) section. -# PostgreSQL Operator Installer +# Postgres Operator Installer Below will guide you through the steps for installing and using the PostgreSQL Operator using an installer that works with Ansible. ## Installation -### Install the PostgreSQL Operator +### Install PGO: the PostgreSQL Operator On environments that have a [default storage class](https://kubernetes.io/docs/tasks/administer-cluster/change-default-storage-class/) set up (which is most modern Kubernetes environments), the below command should work: @@ -39,14 +40,14 @@ If your install is unsuccessful, you may need to modify your configuration. Plea ### Install the `pgo` Client -During or after the installation of the PostgreSQL Operator, download the `pgo` client set up script. This will help set up your local environment for using the PostgreSQL Operator: +During or after the installation of PGO: the Postgres Operator, download the `pgo` client set up script. This will help set up your local environment for using the Postgres Operator: ``` curl https://raw.githubusercontent.com/CrunchyData/postgres-operator/v{{< param operatorVersion >}}/installers/kubectl/client-setup.sh > client-setup.sh chmod +x client-setup.sh ``` -When the PostgreSQL Operator is done installing, run the client setup script: +When the Postgres Operator is done installing, run the client setup script: ``` ./client-setup.sh @@ -83,9 +84,9 @@ source ~/.bashrc ### Post-Installation Setup -Below are a few steps to check if the PostgreSQL Operator is up and running. +Below are a few steps to check if PGO: the Postgres Operator is up and running. -By default, the PostgreSQL Operator installs into a namespace called `pgo`. First, see that the Kubernetes Deployment of the Operator exists and is healthy: +By default, PGO installs into a namespace called `pgo`. First, see that the Kubernetes Deployment of PGO exists and is healthy: ``` kubectl -n pgo get deployments @@ -111,7 +112,7 @@ NAME READY STATUS RESTARTS AGE postgres-operator-56d6ccb97-tmz7m 4/4 Running 0 2m ``` -Finally, let's see if we can connect to the PostgreSQL Operator from the `pgo` command-line client. The Ansible installer installs the `pgo` command line client into your environment, along with the username/password file that allows you to access the PostgreSQL Operator. In order to communicate with the PostgreSQL Operator API server, you will first need to set up a [port forward](https://kubernetes.io/docs/tasks/access-application-cluster/port-forward-access-application-cluster/) to your local environment. 
+Finally, let's see if we can connect to the Postgres Operator from the `pgo` command-line client. The Ansible installer installs the `pgo` command line client into your environment, along with the username/password file that allows you to access the PostgreSQL Operator. In order to communicate with the PostgreSQL Operator API server, you will first need to set up a [port forward](https://kubernetes.io/docs/tasks/access-application-cluster/port-forward-access-application-cluster/) to your local environment. In a new console window, run the following command to set up a port forward: @@ -134,7 +135,7 @@ pgo-apiserver version {{< param operatorVersion >}} ## Create a PostgreSQL Cluster -The quickstart installation method creates a namespace called `pgo` where the PostgreSQL Operator manages PostgreSQL clusters. Try creating a PostgreSQL cluster called `hippo`: +The quickstart installation method creates a namespace called `pgo` where PGO, the Postgres Operator, manages PostgreSQL clusters. Try creating a PostgreSQL cluster called `hippo`: ``` pgo create cluster -n pgo hippo @@ -155,7 +156,7 @@ created Pgcluster hippo workflow id 1cd0d225-7cd4-4044-b269-aa7bedae219b ``` -This will create a PostgreSQL cluster named `hippo`. It may take a few moments for the cluster to be provisioned. You can see the status of this cluster using the [`pgo test`]({{< relref "pgo-client/reference/pgo_test.md" >}}) command: +This will create a Postgres cluster named `hippo`. It may take a few moments for the cluster to be provisioned. You can see the status of this cluster using the [`pgo test`]({{< relref "pgo-client/reference/pgo_test.md" >}}) command: ``` pgo test -n pgo hippo @@ -175,7 +176,7 @@ The `pgo test` command provides you the basic information you need to connect to ## Connect to a PostgreSQL Cluster -By default, the PostgreSQL Operator creates a database inside the cluster with the same name of the cluster, in this case, `hippo`. Below demonstrates how we can connect to `hippo`. +By default, PGO creates a database inside the cluster with the same name of the cluster, in this case, `hippo`. Below demonstrates how we can connect to `hippo`. ### How Users Work @@ -193,7 +194,7 @@ CLUSTER USERNAME PASSWORD EXPIRES STATUS ERROR hippo testuser datalake never ok ``` -To get the information about all PostgreSQL users that the PostgreSQL Operator is managing, you will need to use the `--show-system-accounts` flag: +To get the information about all PostgreSQL users that PGO is managing, you will need to use the `--show-system-accounts` flag: ``` pgo show user -n pgo hippo --show-system-accounts @@ -217,7 +218,7 @@ The `primaryuser` is the used for replication and [high availability]({{< relref Let's see how we can connect to `hippo` using [`psql`](https://www.postgresql.org/docs/current/app-psql.html), the command-line tool for accessing PostgreSQL. Ensure you have [installed the `psql` client](https://www.crunchydata.com/developers/download-postgres/binaries/postgresql12). -The PostgreSQL Operator creates a service with the same name as the cluster. See for yourself! Get a list of all of the Services available in the `pgo` namespace: +PGO, the Postgres Operator, creates a service with the same name as the cluster. See for yourself! 
Get a list of all of the Services available in the `pgo` namespace: ``` kubectl -n pgo get svc @@ -293,7 +294,7 @@ For more information, please see the section on [pgAdmin 4]({{< relref "architec Some Kubernetes environments may require you to customize the configuration for the PostgreSQL Operator installer. The below provides a guide on the common parameters that require modification, though this may vary based on your installation. For a full reference, please visit the [Installation]({{< relref "/installation/_index.md" >}}) section. -If you already attempted to install the PostgreSQL Operator and that failed, the easiest way to clean up that installation is to delete the [Namespace](https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/) that you attempted to install the PostgreSQL Operator into. **Note: This deletes all of the other objects in the Namespace, so please be sure this is OK!** +If you already attempted to install PGO and that failed, the easiest way to clean up that installation is to delete the [Namespace](https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/) that you attempted to install the Postgres Operator into. **Note: This deletes all of the other objects in the Namespace, so please be sure this is OK!** To delete the namespace, you can run the following command: @@ -301,15 +302,15 @@ To delete the namespace, you can run the following command: kubectl delete namespace pgo ``` -#### Get the PostgreSQL Operator Installer Manifest +#### Get the Postgres Operator Installer Manifest -You will need to download the PostgreSQL Operator Installer manifest to your environment, which you can do with the following command: +You will need to download the Postgres Operator Installer manifest to your environment, which you can do with the following command: ``` curl https://raw.githubusercontent.com/CrunchyData/postgres-operator/v{{< param operatorVersion >}}/installers/kubectl/postgres-operator.yml > postgres-operator.yml ``` -#### Configure the PostgreSQL Operator Installer +#### Configure the Postgres Operator Installer There are many [configuration parameters]({{< relref "/installation/configuration.md">}}) to help you fine tune your installation, but there are a few that you may want to change to get the PostgreSQL Operator to run in your environment. Open up the `postgres-operator.yml` file and edit a few variables. @@ -326,11 +327,11 @@ primary_storage: "nfsstorage" replica_storage: "nfsstorage" ``` -If you are using either Openshift or CodeReady Containers and you have a `restricted` Security Context Constraint, you will need to set `disable_fsgroup` to `true` in order to deploy the PostgreSQL Operator. +In OpenShift and CodeReady Containers, PGO will automatically set `disable_fsgroup` to `true` so that it will deploy PostgreSQL clusters correctly under the `restricted` Security Context Constraint (SCC). Though we recommend using `restricted`, if you are using the `anyuid` SCC, you will need to set `disable_fsgroup` to `false` in order to deploy the PostgreSQL Operator. For a full list of available storage types that can be used with this installation method, please review the [configuration parameters]({{< relref "/installation/configuration.md">}}). 
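As a minimal sketch of the edits described above (the storage values are illustrative, and `disable_fsgroup: "false"` is only relevant when deploying under the `anyuid` SCC), the modified section of `postgres-operator.yml` might read:

```
primary_storage: "nfsstorage"
replica_storage: "nfsstorage"
disable_fsgroup: "false"
```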
-When you are done editing the file, you can install the PostgreSQL Operator by running the following commands: +When you are done editing the file, you can install PGO by running the following commands: ``` kubectl create namespace pgo diff --git a/docs/content/releases/4.5.1.md b/docs/content/releases/4.5.1.md new file mode 100644 index 0000000000..eeed22c013 --- /dev/null +++ b/docs/content/releases/4.5.1.md @@ -0,0 +1,38 @@ +--- +title: "4.5.1" +date: +draft: false +weight: 69 +--- + +Crunchy Data announces the release of the PostgreSQL Operator 4.5.1 on November 13, 2020. + +The PostgreSQL Operator is released in conjunction with the [Crunchy Container Suite](https://github.com/CrunchyData/crunchy-containers/). + +The PostgreSQL Operator 4.5.1 release includes the following software version upgrades: + +- [PostgreSQL](https://www.postgresql.org) is now at versions 13.1, 12.5, 11.10, 10.15, 9.6.20, and 9.5.24. +- [Patroni](https://patroni.readthedocs.io/) is now at version 2.0.1. +- PL/Perl can now be used in the PostGIS-enabled containers. + +## Changes + +- Simplified creation of a PostgreSQL cluster from a `pgcluster` resource. A user no longer has to provide a pgBackRest repository Secret: the Postgres Operator will now automatically generate this. +- The exposed ports for Services associated with a cluster are now available from the `pgo show cluster` command. +- If the `pgo-config` ConfigMap is not created during the installation of the Postgres Operator, the Postgres Operator will generate one when it initializes. +- Providing a value for `pgo_admin_password` in the installer is now optional. If no value is provided, the password for the initial administrative user is randomly generated. +- Added an example for how to create a PostgreSQL cluster that uses S3 for pgBackRest backups via a custom resource. + +## Fixes + +- Fix readiness check for a standby leader. Previously, the standby leader would not report as ready, even though it was. Reported by Alec Rooney (@alrooney). +- Properly determine whether a `pgcluster` custom resource creation has been processed by its corresponding Postgres Operator controller. This prevents the creation logic from processing the custom resource multiple times. +- Prevent `initdb` (cluster reinitialization) from occurring if the PostgreSQL container cannot initialize while bootstrapping from an existing PGDATA directory. +- Fix issue with UBI 8 / CentOS 8 when running a pgBackRest bootstrap or restore job, where duplicate "repo types" could be set. Specifically, this ensures the name of the repo type is set via the `PGBACKREST_REPO1_TYPE` environment variable. Reported by Alec Rooney (@alrooney). +- Ensure external WAL and Tablespace PVCs are fully recreated during a restore. Reported by (@aurelien43). +- Ensure `pgo show backup` will work regardless of the state of any of the PostgreSQL clusters. This pulls the information directly from the pgBackRest Pod itself. Reported by (@saltenhub). +- Ensure that sidecars (e.g. metrics collection, pgAdmin 4, pgBouncer) are deployable when using the PostGIS-enabled PostgreSQL image. Reported by Jean-Denis Giguère (@jdenisgiguere). +- Allow for special characters in pgBackRest environment variables. Reported by (@SockenSalat). +- Ensure the password for the `pgbouncer` administrative user stays synchronized between an existing Kubernetes Secret and PostgreSQL should pgBouncer be recreated.
+- When uninstalling an instance of the Postgres Operator in a Kubernetes cluster that has multiple instances of the Postgres Operator, ensure that only the requested instance is uninstalled. +- The logger no longer defaults to using a log level of `DEBUG`. diff --git a/docs/content/releases/4.6.0.md b/docs/content/releases/4.6.0.md new file mode 100644 index 0000000000..d73caa8eea --- /dev/null +++ b/docs/content/releases/4.6.0.md @@ -0,0 +1,249 @@ +--- +title: "4.6.0" +date: +draft: false +weight: 60 +--- + +Crunchy Data announces the release of the PostgreSQL Operator 4.6.0 on January 22, 2021. You can get started with the PostgreSQL Operator with the following commands: + +``` +kubectl create namespace pgo +kubectl apply -f https://raw.githubusercontent.com/CrunchyData/postgres-operator/v4.6.0/installers/kubectl/postgres-operator.yml +``` + +The PostgreSQL Operator is released in conjunction with the [Crunchy Container Suite](https://github.com/CrunchyData/crunchy-containers/). + +The PostgreSQL Operator 4.6.0 release includes the following software version upgrades: + +- [pgBackRest](https://pgbackrest.org/) is now at version 2.31 +- [pgnodemx](https://github.com/CrunchyData/pgnodemx) is now at version 1.0.3 +- [Patroni](https://patroni.readthedocs.io/) is now at version 2.0.1 +- [pgBadger](https://github.com/darold/pgbadger) is now at version 11.4 + +The monitoring stack for the PostgreSQL Operator uses upstream components as opposed to repackaging them. These are specified as part of the [PostgreSQL Operator Installer](https://access.crunchydata.com/documentation/postgres-operator/latest/installation/postgres-operator/). We have tested this release with the following versions of each component: + +- Prometheus: 2.24.0 +- Grafana: 6.7.5 +- Alertmanager: 0.21.0 + +This release of the PostgreSQL Operator drops support for PostgreSQL 9.5, which goes EOL in February 2021. + +The PostgreSQL Operator is tested against Kubernetes 1.17 - 1.20, OpenShift 3.11, OpenShift 4.4+, Google Kubernetes Engine (GKE), Amazon EKS, Microsoft AKS, and VMware Enterprise PKS 1.3+, and works on other Kubernetes distributions as well. + +## Major Features + +### Rolling Updates + +During the lifecycle of a PostgreSQL cluster, there are certain events that may require a planned restart, such as an update to a "restart required" PostgreSQL configuration setting (e.g. [`shared_buffers`](https://www.postgresql.org/docs/current/runtime-config-resource.html#GUC-SHARED-BUFFERS)) or a change to a Kubernetes Deployment template (e.g. [changing the memory request](https://access.crunchydata.com/documentation/postgres-operator/latest/tutorial/customize-cluster/#customize-cpu-memory)). Restarts can be disruptive in a high availability deployment, which is why many setups employ a ["rolling update" strategy](https://kubernetes.io/docs/tutorials/kubernetes-basics/update/update-intro/) (aka a "rolling restart") to minimize or eliminate downtime during a planned restart. + +Because PostgreSQL is a stateful application, a simple rolling restart strategy will not work: PostgreSQL needs to ensure that there is a primary available that can accept reads and writes. This requires following a method that will minimize the amount of downtime when the primary is taken offline for a restart.
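As a sketch of both paths described in the next paragraph (the cluster name `hippo` and the memory value are placeholders):

```
# Explicit rolling restart of every instance in the cluster:
pgo restart hippo --rolling

# Implicit rolling update: changing a Deployment template value,
# such as the memory request, triggers the same minimal-downtime flow:
pgo update cluster hippo --memory=2GiB
```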
+ +This release introduces a mechanism for the PostgreSQL Operator to perform rolling updates implicitly on certain operations that change the Deployment templates and explicitly through the [`pgo restart`](https://access.crunchydata.com/documentation/postgres-operator/latest/pgo-client/reference/pgo_restart/) command with the `--rolling` flag. Some of the operations that will trigger a rolling update include: + +- Memory resource adjustments +- CPU resource adjustments +- Custom annotation changes +- Tablespace additions +- Adding/removing the metrics sidecar to a PostgreSQL cluster + +Please reference the [documentation](https://access.crunchydata.com/documentation/postgres-operator/latest/architecture/high-availability/#rolling-updates) for more details on [rolling updates](https://access.crunchydata.com/documentation/postgres-operator/latest/architecture/high-availability/#rolling-updates). + +### Pod Tolerations + +Kubernetes [Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) can help with the scheduling of Pods to appropriate Nodes based upon the taint values of said Nodes. For example, a Kubernetes administrator may set taints on Nodes to restrict scheduling to just the database workload, and as such, tolerations must be assigned to Pods to ensure they can actually be scheduled on those nodes. + +This release introduces the ability to assign tolerations to PostgreSQL clusters managed by the PostgreSQL Operator. Tolerations can be assigned to every instance in the cluster via the `tolerations` attribute on a `pgclusters.crunchydata.com` custom resource, or to individual instances using the `tolerations` attribute on a `pgreplicas.crunchydata.com` custom resource. + +Both the [`pgo create cluster`](https://access.crunchydata.com/documentation/postgres-operator/latest/pgo-client/reference/pgo_create_cluster/) and [`pgo scale`](https://access.crunchydata.com/documentation/postgres-operator/latest/pgo-client/reference/pgo_scale/) commands support the `--toleration` flag, which can be used to add one or more tolerations to a cluster. Values accepted by the `--toleration` flag use the following format: + +``` +rule:Effect +``` + +where a `rule` can represent existence (e.g. `key`) or equality (`key=value`) and `Effect` is one of `NoSchedule`, `PreferNoSchedule`, or `NoExecute`, e.g.: + +``` +pgo create cluster hippo \ + --toleration=ssd:NoSchedule \ + --toleration=zone=east:NoSchedule +``` + +Tolerations can also be added and removed from an existing cluster using the [`pgo update cluster`](https://access.crunchydata.com/documentation/postgres-operator/latest/pgo-client/reference/pgo_update_cluster/) command, e.g.: + +``` +pgo update cluster hippo \ + --toleration=zone=west:NoSchedule \ + --toleration=zone=east:NoSchedule- +``` + +or by modifying the `pgclusters.crunchydata.com` custom resource directly. + +For more information on how tolerations work, please refer to the [Kubernetes documentation](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/). + +### Node Affinity Enhancements + +Node affinity has been a feature of the PostgreSQL Operator for a long time but has received some significant improvements in this release. + +It is now possible to control the node affinity across an entire PostgreSQL cluster as well as individual PostgreSQL instances from a custom resource attribute on the `pgclusters.crunchydata.com` and `pgreplicas.crunchydata.com` CRDs.
These attributes use the standard [Kubernetes specifications for node affinity](https://kubernetes.io/docs/tasks/configure-pod-container/assign-pods-nodes-using-node-affinity/) and should be familiar to users who have had to set this in applications. + +Additionally, this release adds support for both "preferred" and "required" node affinity definitions. Previously, one could achieve required node affinity by modifying a template in the `pgo-config` ConfigMap, but this release makes this process more straightforward. + +This release introduces the `--node-affinity-type` flag for the `pgo create cluster`, `pgo scale`, and `pgo restore` commands that allows one to specify the node affinity type for PostgreSQL clusters and instances. The `--node-affinity-type` flag accepts values of `preferred` (default) and `required`. Each instance in a PostgreSQL cluster will inherit its node affinity type from the cluster (`pgo create cluster`) itself, but the type of an individual instance (`pgo scale`) will supersede that value. + +The `--node-affinity-type` flag must be combined with the `--node-label` flag. + +### TLS for pgBouncer + +Since 4.3.0, the PostgreSQL Operator has had support for [TLS connections to PostgreSQL clusters](https://access.crunchydata.com/documentation/postgres-operator/latest/tutorial/tls/) and an [improved integration with pgBouncer](https://access.crunchydata.com/documentation/postgres-operator/latest/tutorial/pgbouncer/), used for connection pooling and state management. However, the integration with pgBouncer did not support TLS directly: it could be achieved through modifying the pgBouncer Deployment template. + +This release brings TLS support for pgBouncer to the PostgreSQL Operator, allowing for communication over TLS between a client and pgBouncer, and between pgBouncer and a PostgreSQL server. In other words, the following is now supported: + +`Client` <= TLS => `pgBouncer` <= TLS => `PostgreSQL` + +That is, to use TLS with pgBouncer, all connections from a client to pgBouncer and from pgBouncer to PostgreSQL **must** be over TLS. Effectively, this is "TLS only" mode if connecting via pgBouncer. + +In order to deploy pgBouncer with TLS, the following preconditions must be met: + +- TLS **MUST** be enabled within the PostgreSQL cluster. +- pgBouncer and the PostgreSQL cluster **MUST** share the same certificate authority (CA) bundle. + +You must have a [Kubernetes TLS Secret](https://kubernetes.io/docs/concepts/configuration/secret/#tls-secrets) containing the TLS keypair you would like to use for pgBouncer. + +You can enable TLS for pgBouncer using the following commands: + +- `pgo create pgbouncer --tls-secret`, where `--tls-secret` specifies the location of the TLS keypair to use for pgBouncer. You **must** already have TLS enabled in your PostgreSQL cluster. +- `pgo create cluster --pgbouncer --pgbouncer-tls-secret`, where `--pgbouncer-tls-secret` specifies the location of the TLS keypair to use for pgBouncer. You **must** also specify `--server-tls-secret` and `--server-ca-secret`. + +This adds an attribute to the `pgclusters.crunchydata.com` Custom Resource Definition in the `pgBouncer` section called `tlsSecret`, which will store the name of the TLS secret to use for pgBouncer. + +By default, connections coming into pgBouncer have a [PostgreSQL SSL mode](https://www.postgresql.org/docs/current/libpq-ssl.html#LIBPQ-SSL-PROTECTION) of `require`, and connections going into PostgreSQL use `verify-ca`.
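Putting these preconditions together, a hypothetical cluster creation with TLS-enabled pgBouncer (all Secret names below are placeholders) could look like:

```
pgo create cluster hippo \
  --server-tls-secret=hippo-tls-keypair \
  --server-ca-secret=hippo-ca \
  --pgbouncer \
  --pgbouncer-tls-secret=hippo-pgbouncer-tls-keypair
```

The shared-CA requirement would be met by issuing both keypairs from the CA referenced by `--server-ca-secret`.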
+
+### Enable/Disable Metrics Collection for PostgreSQL Cluster
+
+A common case is that one creates a PostgreSQL cluster with the Postgres Operator and forgets to enable it for monitoring with the `--metrics` flag. Prior to this release, adding the `crunchy-postgres-exporter` to an already running PostgreSQL cluster presented challenges.
+
+This release introduces the `--enable-metrics` and `--disable-metrics` flags on [`pgo update cluster`](https://access.crunchydata.com/documentation/postgres-operator/latest/pgo-client/reference/pgo_update_cluster/), which allow for monitoring to be enabled or disabled on an already running PostgreSQL cluster. As this involves modifying Deployment templates, this action triggers a rolling update, as described in the previous section, to limit downtime.
+
+Metrics can also be enabled/disabled using the `exporter` attribute on the `pgclusters.crunchydata.com` custom resource.
+
+This release also changes the management of the PostgreSQL user that is used to collect the metrics. Similar to [pgBouncer](https://access.crunchydata.com/documentation/postgres-operator/latest/tutorial/pgbouncer/), the PostgreSQL Operator fully manages the credentials for the metrics collection user. The `--exporter-rotate-password` flag on [`pgo update cluster`](https://access.crunchydata.com/documentation/postgres-operator/latest/pgo-client/reference/pgo_update_cluster/) can be used to rotate the metrics collection user's credentials.
+
+## Container Image Reduction & Reorganization
+
+Advances in Postgres Operator functionality have allowed for a culling of the number of required container images. For example, functionality that had been broken out into individual container images (e.g. `crunchy-pgdump`) is now consolidated within the `crunchy-postgres` and `crunchy-postgres-ha` containers.
+
+Renamed container images include:
+
+- `pgo-backrest` => `crunchy-pgbackrest`
+- `pgo-backrest-repo` => `crunchy-pgbackrest-repo`
+
+Removed container images include:
+
+- `crunchy-admin`
+- `crunchy-backrest-restore`
+- `crunchy-backup`
+- `crunchy-pgbasebackup-restore`
+- `crunchy-pgbench`
+- `crunchy-pgdump`
+- `crunchy-pgrestore`
+- `pgo-sqlrunner`
+- `pgo-backrest-repo-sync`
+- `pgo-backrest-restore`
+
+These changes also include overall organization and build performance optimizations around the container suite.
+
+## Breaking Changes
+
+- [Metrics collection](https://access.crunchydata.com/documentation/postgres-operator/latest/architecture/monitoring/) can now be enabled/disabled using the `exporter` attribute on `pgclusters.crunchydata.com`. The previous method to do so, involving a label buried within a custom resource, no longer works.
+- pgBadger can now be enabled/disabled using the `pgBadger` attribute on `pgclusters.crunchydata.com`. The previous method to do so, involving a label buried within a custom resource, no longer works.
+- Several additional labels on the `pgclusters.crunchydata.com` CRD that had driven behavior have been moved to attributes. These include:
+  - `autofail`, which is now represented by the `disableAutofail` attribute.
+  - `service-type`, which is now represented by the `serviceType` attribute.
+  - `NodeLabelKey`/`NodeLabelValue`, which are now replaced by the `nodeAffinity` attribute.
+  - `backrest-storage-type`, which is now represented with the `backrestStorageTypes` attribute.
+- The `--labels` flag on `pgo create cluster` is removed and replaced with `--label`, which can be specified multiple times.
+The API endpoint for `pgo create cluster` is also modified: labels must now be passed in as a set of key-value pairs. Please see the "Features" section for more details.
+- The API endpoints for `pgo label` and `pgo delete label` are modified to accept a set of key/value pairs for the values of the `--label` flag. The API parameter for this is now called `Labels`.
+The `pgo upgrade` command will properly move any data you have in these labels into the correct attributes. You can read more about how to use the various CRD attributes in the [Custom Resources](https://access.crunchydata.com/documentation/postgres-operator/latest/custom-resources/) section of the documentation.
+- The `rootsecretname`, `primarysecretname`, and `usersecretname` attributes on the `pgclusters.crunchydata.com` CRD have been removed. Each of these represented managed Secrets. Additionally, if the managed Secrets are not created at cluster creation time, the Operator will now generate these Secrets.
+- The `collectSecretName` attribute on `pgclusters.crunchydata.com` has been removed. The Secret for the metrics collection user is now fully managed by the PostgreSQL Operator.
+- There are changes to the `exporter.json` and `cluster-deployment.json` templates that reside within the `pgo-config` ConfigMap that could be breaking to those who have customized those templates. This includes removing the opening comma in the `exporter.json` and removing unneeded match labels on the PostgreSQL cluster Deployment. This is resolved by following the [standard upgrade procedure](https://access.crunchydata.com/documentation/postgres-operator/latest/upgrade/), and only affects new clusters and existing clusters that wish to use the enable/disable metric collection feature.
+- The `affinity.json` entry in the `pgo-config` ConfigMap has been removed in favor of the updated node affinity support.
+- Failovers can no longer be controlled by creating a `pgtasks.crunchydata.com` custom resource.
+- Remove the `PgMonitorPassword` attribute from `pgo-deployer`. The metric collection user password is managed by the PostgreSQL Operator.
+- Policy creation only supports the method of creating the policy from a file/ConfigMap.
+- Any pgBackRest variables of the format `PGBACKREST_REPO_` now follow the format `PGBACKREST_REPO1_` to be consistent with what pgBackRest expects.
+
+## Features
+
+- [Monitoring](https://access.crunchydata.com/documentation/postgres-operator/latest/architecture/monitoring/) can now be enabled/disabled during the lifetime of a PostgreSQL cluster using the `pgo update --enable-metrics` and `pgo update --disable-metrics` flags. This can also be modified directly on a custom resource.
+- The Service Type of a PostgreSQL cluster can now be updated during the lifetime of a cluster with `pgo update cluster --service-type`. This can also be modified directly on a custom resource.
+- The Service Type of pgBouncer can now be independently controlled and set with the `--service-type` flag on `pgo create pgbouncer` and `pgo update pgbouncer`. This can also be modified directly on a custom resource.
+- [pgBackRest delta restores](https://pgbackrest.org/user-guide.html#restore/option-delta), which can efficiently restore data as pgBackRest determines which specific files need to be restored from backup, can now be used as part of the cluster creation method with `pgo create cluster --restore-from`.
+For example, if a cluster is deleted as such:
+
+```
+pgo delete cluster hippo --keep-data --keep-backups
+```
+
+it can subsequently be recreated using the delta restore method as such:
+
+```
+pgo create cluster hippo --restore-from=hippo
+```
+
+Passing in the [`--process-max`](https://pgbackrest.org/command.html#command-archive-get/category-general/option-process-max) option to `--restore-opts` can help speed up the restore process based upon the amount of CPU you have available. If the delta restore fails, the PostgreSQL Operator will attempt to perform a full restore.
+
+- `pgo restore` will now first attempt a [pgBackRest delta restore](https://pgbackrest.org/user-guide.html#restore/option-delta), which can significantly speed up the restore time for large databases. Passing in the [`--process-max`](https://pgbackrest.org/command.html#command-archive-get/category-general/option-process-max) option to `--backup-opts` can help speed up the restore process based upon the amount of CPU you have available.
+- A pgBackRest backup can now be deleted with `pgo delete backup`. A backup name must be specified with the `--target` flag. Please refer to the [documentation](https://access.crunchydata.com/documentation/postgres-operator/latest/tutorial/disaster-recovery/#deleting-a-backup) for how to use this command.
+- `pgo create cluster` now accepts a `--label` flag that can be used to specify one or more custom labels for a PostgreSQL cluster. This replaces the `--labels` flag.
+- `pgo label` and `pgo delete label` can accept a `--label` flag specified multiple times.
+- pgBadger can now be enabled/disabled during the lifetime of a PostgreSQL cluster using the `pgo update --enable-pgbadger` and `pgo update --disable-pgbadger` flags. This can also be modified directly on a custom resource.
+- Managed PostgreSQL system accounts can now have their credentials set and rotated with `pgo update user` by including the `--set-system-account-password` flag. Suggested by (@srinathganesh).
+
+## Changes
+
+- If not provided at installation time, the Operator will now generate its own `pgo-backrest-repo-config` Secret.
+- The `local` storage type option for pgBackRest is deprecated in favor of `posix`, which matches the pgBackRest term. `local` will still continue to work for backwards compatibility purposes.
+- PostgreSQL clusters using multi-repository (e.g. `posix` + `s3` at the same time) archiving will now, by default, take backups to both repositories when `pgo backup` is used without additional options.
+- If not provided at cluster creation time, the Operator will now generate the PostgreSQL user Secrets required for bootstrap, including the superuser (`postgres`), the replication user (`primaryuser`), and the standard user.
+- `crunchy-postgres-exporter` now exposes several pgMonitor metrics related to `pg_stat_statements`.
+- When using the `--restore-from` option on `pgo create cluster` to create a new PostgreSQL cluster, the cluster bootstrap Job is now automatically removed if it completes successfully.
+- The `pgo failover` command now works without specifying a target: the candidate to fail over to will be automatically selected.
+- For clusters that have no healthy instances, `pgo failover` can now force a promotion using the `--force` flag. A `--target` flag must also be specified when using `--force`.
+- If a predefined custom ConfigMap for a PostgreSQL cluster (`-pgha-config`) is detected at bootstrap time, the Operator will ensure it properly initializes the cluster.
+
+- Deleting a `pgclusters.crunchydata.com` custom resource will now properly delete a PostgreSQL cluster. If the `pgclusters.crunchydata.com` custom resource has the annotations `keep-backups` or `keep-data`, it will keep the backups or keep the PostgreSQL data directory respectively. Reported by Leo Khomenko (@lkhomenk).
+- PostgreSQL JIT compilation is explicitly disabled on new cluster creation. This prevents a memory leak that has been observed on queries coming from the metrics exporter.
+- The credentials for the metrics collection user are now available with `pgo show user --show-system-accounts`.
+- The default user for executing scheduled SQL policies is now the Postgres superuser, instead of the replication user.
+- Add the `--no-prompt` flag to `pgo upgrade`. The mechanism to disable the prompt verification was already in place, but the flag was not exposed. Reported by (@devopsevd).
+- Remove certain characters that cause issues in shell environments from consideration when using the random password generator, which is used to create default passwords or with `--rotate-password`.
+- Allow the `--link-map` attribute to be passed as a pgBackRest option, which can help with the restore of an existing cluster to a new cluster that adds an external WAL volume.
+- Remove the long deprecated `archivestorage` attribute from the `pgclusters.crunchydata.com` custom resource definition. As this attribute is not used at all, this should have no effect.
+- The `ArchiveMode` parameter is now removed from the configuration. This had been fully deprecated for a while.
+- Add an explicit size limit of `64Mi` for the `pgBadger` ephemeral storage mount. Additionally, remove the ephemeral storage mount for the `/recover` mount point as that is not used. Reported by Pierre-Marie Petit (@pmpetit).
+- New PostgreSQL Operator deployments will now generate ECDSA keys (P-256, SHA384) for use by the API server.
+
+## Fixes
+
+- Ensure custom annotations are applied if the annotations are supposed to be applied globally but the cluster does not have a pgBouncer Deployment.
+- Fix issue with UBI 8 / CentOS 8 when running a pgBackRest bootstrap or restore job, where duplicate "repo types" could be set. Specifically, this ensures the name of the repo type is set via the `PGBACKREST_REPO1_TYPE` environmental variable. Reported by Alec Rooney (@alrooney).
+- Fix issue where `pgo test` would indicate every Service was a replica if the cluster name contained the word `replica` in it. Reported by Jose Joye (@jose-joye).
+- Do not consider Evicted Pods as part of `pgo test`. This eliminates a behavior where faux primaries are considered as part of `pgo test`. Reported by Dennis Jacobfeuerborn (@dennisjac).
+- Fix `pgo df` to not fail in the event it tries to execute a command within a dangling container from the bootstrap process when `pgo create cluster --restore-from` is used. Reported by Ignacio J.Ortega (@IJOL).
+- `pgo df` will now only attempt to execute in running Pods, i.e. it does not attempt to run in evicted Pods. Reported by (@kseswar).
+- Ensure the sync replication ConfigMap is removed when a cluster is deleted.
+- Fix crash in shutdown logic when attempting to shut down a cluster where no primaries exist. Reported by Jeffrey den Drijver (@JeffreyDD).
+- Fix syntax in recovery check command, which could lead to failures when manually promoting a standby cluster. Reported by (@SockenSalat).
+
+- Fix potential race condition that could lead to a crash in the Operator boot when an error is issued around loading the `pgo-config` ConfigMap. Reported by Aleksander Roszig (@AleksanderRoszig).
+- Do not trigger a backup if a standby cluster fails over. Reported by (@aprilito1965).
+- Ensure pgBouncer Secret is created when adding it to a standby cluster.
+- General improvements to the initialization of a standby cluster.
+- Remove legacy `defaultMode` setting on the volume instructions for the pgBackRest repo Secret as the `readOnly` setting is used on the mount itself. Reported by (@szhang1).
+- Ensure proper label parsing based on Kubernetes rules and that it is consistently applied across all functionality that uses labels. Reported by José Joye (@jose-joye).
+- The logger no longer defaults to using a log level of `DEBUG`.
+- Autofailover is no longer disabled when an `rmdata` Job is run, enabling a clean database shutdown process when deleting a PostgreSQL cluster.
+- Allow for `Restart` API server permission to be explicitly set. Reported by Aleksander Roszig (@AleksanderRoszig).
+- Update `pgo-target` permissions to match expectations for modern Kubernetes versions.
+- Major upgrade container now includes references for `pgnodemx`.
+- During a major upgrade, ensure permissions are correct on the old data directory before running `pg_upgrade`.
+- The metrics stack installer is fixed to work in environments that may not have connectivity to the Internet ("air gapped"). Reported by (@eliranw).
diff --git a/docs/content/releases/4.6.1.md b/docs/content/releases/4.6.1.md
new file mode 100644
index 0000000000..e3fd1587e5
--- /dev/null
+++ b/docs/content/releases/4.6.1.md
@@ -0,0 +1,33 @@
+---
+title: "4.6.1"
+date:
+draft: false
+weight: 59
+---
+
+Crunchy Data announces the release of the PostgreSQL Operator 4.6.1 on February 16, 2021.
+
+The PostgreSQL Operator is released in conjunction with the [Crunchy Container Suite](https://github.com/CrunchyData/crunchy-containers/).
+
+The PostgreSQL Operator 4.6.1 release includes the following software version upgrades:
+
+- [PostgreSQL](https://www.postgresql.org) is now at versions 13.2, 12.6, 11.11, 10.16, and 9.6.21.
+
+PostgreSQL Operator is tested against Kubernetes 1.17 - 1.20, OpenShift 3.11, OpenShift 4.4+, Google Kubernetes Engine (GKE), Amazon EKS, Microsoft AKS, and VMware Enterprise PKS 1.3+, and works on other Kubernetes distributions as well.
+
+## Changes
+- The `--compress-type` flag is now supported for the backup options (`--backup-opts`) for pgBackRest backups with `pgo backup`. `none`, `gz`, `bz2`, and `lz4` are all supported. Presently `zst` is not supported.
+- The post-cluster creation pgBackRest tasks, i.e. creating a stanza and creating an initial backup, are now deleted by the Operator should they complete successfully. Besides good housekeeping, this provides a workaround for an OpenShift 3.11 bug that was causing the Operator to continuously issue pgBackRest backups during an OpenShift refresh cycle. Reported by Paul Heinen (@v3nturetheworld).
+
+## Fixes
+- Only attempt to start scheduled backups in running pgBackRest repository Pods. Reported by Satria Sahputra (@satriashp).
+- Support the substitution for the limit on the number of queries to include in the `pg_stat_statements` support of pgMonitor. Defaults to 20, which is the pgMonitor upstream value. Contributed by Steven Siahetiong (@ssiahetiong).
+- On initialization, check that the primary PostgreSQL Deployment is created before attempting to scale.
+
+- Fix issue with `pgo backup` where it was unable to take a backup from a new primary after `pgo failover` was called. Reported by (@mesobreira).
+- Fix crash when attempting to create a cluster via the REST API and no custom labels were set. Reported by Jeffrey den Drijver (@JeffreyDD).
+- Ensure a pgBadger sidecar is not added to a PostgreSQL cluster after a `pgo upgrade` if it was not previously specified.
+- Ensure superfluous labels are deleted during a `pgo upgrade`.
+- Remove `/crunchyadm` from `unix_socket_directories` configuration during a `pgo upgrade`. Reported by Steven Siahetiong (@ssiahetiong).
+- Ensure updated paths, i.e. the rename to `/opt/crunchy`, are reflected in the cluster ConfigMap when running `pgo upgrade`. Reported by Steven Siahetiong (@ssiahetiong).
+- Ensure the value from `--ccp-image-tag` is applied when running `pgo upgrade`.
+- Ensure the pgBackRest repository sets ownership settings to the `pgbackrest` user, which, while not noticed under most operating environments, could manifest itself in different UID modes. Reported by Matt Russell (@mjrussell).
diff --git a/docs/content/releases/4.6.10.md b/docs/content/releases/4.6.10.md
new file mode 100644
index 0000000000..d756555396
--- /dev/null
+++ b/docs/content/releases/4.6.10.md
@@ -0,0 +1,22 @@
+---
+title: "4.6.10"
+date:
+draft: false
+weight: 50
+---
+
+Crunchy Data announces the release of PGO, the Postgres Operator 4.6.10.
+
+The PostgreSQL Operator is released in conjunction with the [Crunchy Container Suite](https://github.com/CrunchyData/crunchy-containers/).
+
+Crunchy Postgres for Kubernetes 4.6.10 includes the following software version upgrades:
+
+- [PostgreSQL](https://www.postgresql.org) versions 13.10, 12.14, and 11.19 are now available.
+- [PgBouncer](https://www.pgbouncer.org/) is now at version 1.18.
+- The `orafce` extension is now at version 4.1.1.
+- The `pg_partman` extension is now at version 4.7.2.
+- The `set_user` extension is now at version 4.0.1.
+- The `TimescaleDB` extension is now at version 2.9.2.
+
+## Fixes
+- The `crunchy-pgadmin` container for UBI 7 and CentOS 7 no longer throws an error when starting.
diff --git a/docs/content/releases/4.6.2.md b/docs/content/releases/4.6.2.md
new file mode 100644
index 0000000000..2cc890e38f
--- /dev/null
+++ b/docs/content/releases/4.6.2.md
@@ -0,0 +1,31 @@
+---
+title: "4.6.2"
+date:
+draft: false
+weight: 58
+---
+
+Crunchy Data announces the release of the PostgreSQL Operator 4.6.2 on March 22, 2021.
+
+The PostgreSQL Operator is released in conjunction with the [Crunchy Container Suite](https://github.com/CrunchyData/crunchy-containers/).
+
+The PostgreSQL Operator 4.6.2 release includes the following software version upgrades:
+
+- [Patroni](https://patroni.readthedocs.io/) is now at version 2.0.2.
+- [pgBouncer](https://www.pgbouncer.org/) for CentOS 8 / UBI 8 is rebuilt to use libc for its async DNS backend.
+
+PostgreSQL Operator is tested against Kubernetes 1.17 - 1.20, OpenShift 3.11, OpenShift 4.4+, Google Kubernetes Engine (GKE), Amazon EKS, Microsoft AKS, and VMware Enterprise PKS 1.3+, and works on other Kubernetes distributions as well.
+
+## Changes
+
+- The Postgres Operator and associated containers now contain defaults to use more locked down Pod and Container security context settings. These include setting `allowPrivilegeEscalation` to `false` and explicitly stating that the container should not run as `root`.
+Many of these were already honored, if not defaulted, within the Postgres Operator ecosystem, but these changes make the settings explicit. This is all configuration: there are no breaking changes, and these configurations can be supported down to at least the 4.2 series.
+- Revert setting "UsePAM" to "yes" by default, as the bug fix in Docker that required that change was applied roughly one year ago.
+- On Operator boot, automatically detect when deployed in an OpenShift environment and set `DisableFSGroup` to `true`. This makes it easier to get started with the Postgres Operator in an OpenShift environment with the default security settings (i.e. `restricted`). If you use the `anyuid` Security Context Constraint, you will need to explicitly set `DisableFSGroup` to `false`.
+
+## Fixes
+
+- Ensure `archive_mode` is forced to `on` when performing a "restore in place". This ensures that the timeline is correctly incremented post-restore; previously, this issue could manifest itself with various types of WAL archive failures.
+- Fix error when attempting to perform restores when using node affinity. Reported by (@gilfrade) and Cristian Chiru (@cristichiru).
+- Fix issue where certain pgAdmin 4 functions did not work (e.g. taking a backup) due to `python` references in EL8 containers. Reported by (@douggutaby).
+- Ensure a Postgres cluster shutdown can execute even if the `status` subresource of a `pgclusters.crunchydata.com` custom resource is missing.
+- Ensure major upgrades via `crunchy-upgrade` support PostgreSQL 12 and PostgreSQL 13. Reported by (@lbartnicki92).
diff --git a/docs/content/releases/4.6.3.md b/docs/content/releases/4.6.3.md
new file mode 100644
index 0000000000..be402ffbaa
--- /dev/null
+++ b/docs/content/releases/4.6.3.md
@@ -0,0 +1,29 @@
+---
+title: "4.6.3"
+date:
+draft: false
+weight: 57
+---
+
+Crunchy Data announces the release of the PostgreSQL Operator 4.6.3 on May 25, 2021.
+
+The PostgreSQL Operator is released in conjunction with the [Crunchy Container Suite](https://github.com/CrunchyData/crunchy-containers/).
+
+The PostgreSQL Operator 4.6.3 release includes the following software version upgrades:
+
+- [PostgreSQL](https://www.postgresql.org/) is now at 13.3, 12.7, 11.12, 10.17, and 9.6.22.
+- [pgMonitor](https://github.com/CrunchyData/pgmonitor) is now at 4.4-1.
+
+## Changes
+
+- Allow for the `PGOADMIN_USERNAME`, `PGOADMIN_PASSWORD`, and `PGOADMIN_ROLENAME` credential bootstrap variables to be overridden as part of the OLM and development install process. Contributed by Mathieu Parent (@sathieu).
+- Update Helm installer to follow appropriate conventions. Contributed by Jakub Ráček (@kubaracek).
+
+## Fixes
+
+- Fix crash due to superfluous trailing whitespace when parsing `--backup-opts` in `pgo backup`. Reported by Samir Faci (@safaci2000).
+- Ensure `sshd_config` is correctly set on an upgrade. This could have manifested with some pgBackRest functionality not working. This can be manually fixed by setting `UsePAM no` in the `sshd_config` file in a cluster. Reported by (@douggutaby).
+- Fix issue where metrics about pgBackRest backups could not be scraped if the backups were stored in an S3-like environment that requires disabling TLS verification. Reported by (@lphan-clv) and (@dakine1111).
+- Fix how the pgAdmin 4 Service is identified in `pgo test`. Prior to this, it was identified as a "primary"; now it is "pgadmin".
+- Fix nonbreaking error message that occurs when the `pgo-scheduler` container shuts down in the UBI 8 base container.
+
+- The `pgo-deployer` and Ansible installer will no longer create an initial TLS secret for the PGO apiserver. The PGO apiserver has been able to self-create this Secret for a long time, and PGO defers to that. This fixes an issue that occurred on newer builds where certificates generated by OpenSSL contained incomplete usage blocks, which could cause these certificates to be outright rejected.
diff --git a/docs/content/releases/4.6.4.md b/docs/content/releases/4.6.4.md
new file mode 100644
index 0000000000..74bcf1cefa
--- /dev/null
+++ b/docs/content/releases/4.6.4.md
@@ -0,0 +1,26 @@
+---
+title: "4.6.4"
+date:
+draft: false
+weight: 56
+---
+
+Crunchy Data announces the release of PGO, the Postgres Operator 4.6.4.
+
+The PostgreSQL Operator is released in conjunction with the [Crunchy Container Suite](https://github.com/CrunchyData/crunchy-containers/).
+
+The PostgreSQL Operator 4.6.4 release includes the following software version upgrades:
+
+- [PostgreSQL](https://www.postgresql.org) is updated to 13.4, 12.8, 11.13, 10.18, and 9.6.23.
+- [pgaudit_analyze](https://github.com/pgaudit/pgaudit_analyze) is now at 1.0.8.
+- [set_user](https://github.com/pgaudit/set_user) is now at version 2.0.1.
+
+## Changes
+
+- When using the built-in upgrade tool, the `pg-pod-anti-affinity` entry is now removed from the `userlabels` section of a `pgclusters.crunchydata.com` custom resource.
+- Ensure the `vendor` label is propagated to all PGO managed objects. Reported by (@mdraijer).
+
+## Fixes
+
+- Allow backup configuration to be changed when a cluster is recreated. For example, allow backup configuration to change from `posix` to `s3` within a new cluster.
+- Ensure a SQL policy that contains writes can be applied to a Postgres cluster after a failover.
diff --git a/docs/content/releases/4.6.5.md b/docs/content/releases/4.6.5.md
new file mode 100644
index 0000000000..00614b122e
--- /dev/null
+++ b/docs/content/releases/4.6.5.md
@@ -0,0 +1,28 @@
+---
+title: "4.6.5"
+date:
+draft: false
+weight: 55
+---
+
+Crunchy Data announces the release of PGO, the Postgres Operator 4.6.5.
+
+The PostgreSQL Operator is released in conjunction with the [Crunchy Container Suite](https://github.com/CrunchyData/crunchy-containers/).
+
+The PostgreSQL Operator 4.6.5 release includes the following software version upgrades:
+
+- [PostgreSQL](https://www.postgresql.org) versions 13.5, 12.9, 11.14, 10.19, and 9.6.24 are now available.
+- The [pgnodemx](https://github.com/CrunchyData/pgnodemx) extension is now at version 1.0.6.
+- [pgBouncer](https://www.pgbouncer.org/) is now at version 1.16.1.
+- The [pgAudit](https://github.com/pgaudit/pgaudit) extension is now at version 1.6.1.
+
+## Changes
+
+- Update automatic OpenShift detection logic to look specifically for the presence of the SecurityContextConstraint API. Reported by (@aurelien43).
+
+## Fixes
+
+- Ensure the `pgo create pgbouncer` command can set CPU and memory limits via `--cpu-limit` and `--memory-limit` respectively.
+- Ensure `pgo delete backup` works with backups stored in S3 or GCS. Reported by Munjal Patel (@munjalpatel).
+- Update the `aws-s3-ca.crt` value to use the newer CAs provided by AWS. If a PostgreSQL cluster is using the old default CA, PGO will update the general one kept in the `pgo-backrest-repo-config` Secret and `pgo upgrade` will update it for a specific cluster.
+- Allow for the original primary instance to be scaled down after running `pgo upgrade`.
diff --git a/docs/content/releases/4.6.6.md b/docs/content/releases/4.6.6.md
new file mode 100644
index 0000000000..5d8577acce
--- /dev/null
+++ b/docs/content/releases/4.6.6.md
@@ -0,0 +1,18 @@
+---
+title: "4.6.6"
+date:
+draft: false
+weight: 54
+---
+
+Crunchy Data announces the release of PGO, the Postgres Operator 4.6.6.
+
+The PostgreSQL Operator is released in conjunction with the [Crunchy Container Suite](https://github.com/CrunchyData/crunchy-containers/).
+
+The PostgreSQL Operator 4.6.6 release includes the following software version upgrades:
+
+- [PostgreSQL](https://www.postgresql.org) versions 13.6, 12.10, 11.15, and 10.20 are now available.
+
+## Changes
+
+- The version of Go utilized to build `yq` is now aligned with all other Go binaries.
diff --git a/docs/content/releases/4.6.7.md b/docs/content/releases/4.6.7.md
new file mode 100644
index 0000000000..a8dd0df08f
--- /dev/null
+++ b/docs/content/releases/4.6.7.md
@@ -0,0 +1,17 @@
+---
+title: "4.6.7"
+date:
+draft: false
+weight: 53
+---
+
+Crunchy Data announces the release of PGO, the Postgres Operator 4.6.7.
+
+The PostgreSQL Operator is released in conjunction with the [Crunchy Container Suite](https://github.com/CrunchyData/crunchy-containers/).
+
+The PostgreSQL Operator 4.6.7 release includes the following software version upgrades:
+
+- [PostgreSQL](https://www.postgresql.org) versions 14.3, 13.7, 12.11, 11.16, and 10.21 are now available.
+- [PostGIS](http://postgis.net/) version 3.1.4 is now available.
+- The [pg_partman](https://github.com/pgpartman/pg_partman) extension is now at version 4.6.1.
+- The [TimescaleDB](https://github.com/timescale/timescaledb) extension is now at version 2.6.1.
diff --git a/docs/content/releases/4.6.8.md b/docs/content/releases/4.6.8.md
new file mode 100644
index 0000000000..1892dfb501
--- /dev/null
+++ b/docs/content/releases/4.6.8.md
@@ -0,0 +1,17 @@
+---
+title: "4.6.8"
+date:
+draft: false
+weight: 52
+---
+
+Crunchy Data announces the release of PGO, the Postgres Operator 4.6.8.
+
+The PostgreSQL Operator is released in conjunction with the [Crunchy Container Suite](https://github.com/CrunchyData/crunchy-containers/).
+
+The PostgreSQL Operator 4.6.8 release includes the following software version upgrades:
+
+- [PostgreSQL](https://www.postgresql.org) versions 13.8, 12.12, 11.17, and 10.22 are now available.
+- [PgBouncer](https://www.pgbouncer.org/) is now at version 1.17.
+- The [pg_partman](https://github.com/pgpartman/pg_partman) extension is now at version 4.6.2.
+- The [TimescaleDB](https://github.com/timescale/timescaledb) extension is now at version 2.7.2.
diff --git a/docs/content/releases/4.6.9.md b/docs/content/releases/4.6.9.md
new file mode 100644
index 0000000000..0734b9eba7
--- /dev/null
+++ b/docs/content/releases/4.6.9.md
@@ -0,0 +1,16 @@
+---
+title: "4.6.9"
+date:
+draft: false
+weight: 51
+---
+
+Crunchy Data announces the release of PGO, the Postgres Operator 4.6.9.
+
+The PostgreSQL Operator is released in conjunction with the [Crunchy Container Suite](https://github.com/CrunchyData/crunchy-containers/).
+
+The PostgreSQL Operator 4.6.9 release includes the following software version upgrades:
+
+- [PostgreSQL](https://www.postgresql.org) versions 13.9, 12.13, 11.18, and 10.23 are now available.
+- The [pg_partman](https://github.com/pgpartman/pg_partman) extension is now at version 4.7.1.
+- The [TimescaleDB](https://github.com/timescale/timescaledb) extension is now at version 2.8.1.
diff --git a/docs/content/support/_index.md b/docs/content/support/_index.md
index 707b93bc29..f8a974c84b 100644
--- a/docs/content/support/_index.md
+++ b/docs/content/support/_index.md
@@ -5,15 +5,15 @@ draft: false
 weight: 110
 ---
 
-There are a few options available for community support of the [PostgreSQL Operator](https://github.com/CrunchyData/postgres-operator):
+There are a few options available for community support of [PGO: the Postgres Operator](https://github.com/CrunchyData/postgres-operator):
 
-- **If you believe you have found a bug** or have a detailed feature request: please open [an issue on GitHub](https://github.com/CrunchyData/postgres-operator/issues/new/choose). The PostgreSQL Operator community and the Crunchy Data team behind the PostgreSQL Operator is generally active in responding to issues.
+- **If you believe you have found a bug** or have a detailed feature request: please open [an issue on GitHub](https://github.com/CrunchyData/postgres-operator/issues/new/choose). The Postgres Operator community and the Crunchy Data team behind PGO are generally active in responding to issues.
 - **For general questions or community support**: please join the [PostgreSQL Operator community mailing list](https://groups.google.com/a/crunchydata.com/forum/#!forum/postgres-operator/join) at [https://groups.google.com/a/crunchydata.com/forum/#!forum/postgres-operator/join](https://groups.google.com/a/crunchydata.com/forum/#!forum/postgres-operator/join),
 
 In all cases, please be sure to provide as many details as possible in regards to your issue, including:
 
 - Your Platform (e.g. Kubernetes vX.YY.Z)
-- Operator Version (e.g. {{< param centosBase >}}-{{< param operatorVersion >}})
+- Operator Version (e.g. {{< param ubiBase >}}-{{< param operatorVersion >}})
 - A detailed description of the issue, as well as steps you took that lead up to the issue
 - Any relevant logs
 - Any additional information you can provide that you may find helpful
diff --git a/docs/content/tutorial/_index.md b/docs/content/tutorial/_index.md
index 2919cc6a25..0babd127dc 100644
--- a/docs/content/tutorial/_index.md
+++ b/docs/content/tutorial/_index.md
@@ -4,10 +4,10 @@ draft: false
 weight: 15
 ---
 
-The PostgreSQL Operator provides functionality that lets you run your own database-as-a-service: from deploying PostgreSQL clusters with [high availability]({{< relref "architecture/high-availability/_index.md" >}}), to a [full stack monitoring]({{< relref "architecture/high-availability/_index.md" >}}) solution, essential [disaster recovery and backup tools]({{< relref "architecture/disaster-recovery.md" >}}), the ability to secure your cluster with TLS, and much more!
+PGO, the Postgres Operator, provides functionality that lets you run your own database-as-a-service: from deploying PostgreSQL clusters with [high availability]({{< relref "architecture/high-availability/_index.md" >}}), to a [full stack monitoring]({{< relref "architecture/monitoring.md" >}}) solution, essential [disaster recovery and backup tools]({{< relref "architecture/disaster-recovery.md" >}}), the ability to secure your cluster with TLS, and much more!
 
 What's more, you can manage your PostgreSQL clusters with the convenient [`pgo` client]({{< relref "pgo-client/_index.md" >}}) or by interfacing directly with the PostgreSQL Operator [custom resources]({{< relref "custom-resources/_index.md" >}}).
-Given the robustness of the PostgreSQL Operator, we think it's helpful to break down the functionality in this step-by-step tutorial. The tutorial covers the essential functions the PostgreSQL Operator can perform and covers many common basic and advanced use cases. +Given the robustness of PGO, we think it's helpful to break down the functionality in this step-by-step tutorial. The tutorial covers the essential functions the Postgres Operator can perform and covers many common basic and advanced use cases. So what are you waiting for? Let's [get started]({{< relref "tutorial/getting-started.md" >}})! diff --git a/docs/content/tutorial/create-cluster.md b/docs/content/tutorial/create-cluster.md index eeb798faf5..6db4090269 100644 --- a/docs/content/tutorial/create-cluster.md +++ b/docs/content/tutorial/create-cluster.md @@ -65,7 +65,7 @@ So what just happened? Let's break down what occurs during the create cluster pr - Creating [persistent volume claims](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) (PVCs) for the PostgreSQL instance and the pgBackRest repository. - Creating [services](https://kubernetes.io/docs/concepts/services-networking/service/) that provide a stable network interface for connecting to the PostgreSQL instance and pgBackRest repository. - Creating [deployments](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/) that house each PostgreSQL instance and pgBackRest repository. Each of these is responsible for one Pod. - - The PostgreSQL Pod, when it is started, provisions a PostgreSQL database and performs other bootstrapping functions, such as creating `testuer`. + - The PostgreSQL Pod, when it is started, provisions a PostgreSQL database and performs other bootstrapping functions, such as creating `testuser`. - The pgBackRest Pod, when it is started, initializes a pgBackRest repository. Note that the pgBackRest repository is not yet ready to start taking backups, but will be after the next step! 3. When the PostgreSQL Operator detects that the PostgreSQL and pgBackRest deployments are up and running, it creates a Kubenretes Job to create a pgBackRest stanza. This is necessary as part of intializing the pgBackRest repository to accept backups from our PostgreSQL cluster. @@ -120,6 +120,33 @@ Also ensure that you have enough persistent volumes available: your Kubernetes a The most common occurrence of this is due to the Kubernetes network blocking SSH connections between Pods. Ensure that your Kubernetes networking layer allows for SSH connections over port 2022 in the Namespace that you are deploying your PostgreSQL clusters into. +### PostgreSQL Pod reports "Authentication Failed for `ccp_monitoring`" + +This is a temporary error that occurs when a new PostgreSQL cluster is first +initialized with the `--metrics` flag. The `crunchy-postgres-exporter` container +within the PostgreSQL Pod may be ready before the container with PostgreSQL is +ready. If a message in your logs further down displays a timestamp, e.g.: + +``` + now +------------------------------- +2020-11-10 08:23:15.968196-05 +``` + +Then the `ccp_monitoring` user is properly reconciled with the PostgreSQL +cluster. + +If the error message does not go away, this could indicate a few things: + +- The PostgreSQL instance has not initialized. Check to ensure that PostgreSQL +has successfully started. +- The password for the `ccp_monitoring` user has changed. In this case you will +need to update the Secret with the monitoring credentials. 
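+
+As a hedged sketch of that last step, and assuming the monitoring credentials
+live in a Secret whose name follows the pattern `<clusterName>-exporter-secret`
+(an assumption; verify the actual Secret name in your namespace first), the
+password could be updated with something like:
+
+```
+# Hypothetical Secret name; confirm with: kubectl -n pgo get secrets
+kubectl -n pgo patch secret hippo-exporter-secret \
+  --type merge -p '{"stringData": {"password": "newpassword"}}'
+```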
+
+## Custom Resources
+
+You may also be curious about how to perform the same actions directly with [custom resources]({{< relref "custom-resources/_index.md" >}}). If that is the case, we encourage you to skip ahead to the [Custom Resources]({{< relref "custom-resources/_index.md" >}}) section of the documentation.
+
 ## Next Steps
 
 Once your cluster is created, the next step is to [connect to your PostgreSQL cluster]({{< relref "tutorial/connect-cluster.md" >}}). You can also [learn how to customize your PostgreSQL cluster]({{< relref "tutorial/customize-cluster.md" >}})!
diff --git a/docs/content/tutorial/customize-cluster.md b/docs/content/tutorial/customize-cluster.md
index e9be31c268..7006b70a20 100644
--- a/docs/content/tutorial/customize-cluster.md
+++ b/docs/content/tutorial/customize-cluster.md
@@ -7,7 +7,7 @@ weight: 130
 The PostgreSQL Operator makes it very easy and quick to [create a cluster]({{< relref "tutorial/create-cluster.md" >}}), but there are possibly more customizations you want to make to your cluster. These include:
 
 - Resource allocations (e.g. Memory, CPU, PVC size)
-- Sidecars (e.g. [Monitoring]({{< relref "architecture/monitoring.md" >}}), pgBouncer, [pgAdmin 4]({{< relref "architecture/pgadmin4.md" >}}))
+- Sidecars (e.g. [Monitoring]({{< relref "architecture/monitoring.md" >}}), [pgBouncer]({{< relref "tutorial/pgbouncer.md" >}}), [pgAdmin 4]({{< relref "architecture/pgadmin4.md" >}}))
 - High Availability (e.g. adding replicas)
 - Specifying specific PostgreSQL images (e.g. one with PostGIS)
 - Specifying a [Pod anti-affinity and Node affinity](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/)
@@ -30,6 +30,12 @@ pgo create cluster hippo --metrics
 
 Note that the `--metrics` flag just enables a sidecar that can be scraped. You will need to install the [monitoring stack]({{< relref "installation/metrics/_index.md" >}}) separately, or tie it into your existing monitoring infrastructure.
 
+If you have an existing cluster that you would like to add metrics collection to, you can use the `--enable-metrics` flag on the [`pgo update cluster`]({{< relref "pgo-client/reference/pgo_update_cluster.md" >}}) command:
+
+```
+pgo update cluster hippo --enable-metrics
+```
+
 ## Customize PVC Size
 
 Databases come in all different sizes, and those sizes can certainly change over time. As such, it is helpful to be able to specify what size PVC you want to store your PostgreSQL data.
@@ -130,6 +136,44 @@ pgo create cluster hippo --replica-count=1
 
 You can scale up and down your PostgreSQL cluster with the [`pgo scale`]({{< relref "pgo-client/reference/pgo_scale.md" >}}) and [`pgo scaledown`]({{< relref "pgo-client/reference/pgo_scaledown.md" >}}) commands.
 
+## Set Tolerations for a PostgreSQL Cluster
+
+[Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) help with the scheduling of Pods to appropriate nodes. There are many reasons that a Kubernetes administrator may want to use tolerations, such as restricting the types of Pods that can be assigned to particular nodes.
+
+The PostgreSQL Operator supports adding tolerations to PostgreSQL instances using the `--toleration` flag. The format for adding a toleration is as such:
+
+```
+rule:Effect
+```
+
+or
+
+```
+rule
+```
+
+where a `rule` can represent existence (e.g. `key`) or equality (`key=value`) and `Effect` is one of `NoSchedule`, `PreferNoSchedule`, or `NoExecute`.
+For more information on how tolerations work, please refer to the [Kubernetes documentation](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/).
+
+You can assign multiple tolerations to a PostgreSQL cluster.
+
+For example, to add two tolerations to a new PostgreSQL cluster, one that is an existence toleration for a key of `ssd` and the other that is an equality toleration for a key/value pair of `zone`/`east`, you can run the following command:
+
+```
+pgo create cluster hippo \
+  --toleration=ssd:NoSchedule \
+  --toleration=zone=east:NoSchedule
+```
+
+Tolerations can be updated on an existing cluster using the [`pgo update cluster`]({{< relref "pgo-client/reference/pgo_update_cluster.md" >}}) command. For example, to add a toleration of `zone=west:NoSchedule` and remove the toleration of `zone=east:NoSchedule`, you could run the following command:
+
+```
+pgo update cluster hippo \
+  --toleration=zone=west:NoSchedule \
+  --toleration=zone=east:NoSchedule-
+```
+
+You can also add or edit tolerations directly on the `pgclusters.crunchydata.com` custom resource and the PostgreSQL Operator will roll out the changes to the appropriate instances.
+
 ## Customize PostgreSQL Configuration
 
 PostgreSQL provides a lot of different knobs that can be used to fine tune the [configuration](https://www.postgresql.org/docs/current/runtime-config.html) for your workload. While you can [customize your PostgreSQL configuration]({{< relref "advanced/custom-configuration.md" >}}) after your cluster has been deployed, you may also want to load in your custom configuration during initialization.
@@ -184,6 +228,33 @@ There are many reasons why a PostgreSQL Pod may not be scheduled:
 
 - **Node affinity rules cannot be satisfied**. If you assigned a node label, ensure that the Nodes with that label are available for scheduling. If they are, ensure that there are enough resources available.
 - **Pod anti-affinity rules cannot be satisfied**. This most likely happens when [pod anti-affinity]({{< relref "architecture/high-availability/_index.md" >}}#how-the-crunchy-postgresql-operator-uses-pod-anti-affinity) is set to `required` and there are not enough Nodes available for scheduling. Consider adding more Nodes or relaxing your anti-affinity rules.
+
+### PostgreSQL Pod reports "Authentication Failed for `ccp_monitoring`"
+
+This is a temporary error that occurs when a new PostgreSQL cluster is first
+initialized with the `--metrics` flag. The `crunchy-postgres-exporter` container
+within the PostgreSQL Pod may be ready before the container with PostgreSQL is
+ready. If a message in your logs further down displays a timestamp, e.g.:
+
+```
+ now
+-------------------------------
+2020-11-10 08:23:15.968196-05
+```
+
+Then the `ccp_monitoring` user is properly reconciled with the PostgreSQL
+cluster.
+
+If the error message does not go away, this could indicate a few things:
+
+- The PostgreSQL instance has not initialized. Check to ensure that PostgreSQL
+has successfully started.
+- The password for the `ccp_monitoring` user has changed. In this case you will
+need to update the Secret with the monitoring credentials.
+
+### PostgreSQL Pod Not Scheduled to Nodes Matching Tolerations
+
+While Kubernetes [Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) allow for Pods to be scheduled to Nodes based on their taints, this does not mean that the Pod _will_ be assigned to those nodes.
+To provide Kubernetes scheduling guidance on where a Pod should be assigned, you must also use [Node Affinity]({{< relref "architecture/high-availability/_index.md" >}}#node-affinity).
+
 ## Next Steps
 
 As mentioned at the beginning, there are a lot more customizations that you can make to your PostgreSQL cluster, and we will cover those as the tutorial progresses! This section was to get you familiar with some of the most common customizations, and to explore how many options `pgo create cluster` has!
diff --git a/docs/content/tutorial/disaster-recovery.md b/docs/content/tutorial/disaster-recovery.md
index ca05361674..2d87319ec9 100644
--- a/docs/content/tutorial/disaster-recovery.md
+++ b/docs/content/tutorial/disaster-recovery.md
@@ -183,6 +183,92 @@ When the restore is complete, the cluster is immediately available for reads and
 
 The PostgreSQL Operator supports the full set of pgBackRest restore options, which can be passed into the `--backup-opts` parameter. For more information, please review the [pgBackRest restore options](https://pgbackrest.org/command.html#command-restore)
 
+## Deleting a Backup
+
+You typically do not want to delete backups. Instead, it's better to set a backup retention policy as part of [scheduling your backups](#schedule-backups).
+
+However, there are situations where you may want to explicitly delete backups, in particular, if you need to reclaim space on your backup disk or if you accidentally created too many backups.
+
+{{% notice warning %}}
+If you delete a backup that is *not* set to expire, you may be unable to meet your retention requirements. If you are deleting backups to free space, it is recommended to delete your oldest backups first.
+{{% /notice %}}
+
+In these cases, a backup can be deleted using the [`pgo delete backup`]({{< relref "pgo-client/reference/pgo_delete_backup.md" >}})
+command. You must specify the backup to delete using the `--target` flag. You can get the backup names from the [`pgo show backup`]({{< relref "pgo-client/reference/pgo_show_backup.md" >}}) command.
+ +Let's say that the `hippo` cluster currently has a set of backups that look like this, obtained from running the `pgo show backup hippo` command: + +``` +cluster: hippo +storage type: posix + +stanza: db + status: ok + cipher: none + + db (current) + wal archive min/max (12-1) + + full backup: 20201220-171801F + timestamp start/stop: 2020-12-20 17:18:01 +0000 UTC / 2020-12-20 17:18:10 +0000 UTC + wal start/stop: 000000010000000000000002 / 000000010000000000000002 + database size: 31.3MiB, backup size: 31.3MiB + repository size: 3.8MiB, repository backup size: 3.8MiB + backup reference list: + + incr backup: 20201220-171801F_20201220-171939I + timestamp start/stop: 2020-12-20 17:19:39 +0000 UTC / 2020-12-20 17:19:41 +0000 UTC + wal start/stop: 000000010000000000000005 / 000000010000000000000005 + database size: 31.3MiB, backup size: 216.3KiB + repository size: 3.8MiB, repository backup size: 25.9KiB + backup reference list: 20201220-171801F + + incr backup: 20201220-171801F_20201220-172046I + timestamp start/stop: 2020-12-20 17:20:46 +0000 UTC / 2020-12-20 17:23:29 +0000 UTC + wal start/stop: 00000001000000000000000A / 00000001000000000000000A + database size: 65.9MiB, backup size: 37.5MiB + repository size: 7.7MiB, repository backup size: 4.3MiB + backup reference list: 20201220-171801F, 20201220-171801F_20201220-171939I + + full backup: 20201220-201305F + timestamp start/stop: 2020-12-20 20:13:05 +0000 UTC / 2020-12-20 20:13:15 +0000 UTC + wal start/stop: 00000001000000000000000F / 00000001000000000000000F + database size: 65.9MiB, backup size: 65.9MiB + repository size: 7.7MiB, repository backup size: 7.7MiB + backup reference list: +``` + +Note that the backup targets can be found after the backup type, e.g. `20201220-171801F` or `20201220-171801F_20201220-172046I`. + +One can delete the oldest backup, in this case `20201220-171801F`, by running the following command: + +``` +pgo delete backup hippo --target=20201220-171801F +``` + +You can then verify the backup is deleted with `pgo show backup hippo`: + +``` +cluster: hippo +storage type: posix + +stanza: db + status: ok + cipher: none + + db (current) + wal archive min/max (12-1) + + full backup: 20201220-201305F + timestamp start/stop: 2020-12-20 20:13:05 +0000 UTC / 2020-12-20 20:13:15 +0000 UTC + wal start/stop: 00000001000000000000000F / 00000001000000000000000F + database size: 65.9MiB, backup size: 65.9MiB + repository size: 7.7MiB, repository backup size: 7.7MiB + backup reference list: +``` + +Note that deleting the oldest backup also had the effect of deleting all of the backups that depended on it. This is a feature of [pgBackRest](https://pgbackrest.org/)! + ## Next Steps There are cases where you may want to take [logical backups]({{< relref "tutorial/pgdump.md" >}}), aka `pg_dump` / `pg_dumpall`. Let's learn how to do that with the PostgreSQL Operator! diff --git a/docs/content/tutorial/getting-started.md b/docs/content/tutorial/getting-started.md index 8422487ed1..c02fd355a4 100644 --- a/docs/content/tutorial/getting-started.md +++ b/docs/content/tutorial/getting-started.md @@ -6,17 +6,17 @@ weight: 100 ## Installation -If you have not installed the PostgreSQL Operator yet, we recommend you take a look at our [quickstart]({{< relref "quickstart/_index.md" >}}) or the [installation]({{< relref "installation/_index.md" >}}) sections. 
+If you have not yet installed PGO, the Postgres Operator, we recommend you take a look at our [quickstart]({{< relref "quickstart/_index.md" >}}) or the [installation]({{< relref "installation/_index.md" >}}) sections.
 
 ### Customizing an Installation
 
-How to customize a PostgreSQL Operator installation is a lengthy topic. The details are covered in the [installation]({{< relref "installation/postgres-operator.md" >}}) section, as well as a list of all the [configuration variables]({{< relref "installation/configuration.md" >}}) available.
+How to customize a PGO installation is a lengthy topic. The details are covered in the [installation]({{< relref "installation/postgres-operator.md" >}}) section, as well as a list of all the [configuration variables]({{< relref "installation/configuration.md" >}}) available.
 
 ## Setup the `pgo` Client
 
-This tutorial will be using the [`pgo` client]({{< relref "pgo-client/_index.md" >}}) to interact with the PostgreSQL Operator. Please follow the instructions in the [quickstart]({{< relref "quickstart/_index.md" >}}) or the [installation]({{< relref "installation/pgo-client.md" >}}) sections for how to configure the `pgo` client.
+This tutorial will be using the [`pgo` client]({{< relref "pgo-client/_index.md" >}}) to interact with the Postgres Operator. Please follow the instructions in the [quickstart]({{< relref "quickstart/_index.md" >}}) or the [installation]({{< relref "installation/pgo-client.md" >}}) sections for how to configure the `pgo` client.
 
-The PostgreSQL Operator and `pgo` client are designed to work in a [multi-namespace deployment environment]({{< relref "architecture/namespace.md" >}}) and many `pgo` commands require that the namespace flag (`-n`) are passed into it. You can use the `PGO_NAMESPACE` environmental variable to set which namespace a `pgo` command can use. For example:
+The Postgres Operator and `pgo` client are designed to work in a [multi-namespace deployment environment]({{< relref "architecture/namespace.md" >}}) and many `pgo` commands require that the namespace flag (`-n`) be passed in. You can use the `PGO_NAMESPACE` environmental variable to set which namespace a `pgo` command can use. For example:
 
 ```
 export PGO_NAMESPACE=pgo
@@ -39,13 +39,13 @@ export PGO_NAMESPACE=pgo
 
 ## Next Steps
 
-Before proceeding, please make sure that your `pgo` client setup can communicate with your PostgreSQL Operator. In a separate terminal window, set up a port forward to your PostgreSQL Operator:
+Before proceeding, please make sure that your `pgo` client setup can communicate with your PGO Deployment. In a separate terminal window, set up a port forward to your PostgreSQL Operator:
 
 ```
 kubectl port-forward -n pgo svc/postgres-operator 8443:8443
 ```
 
-The [`pgo version`]({{< relref "pgo-client/reference/pgo_version.md" >}}) command is a great way to check connectivity with the PostgreSQL Operator, as it is a very simple, safe operation. Try it out:
+The [`pgo version`]({{< relref "pgo-client/reference/pgo_version.md" >}}) command is a great way to check connectivity with the Postgres Operator, as it is a very simple, safe operation. Try it out:
 
 ```
 pgo version
 ```
@@ -72,4 +72,4 @@ which yields results similar to:
 pgo client version {{< param operatorVersion >}}
 ```
 
-Alright, we're now ready to start our journey with the PostgreSQL Operator!
+Alright, we're now ready to start our journey with PGO!
diff --git a/docs/content/tutorial/high-availability.md b/docs/content/tutorial/high-availability.md
index a3c2a12bea..2d277d382c 100644
--- a/docs/content/tutorial/high-availability.md
+++ b/docs/content/tutorial/high-availability.md
@@ -62,7 +62,28 @@ pgo scaledown hippo --target=hippo-ojnd
 
 ## Manual Failover
 
-Each PostgreSQL cluster will manage its own availability. If you wish to manually fail over, you will need to use the [`pgo failover`]({{< relref "pgo-client/reference/pgo_failover.md">}}) command. First, determine which instance you want to fail over to:
+Each PostgreSQL cluster will manage its own availability. If you wish to manually fail over, you will need to use the [`pgo failover`]({{< relref "pgo-client/reference/pgo_failover.md">}}) command.
+
+There are two ways to issue a manual failover to your PostgreSQL cluster:
+
+1. Allow for the PostgreSQL Operator to select the best replica candidate for failover.
+2. Select your own replica candidate for failover.
+
+Both methods are detailed below.
+
+### Manual Failover - PostgreSQL Operator Candidate Selection
+
+To have the PostgreSQL Operator select the best replica candidate for failover, all you need to do is execute the following command:
+
+```
+pgo failover hippo
+```
+
+The PostgreSQL Operator will determine the best replica candidate to fail over to, taking into account factors such as replication lag and the current timeline.
+
+### Manual Failover - Manual Selection
+
+If you wish to select the failover candidate yourself, you must first determine which instance you want to fail over to. You can do so with the following command:
 
 ```
 pgo failover hippo --query
@@ -94,7 +115,33 @@ Please understand the tradeoffs of synchronous replication before using it.
 
 ## Pod Anti-Affinity and Node Affinity
 
-To leran how to use pod anti-affinity and node affinity, please refer to the [high availability architecture documentation]({{< relref "architecture/high-availability/_index.md" >}})
+To learn how to use pod anti-affinity and node affinity, please refer to the [high availability architecture documentation]({{< relref "architecture/high-availability/_index.md" >}}).
+
+## Tolerations
+
+If you want to have a PostgreSQL instance use specific Kubernetes [tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/), you can use the `--toleration` flag on [`pgo scale`]({{< relref "pgo-client/reference/pgo_scale.md">}}). Any tolerations added to the new PostgreSQL instance fully replace any tolerations available to the entire cluster.
+
+For example, to assign an equality toleration for a key/value pair of `zone`/`west`, you can run the following command:
+
+```
+pgo scale hippo --toleration=zone=west:NoSchedule
+```
+
+For more information on the PostgreSQL Operator and tolerations, please review the [high availability architecture documentation]({{< relref "architecture/high-availability/_index.md" >}}).
+
+## Troubleshooting
+
+### No Primary Available After Both Synchronous Replication Instances Fail
+
+Though synchronous replication is available for guarding against transaction loss for [write sensitive workloads]({{< relref "architecture/high-availability/_index.md" >}}#synchronous-replication-guarding-against-transactions-loss), by default the high availability system prefers availability over consistency and will continue to accept writes to a primary even if a replica fails.
Additionally, in most scenarios, a system using synchronous replication will be able to recover and self-heal should a primary or a replica go down.
+
+However, in the case that both a primary and its synchronous replica go down at the same time, a new primary may not be promoted. To guard against transaction loss, the high availability system will not promote any instances if it cannot determine whether they had been one of the synchronous instances. As such, when it recovers, it will bring up all the instances as replicas.
+
+To get out of this situation, inspect the replicas using `pgo failover --query` to determine the best candidate (typically the one with the least amount of replication lag). After determining the best candidate, promote one of the replicas using the `pgo failover --target` command.
+
+If you are still having issues, you may need to exec into one of the Pods and inspect the state with the `patronictl` command.
+
+A detailed breakdown of this case can be found [here](https://github.com/CrunchyData/postgres-operator/issues/2132#issuecomment-748719843).

## Next Steps

diff --git a/docs/content/tutorial/pgbouncer.md b/docs/content/tutorial/pgbouncer.md
index 89ba8ce993..f8e5dd6364 100644
--- a/docs/content/tutorial/pgbouncer.md
+++ b/docs/content/tutorial/pgbouncer.md
@@ -116,7 +116,7 @@ PGPASSWORD=randompassword psql -h localhost -p 5432 -U pgbouncer pgbouncer

You should see something similar to this:

```
-psql (12.4, server 1.14.0/bouncer)
+psql ({{< param postgresVersion >}}, server 1.14.0/bouncer)
Type "help" for help.

pgbouncer=#
@@ -130,6 +130,77 @@ SHOW stats;

Success, you have connected to pgBouncer!

+## Setup pgBouncer with TLS
+
+Similarly to how you can [set up TLS for PostgreSQL]({{< relref "tutorial/tls.md" >}}), you can set up TLS connections for pgBouncer. To do this, the PostgreSQL Operator takes the following steps:
+
+- Ensuring TLS communication between a client (e.g. `psql`, your application, etc.) and pgBouncer
+- Ensuring TLS communication between pgBouncer and PostgreSQL
+
+When TLS is enabled, the PostgreSQL Operator configures pgBouncer to require each client to use TLS to communicate with pgBouncer. Additionally, the PostgreSQL Operator requires that pgBouncer and the PostgreSQL cluster share the same certificate authority (CA) bundle, which allows pgBouncer to communicate with the PostgreSQL cluster using PostgreSQL's [`verify-ca` SSL mode](https://www.postgresql.org/docs/current/libpq-ssl.html#LIBPQ-SSL-PROTECTION).
+
+The guide below will show you how to set up TLS for pgBouncer.
+
+### Prerequisites
+
+In order to set up TLS connections for pgBouncer, you must first [enable TLS on your PostgreSQL cluster]({{< relref "tutorial/tls.md" >}}).
+
+For the purposes of this exercise, we will re-use the Secret TLS keypair `hippo-tls-keypair` that was created for the PostgreSQL server. This is only being done for convenience: you can substitute `hippo-tls-keypair` with a different TLS key pair as long as it can be verified by the certificate authority (CA) that you selected for your PostgreSQL cluster. Recall that the certificate authority (CA) bundle is stored in a Secret named `postgresql-ca`.
+
+### Create pgBouncer with TLS
+
+Knowing that our TLS key pair is stored in a Secret called `hippo-tls-keypair`, you can set up pgBouncer with TLS using the following command:
+
+```
+pgo create pgbouncer hippo --tls-secret=hippo-tls-keypair
+```
+
+And that's it! So long as the prerequisites are satisfied, this will create a pgBouncer instance that is TLS enabled.
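If the command fails, the usual culprit is a missing prerequisite. A quick sanity check (a sketch, assuming the cluster lives in the `pgo` namespace) is to confirm that both Secrets referenced above exist before retrying:

```
kubectl -n pgo get secret postgresql-ca hippo-tls-keypair
```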
+
+Don't believe it? Try logging in. First, ensure you have a port-forward from pgBouncer to your host machine:
+
+```
+kubectl -n pgo port-forward svc/hippo-pgbouncer 5432:5432
+```
+
+Then, connect to the pgBouncer instance:
+
+```
+PGPASSWORD=securerandomlygeneratedpassword psql -h localhost -p 5432 -U testuser hippo
+```
+
+You should see something similar to this:
+
+```
+psql ({{< param postgresVersion >}})
+SSL connection (protocol: TLSv1.3, cipher: TLS_AES_256_GCM_SHA384, bits: 256, compression: off)
+Type "help" for help.
+
+hippo=>
+```
+
+Still don't believe it? You can verify your connection using the PostgreSQL `pg_backend_pid()` function and the [`pg_stat_ssl`](https://www.postgresql.org/docs/current/monitoring-stats.html#MONITORING-PG-STAT-SSL-VIEW) monitoring view:
+
+```
+hippo=> SELECT * FROM pg_stat_ssl WHERE pid = pg_backend_pid();
+  pid  | ssl | version |         cipher         | bits | compression | client_dn | client_serial | issuer_dn
+-------+-----+---------+------------------------+------+-------------+-----------+---------------+-----------
+ 15653 | t   | TLSv1.3 | TLS_AES_256_GCM_SHA384 |  256 | f           |           |               |
+(1 row)
+```
+
+### Create a PostgreSQL cluster with pgBouncer and TLS
+
+Want to create a PostgreSQL cluster with a TLS-enabled pgBouncer? You can, with the [`pgo create cluster`]({{< relref "pgo-client/reference/pgo_create_cluster.md" >}}) command and the `--pgbouncer-tls-secret` flag. Using the same Secrets that were created in the [creating a PostgreSQL cluster with TLS]({{< relref "tutorial/tls.md" >}}) tutorial, you can create a PostgreSQL cluster with pgBouncer and TLS with the following command:
+
+```
+pgo create cluster hippo \
+  --server-ca-secret=postgresql-ca \
+  --server-tls-secret=hippo-tls-keypair \
+  --pgbouncer \
+  --pgbouncer-tls-secret=hippo-tls-keypair
+```
+
## Customize CPU / Memory for pgBouncer

### Provisioning

diff --git a/docs/layouts/partials/flex/body-aftercontent.html b/docs/layouts/partials/flex/body-aftercontent.html
new file mode 100644
index 0000000000..6637c4c6be
--- /dev/null
+++ b/docs/layouts/partials/flex/body-aftercontent.html
@@ -0,0 +1,44 @@
+{{ partial "next-prev-page.html" . }}
+{{if .Params.tags }}
+  {{ range $index, $tag := .Params.tags }}{{ $tag }}{{ end }}
+{{end}}
+{{with .Params.LastModifierDisplayName}}{{ . }}{{end}}
+{{ if not .Page.Lastmod.IsZero }}
+  {{T "last-update-on"}} {{ .Page.Lastmod.Format "02/01/2006" }}
+{{end}}
+© 2017 - 2023 Crunchy Data Solutions, Inc.
+ +{{ partial "flex/scripts.html" . }} diff --git a/docs/static/Operator-Architecture-wCRDs.png b/docs/static/Operator-Architecture-wCRDs.png index 291cbefef3..8e41f46031 100644 Binary files a/docs/static/Operator-Architecture-wCRDs.png and b/docs/static/Operator-Architecture-wCRDs.png differ diff --git a/docs/static/Operator-Architecture.png b/docs/static/Operator-Architecture.png index aa8a43a134..8e41f46031 100644 Binary files a/docs/static/Operator-Architecture.png and b/docs/static/Operator-Architecture.png differ diff --git a/docs/static/crunchy-logo.jpg b/docs/static/crunchy-logo.jpg deleted file mode 100644 index 01f9c9b1a4..0000000000 Binary files a/docs/static/crunchy-logo.jpg and /dev/null differ diff --git a/docs/static/logos/TRADEMARKS.md b/docs/static/logos/TRADEMARKS.md new file mode 100644 index 0000000000..e97d80757d --- /dev/null +++ b/docs/static/logos/TRADEMARKS.md @@ -0,0 +1,143 @@ +# PGO Trademark Guidelines + +## 1. Introduction + +This document - the "Policy" - outlines the policy of The PGO Project (the "Project") for the use of our trademarks. + +A trademark’s role is to assure consumers about the quality of the associated products or services. Because an open source license allows you to modify the copyrighted software, we cannot be sure your modified software will not mislead recipients if it is distributed under our trademarks. So, this Policy describes when you may or may not use our trademarks. + +In this Policy, we are not trying to limit the lawful use of our trademarks, but rather describe what we consider lawful use. Trademark law can be ambiguous, so we hope to clarify whether we will consider your use permitted or non-infringing. + +The following sections describe the trademarks this Policy covers, as well as trademark uses we permit. If you want to use our trademarks in ways this Policy doesn’t address, please see "Where to get further information" below for contact information. Any use that does not comply with this Policy, or for which we have not separately provided written permission, is not a use we have approved. + +## 2. We are committed to open source principles + +We want to encourage and facilitate community use of our trademarks in a way that ensures the trademarks are meaningful source and quality indicators for our software and the associated goods and services and continue to embody the high reputation of the software and its associated community. This Policy therefore balances our need to ensure our trademarks remain reliable quality indicators and our community members’ desire to be full Project participants. + +## 3. Trademarks subject to the Policy + +Our trademarks + +This Policy covers: + +### 3.1 Our word trademarks and service marks (the "Word Marks"): + +PGO + +### 3.2. Our logo (the "Logo"): + +PGO: The Postgres Operator from Crunchy Data + +### 3.3 And the unique visual styling of our website (the "Trade Dress"). + +This Policy encompasses all Project trademarks and service marks, whether Word Marks, Logos or Trade Dress, which we collectively call the “Marks." We might not have registered some Marks, but this Policy covers our Marks regardless. + +## 4. Universal considerations for all uses + +Whenever you use a Mark, you must not mislead anyone, either directly or by omission, about what they are getting and from whom. The law reflects this requirement in two major ways described below: it prohibits creating a "likelihood of confusion," but allows for "nominative use." 
+ +For example, you cannot say you are distributing PGO software when you're distributing a modified version of it, because you likely would confuse people, since they are not getting the same features and functionality they would get if they downloaded the software from us. You also cannot use our Logo on your website to suggest your website is an official website or we endorse your website. + +You can, though, say, for example, you like the PGO software, you are a PGO community participant, you are providing unmodified PGO software, or you wrote a book describing how to use the PGO software. + +This fundamental requirement - that it is always clear to people what they are getting and from whom - is reflected throughout this Policy. It should guide you if you are unsure about how you are using the Marks. + +In addition: + +You may not use the Marks in association with software use or distribution if you don’t comply with the license for the software. + +You may not use or register the Marks as part of your own trademark, service mark, domain name, company name, trade name, product name or service name. + +Trademark law does not allow you to use names or trademarks that are too similar to ours. You therefore may not use an obvious Mark variant or phonetic equivalent, foreign language equivalent, takeoff, or abbreviation for a similar or compatible product or service. + +You will not acquire rights in the Marks, and any goodwill you generate using the Marks inures solely to our benefit. +## 5. Use for software + +See universal considerations for all uses, above, which also apply. + +### 5.1 Uses we consider non-infringing + +#### 5.1.1 Distributing unmodified source code or unmodified executable code we have compiled + +When you redistribute our unmodified software, you are not changing its quality or nature. Therefore, you may retain the Word Marks and Logos we have placed on the software, to identify your redistributed software whether you redistribute by optical media, memory stick or download of unmodified source and executable code. This only applies if you are redistributing official software from this Project that you have not changed. You can find the Logo files [here](/). + +#### 5.1.2 Distributing executable code you have compiled, or modified code + +You may use the Word Marks, but not the Logos, to describe the software’s origin, that is, that the code you are distributing is a modification of our software. You may say, for example, "this software is derived from the source code from the PGO Project." +Of course, you can place your own trademarks or logos on software to which you have made substantive modifications, because by modifying the software, you have become the origin of the modified software. + +#### 5.1.3 Statements about compatibility, interoperability or derivation + +You may use the Word Marks, but not the Logos, to describe the relationship between your software and ours. You should use Our Mark after a verb or preposition that describes that relationship. So, you may say, for example, "Bob's plug-in for PGO," but may not say "Bob's PGO plug-in." + +#### 5.1.4 Using trademarks to show community affiliation + +This section discusses using our Marks for application themes, skins and personas. We discuss using our Marks on websites below. 
+You may use the Word Marks and the Logos in themes, personas, or skins to show your Project support, provided the use is non-commercial and clearly decorative, as contrasted with a use that appears to be the branding for a website or application.
+
+### 5.2 Permitted uses
+
+#### 5.2.1 Distributing unmodified software
+
+You may use the Word Marks and Logos to distribute executable code if you make the code from official Project source code using the procedure for creating an executable found at [https://access.crunchydata.com/documentation/postgres-operator/latest/installation/](https://access.crunchydata.com/documentation/postgres-operator/latest/installation/).
+
+### 5.3 Unpermitted uses we consider infringing
+
+We will likely consider it an infringement to use the Marks in software that combines our software with another software program. In addition to creating a single executable for both software programs, we would consider your software "combined" with ours if installing our software automatically installs yours. We would not consider your software "combined" with ours if it is on the same media but requires separate, independent action to install.
+
+## 6. Use for non-software goods and services
+
+See universal considerations for all uses, above, which also apply.
+
+### 6.1 Uses we consider non-infringing
+
+#### 6.1.1 Websites
+
+You may use the Word Marks and Logos on your webpage to show your Project support if:
+
+- Your own branding or naming is more prominent than any Project Marks;
+- The Logos hyperlink to the Project website: [https://github.com/CrunchyData/postgres-operator](https://github.com/CrunchyData/postgres-operator);
+- The site does not mislead customers into thinking your website, service, or product is our website, service, or product; and
+- The site clearly states the Project does not affiliate with or endorse you.
+
+#### 6.1.2 Publishing and presenting
+
+You can use the Word Marks in book and article titles, and the Logo in illustrations within a document, if the use does not suggest we published, endorse, or agree with your work.
+
+#### 6.1.3 Events
+
+You can use the Logo to promote the software and Project at events.
+
+### 6.2 Permitted uses
+
+#### 6.2.1 Meetups and user groups
+
+You can use the Word Marks as part of your meetup or user group name if:
+
+- The group’s main focus is the software;
+- Any software or services the group provides are without cost;
+- The group does not make a profit;
+- Any charge to attend meetings is only to cover the cost of the venue, food and drink.
+
+The universal considerations for all uses, above, still apply: specifically, you may not use or register the Marks as part of your own trademark, service mark, domain name, company name, trade name, product name or service name.
+
+### 6.3 Unpermitted uses we consider infringing
+
+We will likely consider it an infringement to use the Marks as part of a domain name or subdomain.
+We also would likely consider it an infringement to use the Marks on for-sale promotional goods.
+
+## 7. General Information
+
+### 7.1 Trademark legends
+
+If you are using our Marks in a way described in the sections entitled "Permitted uses," put the following notice at the foot of the page where you have used the Mark (or, if in a book, on the credits page), on packaging or labeling, and on advertising or marketing materials: "The PGO Project is a trademark of Crunchy Data Solutions, Inc., used with permission."
+ +### 7.2 What to do when you see abuse + +If you are aware of a confusing use or misuse of the Marks, we would appreciate you bringing it to our attention. Please contact us at [trademarks@crunchydata.com](mailto:trademarks@crunchydata.com) so we can investigate it further. + +### 7.3 Where to get further information + +If you have questions, wish to speak about using our Marks in ways the Policy doesn’t address, or see abuse of our Marks, please send an email to [trademarks@crunchydata.com](mailto:trademarks@crunchydata.com). + +We based these guidelines on the Model Trademark Guidelines, available at [http://www.modeltrademarkguidelines.org](http://www.modeltrademarkguidelines.org), used under a Creative Commons Attribution 3.0 Unported license: [https://creativecommons.org/licenses/by/3.0/deed.en_US](https://creativecommons.org/licenses/by/3.0/deed.en_US). diff --git a/docs/static/logos/pgo.png b/docs/static/logos/pgo.png new file mode 100644 index 0000000000..9d38c8f859 Binary files /dev/null and b/docs/static/logos/pgo.png differ diff --git a/docs/static/logos/pgo.svg b/docs/static/logos/pgo.svg new file mode 100644 index 0000000000..d72f9d7810 --- /dev/null +++ b/docs/static/logos/pgo.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/examples/create-by-resource/fromcrd.json b/examples/create-by-resource/fromcrd.json index 987ec53d55..2184f60181 100644 --- a/examples/create-by-resource/fromcrd.json +++ b/examples/create-by-resource/fromcrd.json @@ -6,33 +6,17 @@ "current-primary": "fromcrd" }, "labels": { - "autofail": "true", - "crunchy-pgbadger": "false", "crunchy-pgha-scope": "fromcrd", - "crunchy-postgres-exporter": "false", - "current-primary": "fromcrd", "deployment-name": "fromcrd", "name": "fromcrd", "pg-cluster": "fromcrd", - "pg-pod-anti-affinity": "", - "pgo-backrest": "true", - "pgo-version": "4.5.0", - "pgouser": "pgoadmin", - "primary": "true" + "pgo-version": "4.6.10", + "pgouser": "pgoadmin" }, "name": "fromcrd", "namespace": "pgouser1" }, "spec": { - "ArchiveStorage": { - "accessmode": "", - "matchLabels": "", - "name": "", - "size": "", - "storageclass": "", - "storagetype": "", - "supplementalgroups": "" - }, "BackrestStorage": { "accessmode": "ReadWriteOnce", "matchLabels": "", @@ -60,45 +44,23 @@ "storagetype": "dynamic", "supplementalgroups": "" }, - "backrestResources": {}, "ccpimage": "crunchy-postgres-ha", - "ccpimagetag": "centos7-12.4-4.5.0", + "ccpimagetag": "ubi8-13.10-4.6.10", "clustername": "fromcrd", - "customconfig": "", "database": "userdb", "exporterport": "9187", "name": "fromcrd", "namespace": "pgouser1", - "pgBouncer": { - "replicas": 0, - "resources": {} - }, "pgbadgerport": "10000", "podPodAntiAffinity": { "default": "preferred", "pgBackRest": "preferred", "pgBouncer": "preferred" }, - "policies": "", "port": "5432", - "primarysecretname": "fromcrd-primaryuser-secret", - "replicas": "0", - "rootsecretname": "fromcrd-postgres-secret", - "secretfrom": "", - "shutdown": false, - "standby": false, - "status": "", - "syncReplication": null, - "tablespaceMounts": {}, - "tls": {}, "user": "testuser", "userlabels": { - "crunchy-postgres-exporter": "false", - "pg-pod-anti-affinity": "", - "pgo-version": "4.5.0", - "pgouser": "pgoadmin", - "pgo-backrest": "true" - }, - "usersecretname": "fromcrd-testuser-secret" + "pgo-version": "4.6.10" + } } } diff --git a/examples/create-by-resource/primaryuser-secret.yaml b/examples/create-by-resource/primaryuser-secret.yaml deleted file mode 100644 index 15ee8ad665..0000000000 --- 
a/examples/create-by-resource/primaryuser-secret.yaml +++ /dev/null @@ -1,11 +0,0 @@ -apiVersion: v1 -data: - password: d0ZvYWlRZFhPTQ== - username: cHJpbWFyeXVzZXI= -kind: Secret -metadata: - labels: - pg-cluster: fromcrd - name: fromcrd-primaryuser-secret - namespace: pgouser1 -type: Opaque diff --git a/examples/create-by-resource/run.sh b/examples/create-by-resource/run.sh index 1cdefdda77..bca4f09edd 100755 --- a/examples/create-by-resource/run.sh +++ b/examples/create-by-resource/run.sh @@ -1,6 +1,6 @@ #!/bin/bash -# Copyright 2019 - 2020 Crunchy Data Solutions, Inc. +# Copyright 2019 - 2023 Crunchy Data Solutions, Inc. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at @@ -18,6 +18,8 @@ ######### DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +# PGO_CMD should either be "kubectl" or "oc" -- defaulting to kubectl +PGO_CMD=${PGO_CMD:-kubectl} # A namespace that exists in NAMESPACE env var - see examples/envs.sh export NS=pgouser1 @@ -41,49 +43,9 @@ rm $DIR/fromcrd-key $DIR/fromcrd-key.pub # EXAMPLE RUN # ############### -# generate a SSH public/private keypair for use by pgBackRest -ssh-keygen -t ed25519 -N '' -f $DIR/fromcrd-key - -# base64 encoded the keys for the generation of the Kube secret, and place -# them into variables temporarily -PUBLIC_KEY_TEMP=$(cat $DIR/fromcrd-key.pub | base64) -PRIVATE_KEY_TEMP=$(cat $DIR/fromcrd-key | base64) - -export PUBLIC_KEY="${PUBLIC_KEY_TEMP//[$'\n']}" -export PRIVATE_KEY="${PRIVATE_KEY_TEMP//[$'\n']}" - -unset PUBLIC_KEY_TEMP -unset PRIVATE_KEY_TEMP - -# create the backrest-repo-config example file and substitute in the newly -# created keys -cat <<-EOF > $DIR/backrest-repo-config.yaml -apiVersion: v1 -data: - authorized_keys: ${PUBLIC_KEY} - id_ed25519: ${PRIVATE_KEY} - ssh_host_ed25519_key: ${PRIVATE_KEY} - config: SG9zdCAqClN0cmljdEhvc3RLZXlDaGVja2luZyBubwpJZGVudGl0eUZpbGUgL3RtcC9pZF9lZDI1NTE5ClBvcnQgMjAyMgpVc2VyIHBnYmFja3Jlc3QK - sshd_config: 
IwkkT3BlbkJTRDogc3NoZF9jb25maWcsdiAxLjEwMCAyMDE2LzA4LzE1IDEyOjMyOjA0IG5hZGR5IEV4cCAkCgojIFRoaXMgaXMgdGhlIHNzaGQgc2VydmVyIHN5c3RlbS13aWRlIGNvbmZpZ3VyYXRpb24gZmlsZS4gIFNlZQojIHNzaGRfY29uZmlnKDUpIGZvciBtb3JlIGluZm9ybWF0aW9uLgoKIyBUaGlzIHNzaGQgd2FzIGNvbXBpbGVkIHdpdGggUEFUSD0vdXNyL2xvY2FsL2JpbjovdXNyL2JpbgoKIyBUaGUgc3RyYXRlZ3kgdXNlZCBmb3Igb3B0aW9ucyBpbiB0aGUgZGVmYXVsdCBzc2hkX2NvbmZpZyBzaGlwcGVkIHdpdGgKIyBPcGVuU1NIIGlzIHRvIHNwZWNpZnkgb3B0aW9ucyB3aXRoIHRoZWlyIGRlZmF1bHQgdmFsdWUgd2hlcmUKIyBwb3NzaWJsZSwgYnV0IGxlYXZlIHRoZW0gY29tbWVudGVkLiAgVW5jb21tZW50ZWQgb3B0aW9ucyBvdmVycmlkZSB0aGUKIyBkZWZhdWx0IHZhbHVlLgoKIyBJZiB5b3Ugd2FudCB0byBjaGFuZ2UgdGhlIHBvcnQgb24gYSBTRUxpbnV4IHN5c3RlbSwgeW91IGhhdmUgdG8gdGVsbAojIFNFTGludXggYWJvdXQgdGhpcyBjaGFuZ2UuCiMgc2VtYW5hZ2UgcG9ydCAtYSAtdCBzc2hfcG9ydF90IC1wIHRjcCAjUE9SVE5VTUJFUgojClBvcnQgMjAyMgojQWRkcmVzc0ZhbWlseSBhbnkKI0xpc3RlbkFkZHJlc3MgMC4wLjAuMAojTGlzdGVuQWRkcmVzcyA6OgoKSG9zdEtleSAvc3NoZC9zc2hfaG9zdF9lZDI1NTE5X2tleQoKIyBDaXBoZXJzIGFuZCBrZXlpbmcKI1Jla2V5TGltaXQgZGVmYXVsdCBub25lCgojIExvZ2dpbmcKI1N5c2xvZ0ZhY2lsaXR5IEFVVEgKU3lzbG9nRmFjaWxpdHkgQVVUSFBSSVYKI0xvZ0xldmVsIElORk8KCiMgQXV0aGVudGljYXRpb246CgojTG9naW5HcmFjZVRpbWUgMm0KUGVybWl0Um9vdExvZ2luIG5vClN0cmljdE1vZGVzIG5vCiNNYXhBdXRoVHJpZXMgNgojTWF4U2Vzc2lvbnMgMTAKClB1YmtleUF1dGhlbnRpY2F0aW9uIHllcwoKIyBUaGUgZGVmYXVsdCBpcyB0byBjaGVjayBib3RoIC5zc2gvYXV0aG9yaXplZF9rZXlzIGFuZCAuc3NoL2F1dGhvcml6ZWRfa2V5czIKIyBidXQgdGhpcyBpcyBvdmVycmlkZGVuIHNvIGluc3RhbGxhdGlvbnMgd2lsbCBvbmx5IGNoZWNrIC5zc2gvYXV0aG9yaXplZF9rZXlzCkF1dGhvcml6ZWRLZXlzRmlsZQkvc3NoZC9hdXRob3JpemVkX2tleXMKCiNBdXRob3JpemVkUHJpbmNpcGFsc0ZpbGUgbm9uZQoKI0F1dGhvcml6ZWRLZXlzQ29tbWFuZCBub25lCiNBdXRob3JpemVkS2V5c0NvbW1hbmRVc2VyIG5vYm9keQoKIyBGb3IgdGhpcyB0byB3b3JrIHlvdSB3aWxsIGFsc28gbmVlZCBob3N0IGtleXMgaW4gL2V0Yy9zc2gvc3NoX2tub3duX2hvc3RzCiNIb3N0YmFzZWRBdXRoZW50aWNhdGlvbiBubwojIENoYW5nZSB0byB5ZXMgaWYgeW91IGRvbid0IHRydXN0IH4vLnNzaC9rbm93bl9ob3N0cyBmb3IKIyBIb3N0YmFzZWRBdXRoZW50aWNhdGlvbgojSWdub3JlVXNlcktub3duSG9zdHMgbm8KIyBEb24ndCByZWFkIHRoZSB1c2VyJ3Mgfi8ucmhvc3RzIGFuZCB+Ly5zaG9zdHMgZmlsZXMKI0lnbm9yZVJob3N0cyB5ZXMKCiMgVG8gZGlzYWJsZSB0dW5uZWxlZCBjbGVhciB0ZXh0IHBhc3N3b3JkcywgY2hhbmdlIHRvIG5vIGhlcmUhCiNQYXNzd29yZEF1dGhlbnRpY2F0aW9uIHllcwojUGVybWl0RW1wdHlQYXNzd29yZHMgbm8KUGFzc3dvcmRBdXRoZW50aWNhdGlvbiBubwoKIyBDaGFuZ2UgdG8gbm8gdG8gZGlzYWJsZSBzL2tleSBwYXNzd29yZHMKQ2hhbGxlbmdlUmVzcG9uc2VBdXRoZW50aWNhdGlvbiB5ZXMKI0NoYWxsZW5nZVJlc3BvbnNlQXV0aGVudGljYXRpb24gbm8KCiMgS2VyYmVyb3Mgb3B0aW9ucwojS2VyYmVyb3NBdXRoZW50aWNhdGlvbiBubwojS2VyYmVyb3NPckxvY2FsUGFzc3dkIHllcwojS2VyYmVyb3NUaWNrZXRDbGVhbnVwIHllcwojS2VyYmVyb3NHZXRBRlNUb2tlbiBubwojS2VyYmVyb3NVc2VLdXNlcm9rIHllcwoKIyBHU1NBUEkgb3B0aW9ucwojR1NTQVBJQXV0aGVudGljYXRpb24geWVzCiNHU1NBUElDbGVhbnVwQ3JlZGVudGlhbHMgbm8KI0dTU0FQSVN0cmljdEFjY2VwdG9yQ2hlY2sgeWVzCiNHU1NBUElLZXlFeGNoYW5nZSBubwojR1NTQVBJRW5hYmxlazV1c2VycyBubwoKIyBTZXQgdGhpcyB0byAneWVzJyB0byBlbmFibGUgUEFNIGF1dGhlbnRpY2F0aW9uLCBhY2NvdW50IHByb2Nlc3NpbmcsCiMgYW5kIHNlc3Npb24gcHJvY2Vzc2luZy4gSWYgdGhpcyBpcyBlbmFibGVkLCBQQU0gYXV0aGVudGljYXRpb24gd2lsbAojIGJlIGFsbG93ZWQgdGhyb3VnaCB0aGUgQ2hhbGxlbmdlUmVzcG9uc2VBdXRoZW50aWNhdGlvbiBhbmQKIyBQYXNzd29yZEF1dGhlbnRpY2F0aW9uLiAgRGVwZW5kaW5nIG9uIHlvdXIgUEFNIGNvbmZpZ3VyYXRpb24sCiMgUEFNIGF1dGhlbnRpY2F0aW9uIHZpYSBDaGFsbGVuZ2VSZXNwb25zZUF1dGhlbnRpY2F0aW9uIG1heSBieXBhc3MKIyB0aGUgc2V0dGluZyBvZiAiUGVybWl0Um9vdExvZ2luIHdpdGhvdXQtcGFzc3dvcmQiLgojIElmIHlvdSBqdXN0IHdhbnQgdGhlIFBBTSBhY2NvdW50IGFuZCBzZXNzaW9uIGNoZWNrcyB0byBydW4gd2l0aG91dAojIFBBTSBhdXRoZW50aWNhdGlvbiwgdGhlbiBlbmFibGUgdGhpcyBidXQgc2V0IFBhc3N3b3JkQXV0aGVudGljYXRpb24KIyBhbmQgQ2hhbGxlbmdlUmVzcG9uc2VBdXRoZW50aWNhdGlvbiB0byA
nbm8nLgojIFdBUk5JTkc6ICdVc2VQQU0gbm8nIGlzIG5vdCBzdXBwb3J0ZWQgaW4gUmVkIEhhdCBFbnRlcnByaXNlIExpbnV4IGFuZCBtYXkgY2F1c2Ugc2V2ZXJhbAojIHByb2JsZW1zLgpVc2VQQU0geWVzIAoKI0FsbG93QWdlbnRGb3J3YXJkaW5nIHllcwojQWxsb3dUY3BGb3J3YXJkaW5nIHllcwojR2F0ZXdheVBvcnRzIG5vClgxMUZvcndhcmRpbmcgeWVzCiNYMTFEaXNwbGF5T2Zmc2V0IDEwCiNYMTFVc2VMb2NhbGhvc3QgeWVzCiNQZXJtaXRUVFkgeWVzCiNQcmludE1vdGQgeWVzCiNQcmludExhc3RMb2cgeWVzCiNUQ1BLZWVwQWxpdmUgeWVzCiNVc2VMb2dpbiBubwpVc2VQcml2aWxlZ2VTZXBhcmF0aW9uIG5vCiNQZXJtaXRVc2VyRW52aXJvbm1lbnQgbm8KI0NvbXByZXNzaW9uIGRlbGF5ZWQKI0NsaWVudEFsaXZlSW50ZXJ2YWwgMAojQ2xpZW50QWxpdmVDb3VudE1heCAzCiNTaG93UGF0Y2hMZXZlbCBubwojVXNlRE5TIHllcwojUGlkRmlsZSAvdmFyL3J1bi9zc2hkLnBpZAojTWF4U3RhcnR1cHMgMTA6MzA6MTAwCiNQZXJtaXRUdW5uZWwgbm8KI0Nocm9vdERpcmVjdG9yeSBub25lCiNWZXJzaW9uQWRkZW5kdW0gbm9uZQoKIyBubyBkZWZhdWx0IGJhbm5lciBwYXRoCiNCYW5uZXIgbm9uZQoKIyBBY2NlcHQgbG9jYWxlLXJlbGF0ZWQgZW52aXJvbm1lbnQgdmFyaWFibGVzCkFjY2VwdEVudiBMQU5HIExDX0NUWVBFIExDX05VTUVSSUMgTENfVElNRSBMQ19DT0xMQVRFIExDX01PTkVUQVJZIExDX01FU1NBR0VTCkFjY2VwdEVudiBMQ19QQVBFUiBMQ19OQU1FIExDX0FERFJFU1MgTENfVEVMRVBIT05FIExDX01FQVNVUkVNRU5UCkFjY2VwdEVudiBMQ19JREVOVElGSUNBVElPTiBMQ19BTEwgTEFOR1VBR0UKQWNjZXB0RW52IFhNT0RJRklFUlMKCiMgb3ZlcnJpZGUgZGVmYXVsdCBvZiBubyBzdWJzeXN0ZW1zClN1YnN5c3RlbQlzZnRwCS91c3IvbGliZXhlYy9vcGVuc3NoL3NmdHAtc2VydmVyCgojIEV4YW1wbGUgb2Ygb3ZlcnJpZGluZyBzZXR0aW5ncyBvbiBhIHBlci11c2VyIGJhc2lzCiNNYXRjaCBVc2VyIGFub25jdnMKIwlYMTFGb3J3YXJkaW5nIG5vCiMJQWxsb3dUY3BGb3J3YXJkaW5nIG5vCiMJUGVybWl0VFRZIG5vCiMJRm9yY2VDb21tYW5kIGN2cyBzZXJ2ZXI= -kind: Secret -metadata: - labels: - pg-cluster: fromcrd - pgo-backrest-repo: "true" - name: fromcrd-backrest-repo-config - namespace: ${NS} -type: Opaque -EOF - -# unset the *_KEY environmental variables -unset PUBLIC_KEY -unset PRIVATE_KEY - # create the required postgres credentials for the fromcrd cluster $PGO_CMD -n $NS create -f $DIR/postgres-secret.yaml -$PGO_CMD -n $NS create -f $DIR/primaryuser-secret.yaml $PGO_CMD -n $NS create -f $DIR/testuser-secret.yaml -$PGO_CMD -n $NS create -f $DIR/backrest-repo-config.yaml # create the pgcluster CRD for the fromcrd cluster $PGO_CMD -n $NS create -f $DIR/fromcrd.json diff --git a/examples/custom-config/create.sh b/examples/custom-config/create.sh index b0599f1b37..e502bdbada 100755 --- a/examples/custom-config/create.sh +++ b/examples/custom-config/create.sh @@ -1,6 +1,6 @@ #!/bin/bash -# Copyright 2018 - 2020 Crunchy Data Solutions, Inc. +# Copyright 2018 - 2023 Crunchy Data Solutions, Inc. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at @@ -28,11 +28,8 @@ function echo_info() { DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" -#Error if PGO_CMD not set -if [[ -z ${PGO_CMD} ]] -then - echo_err "PGO_CMD is not set." 
-fi
+# PGO_CMD should either be "kubectl" or "oc" -- defaulting to kubectl
+PGO_CMD=${PGO_CMD:-kubectl}

#Error is PGO_NAMESPACE not set
if [[ -z ${PGO_NAMESPACE} ]]
diff --git a/examples/custom-config/postgres-ha.yaml b/examples/custom-config/postgres-ha.yaml
index 0f4cd6fbab..5d823d4a81 100644
--- a/examples/custom-config/postgres-ha.yaml
+++ b/examples/custom-config/postgres-ha.yaml
@@ -12,10 +12,9 @@ bootstrap:
      shared_buffers: 256MB
      temp_buffers: 10MB
      work_mem: 5MB
-postgresql: 
+postgresql:
  pg_hba:
    - local all postgres peer
-    - local all crunchyadm peer
    - host replication primaryuser 0.0.0.0/0 md5
    - host all primaryuser 0.0.0.0/0 reject
    - host all postgres 0.0.0.0/0 md5
diff --git a/examples/custom-config/setup.sql b/examples/custom-config/setup.sql
index 206005eb8a..01942a034e 100644
--- a/examples/custom-config/setup.sql
+++ b/examples/custom-config/setup.sql
@@ -1,5 +1,5 @@
/*
- * Copyright 2017 - 2020 Crunchy Data Solutions, Inc.
+ * Copyright 2017 - 2023 Crunchy Data Solutions, Inc.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
diff --git a/examples/envs.sh b/examples/envs.sh
index 10758085bd..3824b69682 100644
--- a/examples/envs.sh
+++ b/examples/envs.sh
@@ -19,8 +19,8 @@ export PGO_CONF_DIR=$PGOROOT/installers/ansible/roles/pgo-operator/files

# the version of the Operator you run is set by these vars
export PGO_IMAGE_PREFIX=registry.developers.crunchydata.com/crunchydata
-export PGO_BASEOS=centos7
-export PGO_VERSION=4.5.0
+export PGO_BASEOS=ubi8
+export PGO_VERSION=4.6.10
export PGO_IMAGE_TAG=$PGO_BASEOS-$PGO_VERSION

# for setting the pgo apiserver port, disabling TLS or not verifying TLS
diff --git a/examples/helm/README.md b/examples/helm/README.md
index 09d06cbeb3..fc2a0adec2 100644
--- a/examples/helm/README.md
+++ b/examples/helm/README.md
@@ -1,23 +1,23 @@
-# create-cluster
+# Create a Postgres Cluster

-This is a working example of how to create a cluster via the crd workflow
-using a helm chart
+This is a working example of how to create a PostgreSQL cluster using a [Helm](https://helm.sh/) chart.

-## Assumptions
-This example assumes you have the Crunchy PostgreSQL Operator installed
-in a namespace called pgo.
+## Prerequisites

-## Helm
-Helm will also need to be installed for this example to run
+### Postgres Operator

-## Documenation
-Please see the documentation for more guidance using custom resources:
+This example assumes you have the [Crunchy PostgreSQL Operator installed](https://access.crunchydata.com/documentation/postgres-operator/latest/quickstart/) in a namespace called `pgo`.
+
+### Helm
+
+To execute a Helm chart, [Helm](https://helm.sh/) needs to be installed in your local environment.

-https://access.crunchydata.com/documentation/postgres-operator/latest/custom-resources/
+## Setup

+If you are running Postgres Operator 4.5.1 or later, you can skip the step below.
+
+### Before 4.5.1

-## Example set up and execution
-create a certs directy and generate certs
```
cd postgres-operator/examples/helm/create-cluster

export pgo_cluster_name=hippo

# generate a SSH public/private keypair for use by pgBackRest
ssh-keygen -t ed25519 -N '' -f "${pgo_cluster_name}-key"
-
```

-For this example we will deploy the cluster into the pgo
-namespace where the opertor is installed and running. 
-return to the create-cluster directory
+
+## Running the Example
+
+### Download the Helm Chart
+
+For this example, we will deploy the cluster into the `pgo` namespace where the Postgres Operator is installed and running.
+
+You will need to download this Helm chart. One way to do this is by cloning the Postgres Operator project into your local environment:
+
```
-cd postgres-operator/examples/helm/create-cluster
+git clone https://github.com/CrunchyData/postgres-operator.git
```

-The following commands will allow you to execute a dry run first with debug
-if you want to verify everthing is set correctly. Then after everything looks good
-run the install command with out the flags
+Go into the directory that contains the Helm chart for creating a PostgreSQL cluster:
+
```
-helm install --dry-run --debug postgres-operator-create-cluster . -n pgo
+cd postgres-operator/examples/helm
+```
+
+### Set Values
+
+There are only three required values to run the Helm chart:
+
+- `name`: The name of your PostgreSQL cluster.
+- `namespace`: The namespace where the PostgreSQL cluster should be deployed.
+- `password`: A password for the user that will be allowed to connect to the database.
+
+The following values can also be set:
+
+- `cpu`: The CPU limit for the PostgreSQL cluster. Follows standard Kubernetes formatting.
+- `diskSize`: The size of the PVC for the PostgreSQL cluster. Follows standard Kubernetes formatting.
+- `ha`: Whether or not to deploy a high availability PostgreSQL cluster. Can be either `true` or `false`, defaults to `false`.
+- `imagePrefix`: The prefix of the container images to use for this PostgreSQL cluster. Defaults to `registry.developers.crunchydata.com/crunchydata`.
+- `image`: The name of the container image to use for the PostgreSQL cluster. Defaults to `crunchy-postgres-ha`.
+- `imageTag`: The container image tag to use. Defaults to `ubi8-13.10-4.6.10`.
+- `memory`: The memory limit for the PostgreSQL cluster. Follows standard Kubernetes formatting.
+- `monitoring`: Whether or not to enable monitoring / metrics collection for this PostgreSQL instance. Can either be `true` or `false`, defaults to `false`.

-helm install postgres-operator-create-cluster . -n pgo
+### Execute the Chart
+
+The following commands will allow you to execute a dry run first with debug output
+if you want to verify everything is set correctly. Then, after everything looks
+good, run the install command without those flags:
+
+```
+helm install -n pgo --dry-run --debug postgres-cluster postgres
+helm install -n pgo postgres-cluster postgres
```
+
+This will deploy a PostgreSQL cluster with the specified name into the specified namespace.
+
## Verify

-Now you can your Hippo cluster has deployed into the pgo
-namespace by running these few commands
+
+You can verify that your PostgreSQL cluster is deployed into the `pgo` namespace by running the following command:

```
kubectl get all -n pgo
+```

-pgo test hippo -n pgo
+Once your PostgreSQL cluster is provisioned, you can connect to it.
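To confirm that provisioning has finished before you connect, one option (a sketch, assuming you also have the `pgo` client configured) is the client's built-in connection check:

```
pgo test hippo -n pgo
```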
Assuming you are using the default value of `hippo` for the name of the cluster, in a new terminal window, set up a port forward to the PostgreSQL cluster:

-pgo show cluster hippo -n pgo
```
-## NOTE
-As of operator version 4.5.0 when using helm uninstall you will have to manually
-clean up some left over artifacts afer running the unistall
+kubectl -n pgo port-forward svc/hippo 5432:5432
+```
+
+Still assuming you are using the default values for this Helm chart, you can connect to the Postgres cluster with the following command:
+
+```
+PGPASSWORD="W4tch0ut4hippo$" psql -h localhost -U hippo hippo
+```
+
+## Notes
+
+Prior to PostgreSQL Operator 4.6.0, you will have to manually clean up some of the artifacts when running `helm uninstall`.
+
+## Additional Resources
+
+Please see the documentation for more guidance using custom resources:
+[https://access.crunchydata.com/documentation/postgres-operator/latest/custom-resources/](https://access.crunchydata.com/documentation/postgres-operator/latest/custom-resources/)
diff --git a/examples/helm/create-cluster/templates/backrest-repo-config.yaml b/examples/helm/create-cluster/templates/backrest-repo-config.yaml
deleted file mode 100644
index 166d0b3dcd..0000000000
--- a/examples/helm/create-cluster/templates/backrest-repo-config.yaml
+++ /dev/null
@@ -1,16 +0,0 @@
-apiVersion: v1
-data:
-  authorized_keys: {{ .Files.Get "certs/hippo-key.pub" | b64enc }}
-  config: SG9zdCAqClN0cmljdEhvc3RLZXlDaGVja2luZyBubwpJZGVudGl0eUZpbGUgL3RtcC9pZF9lZDI1NTE5ClBvcnQgMjAyMgpVc2VyIHBnYmFja3Jlc3QK
-  id_ed25519: {{ .Files.Get "certs/hippo-key" | b64enc }}
-  ssh_host_ed25519_key: {{ .Files.Get "certs/hippo-key" | b64enc }}
-  sshd_config: IwkkT3BlbkJTRDogc3NoZF9jb25maWcsdiAxLjEwMCAyMDE2LzA4LzE1IDEyOjMyOjA0IG5hZGR5IEV4cCAkCgojIFRoaXMgaXMgdGhlIHNzaGQgc2VydmVyIHN5c3RlbS13aWRlIGNvbmZpZ3VyYXRpb24gZmlsZS4gIFNlZQojIHNzaGRfY29uZmlnKDUpIGZvciBtb3JlIGluZm9ybWF0aW9uLgoKIyBUaGlzIHNzaGQgd2FzIGNvbXBpbGVkIHdpdGggUEFUSD0vdXNyL2xvY2FsL2JpbjovdXNyL2JpbgoKIyBUaGUgc3RyYXRlZ3kgdXNlZCBmb3Igb3B0aW9ucyBpbiB0aGUgZGVmYXVsdCBzc2hkX2NvbmZpZyBzaGlwcGVkIHdpdGgKIyBPcGVuU1NIIGlzIHRvIHNwZWNpZnkgb3B0aW9ucyB3aXRoIHRoZWlyIGRlZmF1bHQgdmFsdWUgd2hlcmUKIyBwb3NzaWJsZSwgYnV0IGxlYXZlIHRoZW0gY29tbWVudGVkLiAgVW5jb21tZW50ZWQgb3B0aW9ucyBvdmVycmlkZSB0aGUKIyBkZWZhdWx0IHZhbHVlLgoKIyBJZiB5b3Ugd2FudCB0byBjaGFuZ2UgdGhlIHBvcnQgb24gYSBTRUxpbnV4IHN5c3RlbSwgeW91IGhhdmUgdG8gdGVsbAojIFNFTGludXggYWJvdXQgdGhpcyBjaGFuZ2UuCiMgc2VtYW5hZ2UgcG9ydCAtYSAtdCBzc2hfcG9ydF90IC1wIHRjcCAjUE9SVE5VTUJFUgojClBvcnQgMjAyMgojQWRkcmVzc0ZhbWlseSBhbnkKI0xpc3RlbkFkZHJlc3MgMC4wLjAuMAojTGlzdGVuQWRkcmVzcyA6OgoKSG9zdEtleSAvc3NoZC9zc2hfaG9zdF9lZDI1NTE5X2tleQoKIyBDaXBoZXJzIGFuZCBrZXlpbmcKI1Jla2V5TGltaXQgZGVmYXVsdCBub25lCgojIExvZ2dpbmcKI1N5c2xvZ0ZhY2lsaXR5IEFVVEgKU3lzbG9nRmFjaWxpdHkgQVVUSFBSSVYKI0xvZ0xldmVsIElORk8KCiMgQXV0aGVudGljYXRpb246CgojTG9naW5HcmFjZVRpbWUgMm0KUGVybWl0Um9vdExvZ2luIG5vClN0cmljdE1vZGVzIG5vCiNNYXhBdXRoVHJpZXMgNgojTWF4U2Vzc2lvbnMgMTAKClB1YmtleUF1dGhlbnRpY2F0aW9uIHllcwoKIyBUaGUgZGVmYXVsdCBpcyB0byBjaGVjayBib3RoIC5zc2gvYXV0aG9yaXplZF9rZXlzIGFuZCAuc3NoL2F1dGhvcml6ZWRfa2V5czIKIyBidXQgdGhpcyBpcyBvdmVycmlkZGVuIHNvIGluc3RhbGxhdGlvbnMgd2lsbCBvbmx5IGNoZWNrIC5zc2gvYXV0aG9yaXplZF9rZXlzCiNBdXRob3JpemVkS2V5c0ZpbGUJL3BnY29uZi9hdXRob3JpemVkX2tleXMKQXV0aG9yaXplZEtleXNGaWxlCS9zc2hkL2F1dGhvcml6ZWRfa2V5cwoKI0F1dGhvcml6ZWRQcmluY2lwYWxzRmlsZSBub25lCgojQXV0aG9yaXplZEtleXNDb21tYW5kIG5vbmUKI0F1dGhvcml6ZWRLZXlzQ29tbWFuZFVzZXIgbm9ib2R5CgojIEZvciB0aGlzIHRvIHdvcmsgeW91IHdpbGwgYWxzbyBuZWVkIGhvc3Qga2V5cyBpbiAvZXRjL3NzaC9zc2hfa25vd25faG9zdHMKI0hvc3RiYXNlZEF1dGhlbnRpY2F
0aW9uIG5vCiMgQ2hhbmdlIHRvIHllcyBpZiB5b3UgZG9uJ3QgdHJ1c3Qgfi8uc3NoL2tub3duX2hvc3RzIGZvcgojIEhvc3RiYXNlZEF1dGhlbnRpY2F0aW9uCiNJZ25vcmVVc2VyS25vd25Ib3N0cyBubwojIERvbid0IHJlYWQgdGhlIHVzZXIncyB+Ly5yaG9zdHMgYW5kIH4vLnNob3N0cyBmaWxlcwojSWdub3JlUmhvc3RzIHllcwoKIyBUbyBkaXNhYmxlIHR1bm5lbGVkIGNsZWFyIHRleHQgcGFzc3dvcmRzLCBjaGFuZ2UgdG8gbm8gaGVyZSEKI1Bhc3N3b3JkQXV0aGVudGljYXRpb24geWVzCiNQZXJtaXRFbXB0eVBhc3N3b3JkcyBubwpQYXNzd29yZEF1dGhlbnRpY2F0aW9uIG5vCgojIENoYW5nZSB0byBubyB0byBkaXNhYmxlIHMva2V5IHBhc3N3b3JkcwpDaGFsbGVuZ2VSZXNwb25zZUF1dGhlbnRpY2F0aW9uIHllcwojQ2hhbGxlbmdlUmVzcG9uc2VBdXRoZW50aWNhdGlvbiBubwoKIyBLZXJiZXJvcyBvcHRpb25zCiNLZXJiZXJvc0F1dGhlbnRpY2F0aW9uIG5vCiNLZXJiZXJvc09yTG9jYWxQYXNzd2QgeWVzCiNLZXJiZXJvc1RpY2tldENsZWFudXAgeWVzCiNLZXJiZXJvc0dldEFGU1Rva2VuIG5vCiNLZXJiZXJvc1VzZUt1c2Vyb2sgeWVzCgojIEdTU0FQSSBvcHRpb25zCiNHU1NBUElBdXRoZW50aWNhdGlvbiB5ZXMKI0dTU0FQSUNsZWFudXBDcmVkZW50aWFscyBubwojR1NTQVBJU3RyaWN0QWNjZXB0b3JDaGVjayB5ZXMKI0dTU0FQSUtleUV4Y2hhbmdlIG5vCiNHU1NBUElFbmFibGVrNXVzZXJzIG5vCgojIFNldCB0aGlzIHRvICd5ZXMnIHRvIGVuYWJsZSBQQU0gYXV0aGVudGljYXRpb24sIGFjY291bnQgcHJvY2Vzc2luZywKIyBhbmQgc2Vzc2lvbiBwcm9jZXNzaW5nLiBJZiB0aGlzIGlzIGVuYWJsZWQsIFBBTSBhdXRoZW50aWNhdGlvbiB3aWxsCiMgYmUgYWxsb3dlZCB0aHJvdWdoIHRoZSBDaGFsbGVuZ2VSZXNwb25zZUF1dGhlbnRpY2F0aW9uIGFuZAojIFBhc3N3b3JkQXV0aGVudGljYXRpb24uICBEZXBlbmRpbmcgb24geW91ciBQQU0gY29uZmlndXJhdGlvbiwKIyBQQU0gYXV0aGVudGljYXRpb24gdmlhIENoYWxsZW5nZVJlc3BvbnNlQXV0aGVudGljYXRpb24gbWF5IGJ5cGFzcwojIHRoZSBzZXR0aW5nIG9mICJQZXJtaXRSb290TG9naW4gd2l0aG91dC1wYXNzd29yZCIuCiMgSWYgeW91IGp1c3Qgd2FudCB0aGUgUEFNIGFjY291bnQgYW5kIHNlc3Npb24gY2hlY2tzIHRvIHJ1biB3aXRob3V0CiMgUEFNIGF1dGhlbnRpY2F0aW9uLCB0aGVuIGVuYWJsZSB0aGlzIGJ1dCBzZXQgUGFzc3dvcmRBdXRoZW50aWNhdGlvbgojIGFuZCBDaGFsbGVuZ2VSZXNwb25zZUF1dGhlbnRpY2F0aW9uIHRvICdubycuCiMgV0FSTklORzogJ1VzZVBBTSBubycgaXMgbm90IHN1cHBvcnRlZCBpbiBSZWQgSGF0IEVudGVycHJpc2UgTGludXggYW5kIG1heSBjYXVzZSBzZXZlcmFsCiMgcHJvYmxlbXMuClVzZVBBTSB5ZXMKCiNBbGxvd0FnZW50Rm9yd2FyZGluZyB5ZXMKI0FsbG93VGNwRm9yd2FyZGluZyB5ZXMKI0dhdGV3YXlQb3J0cyBubwpYMTFGb3J3YXJkaW5nIHllcwojWDExRGlzcGxheU9mZnNldCAxMAojWDExVXNlTG9jYWxob3N0IHllcwojUGVybWl0VFRZIHllcwojUHJpbnRNb3RkIHllcwojUHJpbnRMYXN0TG9nIHllcwojVENQS2VlcEFsaXZlIHllcwojVXNlTG9naW4gbm8KVXNlUHJpdmlsZWdlU2VwYXJhdGlvbiBubwojUGVybWl0VXNlckVudmlyb25tZW50IG5vCiNDb21wcmVzc2lvbiBkZWxheWVkCiNDbGllbnRBbGl2ZUludGVydmFsIDAKI0NsaWVudEFsaXZlQ291bnRNYXggMwojU2hvd1BhdGNoTGV2ZWwgbm8KI1VzZUROUyB5ZXMKI1BpZEZpbGUgL3Zhci9ydW4vc3NoZC5waWQKI01heFN0YXJ0dXBzIDEwOjMwOjEwMAojUGVybWl0VHVubmVsIG5vCiNDaHJvb3REaXJlY3Rvcnkgbm9uZQojVmVyc2lvbkFkZGVuZHVtIG5vbmUKCiMgbm8gZGVmYXVsdCBiYW5uZXIgcGF0aAojQmFubmVyIG5vbmUKCiMgQWNjZXB0IGxvY2FsZS1yZWxhdGVkIGVudmlyb25tZW50IHZhcmlhYmxlcwpBY2NlcHRFbnYgTEFORyBMQ19DVFlQRSBMQ19OVU1FUklDIExDX1RJTUUgTENfQ09MTEFURSBMQ19NT05FVEFSWSBMQ19NRVNTQUdFUwpBY2NlcHRFbnYgTENfUEFQRVIgTENfTkFNRSBMQ19BRERSRVNTIExDX1RFTEVQSE9ORSBMQ19NRUFTVVJFTUVOVApBY2NlcHRFbnYgTENfSURFTlRJRklDQVRJT04gTENfQUxMIExBTkdVQUdFCkFjY2VwdEVudiBYTU9ESUZJRVJTCgojIG92ZXJyaWRlIGRlZmF1bHQgb2Ygbm8gc3Vic3lzdGVtcwpTdWJzeXN0ZW0Jc2Z0cAkvdXNyL2xpYmV4ZWMvb3BlbnNzaC9zZnRwLXNlcnZlcgoKIyBFeGFtcGxlIG9mIG92ZXJyaWRpbmcgc2V0dGluZ3Mgb24gYSBwZXItdXNlciBiYXNpcwojTWF0Y2ggVXNlciBhbm9uY3ZzCiMJWDExRm9yd2FyZGluZyBubwojCUFsbG93VGNwRm9yd2FyZGluZyBubwojCVBlcm1pdFRUWSBubwojCUZvcmNlQ29tbWFuZCBjdnMgc2VydmVyCg== -kind: Secret -metadata: - labels: - pg-cluster: {{ .Values.pgclustername }} - pgo-backrest-repo: "true" - vendor: crunchydata - name: {{ .Values.pgclustername }}-backrest-repo-config - namespace: {{ .Values.namespace }} -type: Opaque diff --git 
a/examples/helm/create-cluster/templates/hippo-secret.yaml b/examples/helm/create-cluster/templates/hippo-secret.yaml deleted file mode 100644 index 8e922196e1..0000000000 --- a/examples/helm/create-cluster/templates/hippo-secret.yaml +++ /dev/null @@ -1,12 +0,0 @@ -apiVersion: v1 -data: - password: {{ .Values.hipposecretpassword | b64enc }} - username: {{ .Values.hipposecretuser | b64enc }} -kind: Secret -metadata: - labels: - pg-cluster: {{ .Values.pgclustername }} - vendor: crunchydata - name: {{ .Values.pgclustername }}-hippo-secret - namespace: {{ .Values.namespace }} -type: Opaque diff --git a/examples/helm/create-cluster/templates/pgcluster.yaml b/examples/helm/create-cluster/templates/pgcluster.yaml deleted file mode 100644 index 9dc5a4655d..0000000000 --- a/examples/helm/create-cluster/templates/pgcluster.yaml +++ /dev/null @@ -1,95 +0,0 @@ -apiVersion: crunchydata.com/v1 -kind: Pgcluster -metadata: - annotations: - current-primary: {{ .Values.pgclustername }} - labels: - autofail: "true" - crunchy-pgbadger: "false" - crunchy-pgha-scope: {{ .Values.pgclustername }} - crunchy-postgres-exporter: "false" - deployment-name: {{ .Values.pgclustername }} - name: {{ .Values.pgclustername }} - pg-cluster: {{ .Values.pgclustername }} - pg-pod-anti-affinity: "" - pgo-backrest: "true" - pgo-version: 4.5.0 - pgouser: admin - name: {{ .Values.pgclustername }} - namespace: {{ .Values.namespace }} -spec: - BackrestStorage: - accessmode: ReadWriteOnce - matchLabels: "" - name: "" - size: 3G - storageclass: "" - storagetype: dynamic - supplementalgroups: "" - PrimaryStorage: - accessmode: ReadWriteOnce - matchLabels: "" - name: {{ .Values.pgclustername }} - size: 3G - storageclass: "" - storagetype: dynamic - supplementalgroups: "" - ReplicaStorage: - accessmode: ReadWriteOnce - matchLabels: "" - name: "" - size: 3G - storageclass: "" - storagetype: dynamic - supplementalgroups: "" - annotations: - backrestLimits: {} - backrestRepoPath: "" - backrestResources: - memory: 48Mi - backrestS3Bucket: "" - backrestS3Endpoint: "" - backrestS3Region: "" - backrestS3URIStyle: "" - backrestS3VerifyTLS: "" - ccpimage: {{ .Values.ccpimage }} - ccpimageprefix: {{ .Values.ccpimageprefix }} - ccpimagetag: {{ .Values.ccpimagetag }} - clustername: {{ .Values.pgclustername }} - customconfig: "" - database: {{ .Values.pgclustername }} - exporterport: "9187" - limits: {} - name: {{ .Values.pgclustername }} - namespace: {{ .Values.namespace }} - pgBouncer: - limits: {} - replicas: 0 - pgDataSource: - restoreFrom: "" - restoreOpts: "" - pgbadgerport: "10000" - pgoimageprefix: {{ .Values.pgoimageprefix }} - podAntiAffinity: - default: preferred - pgBackRest: preferred - pgBouncer: preferred - policies: "" - port: "5432" - primarysecretname: {{ .Values.pgclustername }}-primaryuser-secret - replicas: "0" - rootsecretname: {{ .Values.pgclustername }}-postgres-secret - shutdown: false - standby: false - tablespaceMounts: {} - tls: - caSecret: "" - replicationTLSSecret: "" - tlsSecret: "" - tlsOnly: false - user: hippo - userlabels: - crunchy-postgres-exporter: "false" - pg-pod-anti-affinity: "" - pgo-version: {{ .Values.pgoversion }} - usersecretname: {{ .Values.pgclustername }}-hippo-secret diff --git a/examples/helm/create-cluster/templates/postgres-secret.yaml b/examples/helm/create-cluster/templates/postgres-secret.yaml deleted file mode 100644 index 914da77e1c..0000000000 --- a/examples/helm/create-cluster/templates/postgres-secret.yaml +++ /dev/null @@ -1,12 +0,0 @@ -apiVersion: v1 -data: - password: {{ 
.Values.postgressecretpassword | b64enc }} - username: {{ .Values.postgressecretuser | b64enc }} -kind: Secret -metadata: - labels: - pg-cluster: {{ .Values.pgclustername }} - vendor: crunchydata - name: {{ .Values.pgclustername }}-postgres-secret - namespace: {{ .Values.namespace }} -type: Opaque \ No newline at end of file diff --git a/examples/helm/create-cluster/templates/primaryuser-secret.yaml b/examples/helm/create-cluster/templates/primaryuser-secret.yaml deleted file mode 100644 index f4471b8fd2..0000000000 --- a/examples/helm/create-cluster/templates/primaryuser-secret.yaml +++ /dev/null @@ -1,12 +0,0 @@ -apiVersion: v1 -data: - password: {{ .Values.primaryusersecretpassword | b64enc }} - username: {{ .Values.primaryusersecretuser | b64enc }} -kind: Secret -metadata: - labels: - pg-cluster: {{ .Values.pgclustername }} - vendor: crunchydata - name: {{ .Values.pgclustername }}-primaryuser-secret - namespace: {{ .Values.namespace }} -type: Opaque \ No newline at end of file diff --git a/examples/helm/create-cluster/values.yaml b/examples/helm/create-cluster/values.yaml deleted file mode 100644 index 09438cb745..0000000000 --- a/examples/helm/create-cluster/values.yaml +++ /dev/null @@ -1,17 +0,0 @@ -# Default values for pg_deployment in SDX. -# This is a YAML-formatted file. -# Declare variables to be passed into your templates. -# The values is for the namespace and the postgresql cluster name -ccpimage: crunchy-postgres-ha -ccpimageprefix: registry.developers.crunchydata.com/crunchydata -ccpimagetag: centos7-12.4-4.5.0 -namespace: pgo -pgclustername: hippo -pgoimageprefix: registry.developers.crunchydata.com/crunchydata -pgoversion: 4.5.0 -hipposecretuser: "hippo" -hipposecretpassword: "Supersecurepassword*" -postgressecretuser: "postgres" -postgressecretpassword: "Anothersecurepassword*" -primaryusersecretuser: "primaryuser" -primaryusersecretpassword: "Moresecurepassword*" \ No newline at end of file diff --git a/examples/helm/create-cluster/.helmignore b/examples/helm/postgres/.helmignore similarity index 100% rename from examples/helm/create-cluster/.helmignore rename to examples/helm/postgres/.helmignore diff --git a/examples/helm/create-cluster/Chart.yaml b/examples/helm/postgres/Chart.yaml similarity index 88% rename from examples/helm/create-cluster/Chart.yaml rename to examples/helm/postgres/Chart.yaml index 5857415edb..b4b3fcecbc 100644 --- a/examples/helm/create-cluster/Chart.yaml +++ b/examples/helm/postgres/Chart.yaml @@ -1,6 +1,6 @@ apiVersion: v2 name: crunchycrdcluster -description: A Helm chart for Kubernetes +description: Helm chart for deploying a PostgreSQL cluster with the Crunchy PostgreSQL Operator # A chart can be either an 'application' or a 'library' chart. # @@ -15,9 +15,9 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.1.0 +version: 0.2.0 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to # follow Semantic Versioning. They should reflect the version the application is using. 
-appVersion: 1.16.0
+appVersion: 4.6.10
diff --git a/examples/helm/create-cluster/templates/NOTES.txt b/examples/helm/postgres/templates/NOTES.txt
similarity index 64%
rename from examples/helm/create-cluster/templates/NOTES.txt
rename to examples/helm/postgres/templates/NOTES.txt
index 542443a66e..4a3e324405 100644
--- a/examples/helm/create-cluster/templates/NOTES.txt
+++ b/examples/helm/postgres/templates/NOTES.txt
@@ -1,5 +1,3 @@
-Thank you deploying a crunchy postgreSQL cluster v{{ .Chart.AppVersion }}!
- (((((((((((((((((((((( (((((((((((((%%%%%%%((((((((((((((( (((((((((((%%% %%%%((((((((((((
@@ -30,5 +28,27 @@ Thank you deploying a crunchy postgreSQL cluster v{{ .Chart.AppVersion }}!
 ####%%% %%%%% % %% %%%%

+Thank you for deploying a Crunchy PostgreSQL cluster v{{ .Chart.AppVersion }}!
+
+When your cluster has finished deploying, you can connect to it with the
+following credentials:
+
+  Username: {{ if .Values.username }}{{ .Values.username }}{{- else }}{{ .Values.name }}{{- end }}
+  Password: {{ .Values.password }}
+
+To connect to your PostgreSQL cluster, you can set up a port forward to your
+local machine in a separate terminal window:
+
+  kubectl -n {{ .Values.namespace }} port-forward svc/{{ .Values.name }} 5432:5432
+
+And use the following connection string to connect to your cluster:
+
+  PGPASSWORD="{{ .Values.password }}" psql -h localhost -U {{ if .Values.username }}{{ .Values.username }}{{- else }}{{ .Values.name }}{{- end }} {{ .Values.name }}
+
+If you need to log in as the PostgreSQL superuser, you can do so with the following command:
+
+  PGPASSWORD=$(kubectl -n {{ .Values.namespace }} get secrets {{ .Values.name }}-postgres-secret -o jsonpath='{.data.password}' | base64 -d) psql -h localhost -U postgres {{ .Values.name }}
+
More information about the custom resource workflow can be found in the docs here:
-https://access.crunchydata.com/documentation/postgres-operator/latest/custom-resources/
+
+  https://access.crunchydata.com/documentation/postgres-operator/latest/custom-resources/
diff --git a/examples/helm/create-cluster/templates/_helpers.tpl b/examples/helm/postgres/templates/_helpers.tpl
similarity index 100%
rename from examples/helm/create-cluster/templates/_helpers.tpl
rename to examples/helm/postgres/templates/_helpers.tpl
diff --git a/examples/helm/postgres/templates/pgcluster.yaml b/examples/helm/postgres/templates/pgcluster.yaml
new file mode 100644
index 0000000000..dae6801f94
--- /dev/null
+++ b/examples/helm/postgres/templates/pgcluster.yaml
@@ -0,0 +1,62 @@
+apiVersion: crunchydata.com/v1
+kind: Pgcluster
+metadata:
+  annotations:
+    current-primary: {{ .Values.name | quote }}
+  labels:
+    crunchy-pgha-scope: {{ .Values.name | quote }}
+    deployment-name: {{ .Values.name | quote }}
+    name: {{ .Values.name | quote }}
+    pg-cluster: {{ .Values.name | quote }}
+    pgo-version: {{ .Chart.AppVersion | quote }}
+    pgouser: admin
+  name: {{ .Values.name | quote }}
+  namespace: {{ .Values.namespace | quote }}
+spec:
+  BackrestStorage:
+    accessmode: ReadWriteOnce
+    size: {{ .Values.diskSize | default "2Gi" | quote }}
+    storagetype: dynamic
+  PrimaryStorage:
+    accessmode: ReadWriteOnce
+    name: {{ .Values.name | quote }}
+    size: {{ .Values.diskSize | default "1Gi" | quote }}
+    storagetype: dynamic
+  ReplicaStorage:
+    accessmode: ReadWriteOnce
+    size: {{ .Values.diskSize | default "1Gi" | quote }}
+    storagetype: dynamic
+  ccpimage: {{ .Values.image | default "crunchy-postgres-ha" | quote }}
+  ccpimageprefix: {{ .Values.imagePrefix | default "registry.developers.crunchydata.com/crunchydata" | quote }}
+  ccpimagetag: {{ .Values.imageTag | default "ubi8-13.10-4.6.10" | quote }}
+  clustername: {{ .Values.name | quote }}
+  database: {{ .Values.name | quote }}
+  {{- if .Values.monitoring }}
+  exporter: true
+  {{- end }}
+  exporterport: "9187"
+  limits:
+    cpu: {{ .Values.cpu | default "0.25" | quote }}
+    memory: {{ .Values.memory | default "1Gi" | quote }}
+  name: {{ .Values.name | quote }}
+  namespace: {{ .Values.namespace | quote }}
+  pgDataSource:
+    restoreFrom: ""
+    restoreOpts: ""
+  pgbadgerport: "10000"
+  pgoimageprefix: {{ .Values.imagePrefix | default "registry.developers.crunchydata.com/crunchydata" | quote }}
+  podAntiAffinity:
+    default: preferred
+    pgBackRest: preferred
+    pgBouncer: preferred
+  port: "5432"
+  {{- if .Values.ha }}
+  replicas: "1"
+  {{- end }}
+  {{- if .Values.username }}
+  user: {{ .Values.username | quote }}
+  {{- else }}
+  user: {{ .Values.name | quote }}
+  {{- end }}
+  userlabels:
+    pgo-version: {{ .Chart.AppVersion | quote }}
diff --git a/examples/helm/postgres/templates/user-secret.yaml b/examples/helm/postgres/templates/user-secret.yaml
new file mode 100644
index 0000000000..b44d31743d
--- /dev/null
+++ b/examples/helm/postgres/templates/user-secret.yaml
@@ -0,0 +1,12 @@
+apiVersion: v1
+kind: Secret
+type: Opaque
+metadata:
+  labels:
+    pg-cluster: {{ .Values.name | quote }}
+    vendor: crunchydata
+  name: {{ .Values.name }}-{{- if .Values.username }}{{ .Values.username }}{{- else }}{{ .Values.name }}{{- end }}-secret
+  namespace: {{ .Values.namespace | quote }}
+data:
+  password: {{ .Values.password | b64enc | quote }}
+  username: {{ if .Values.username }}{{ .Values.username | b64enc | quote }}{{- else }}{{ .Values.name | b64enc | quote }}{{- end }}
diff --git a/examples/helm/postgres/values.yaml b/examples/helm/postgres/values.yaml
new file mode 100644
index 0000000000..cd00be0662
--- /dev/null
+++ b/examples/helm/postgres/values.yaml
@@ -0,0 +1,14 @@
+# These values set the namespace and the PostgreSQL cluster name
+name: hippo
+namespace: pgo
+password: W4tch0ut4hippo$
+
+# Optional parameters
+# cpu: 0.25
+# diskSize: 5Gi
+# monitoring: true
+# ha: true
+# imagePrefix: registry.developers.crunchydata.com/crunchydata
+# image: crunchy-postgres-ha
+# imageTag: ubi8-13.10-4.6.10
+# memory: 1Gi
diff --git a/examples/kustomize/createcluster/README.md b/examples/kustomize/createcluster/README.md
new file mode 100644
index 0000000000..f3a0ee5e46
--- /dev/null
+++ b/examples/kustomize/createcluster/README.md
@@ -0,0 +1,183 @@
+# Create Clusters with Kustomize
+This is a working example that creates multiple clusters via the CRD workflow using
+Kustomize.
+
+## Prerequisites
+
+### Postgres Operator
+This example assumes you have the Crunchy PostgreSQL Operator installed
+in a namespace called `pgo`.
+
+### Kustomize
+Install the latest [kustomize](https://kubectl.docs.kubernetes.io/installation/kustomize/) version available. Kustomize is also available through `kubectl`, but the bundled version will not be the latest.
+
+## Documentation
+Please see the [documentation](https://access.crunchydata.com/documentation/postgres-operator/latest/custom-resources/) for more guidance using custom resources.
+
+## Example setup and execution
+Navigate to the createcluster directory under the examples/kustomize directory:
+```
+cd ./examples/kustomize/createcluster/
+```
+In the createcluster directory you will see a base directory and an overlay directory. The base will create a simple Crunchy PostgreSQL cluster. There are three directories located in the overlay directory: dev, staging, and prod, laid out as shown below.
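Roughly, the example is laid out as follows (a sketch showing only the directories and files discussed in this README):

```
examples/kustomize/createcluster/
├── base/
│   ├── kustomization.yaml
│   └── pgcluster.yaml
└── overlay/
    ├── dev/
    ├── staging/
    └── prod/
```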
You can run Kustomize against each of those, and a slightly different Crunchy PostgreSQL cluster will be created for each.
+
+### base
+Let's generate the Kustomize YAML for the base:
+```
+kustomize build base/
+```
+If the YAML looks good, let's apply it:
+```
+kustomize build base/ | kubectl apply -f -
+```
+You will see that these items are created after running the above command:
+```
+secret/hippo-hippo-secret created
+secret/hippo-postgres-secret created
+pgcluster.crunchydata.com/hippo created
+```
+You may need to wait a few seconds, depending on the resources allocated to your Kubernetes setup, for the Crunchy PostgreSQL cluster to become available.
+
+After the cluster is finished creating, let's take a look at it with the Crunchy PostgreSQL Operator:
+```
+pgo show cluster hippo -n pgo
+```
+You will see something like this if successful:
+```
+cluster : hippo (crunchy-postgres-ha:ubi8-13.10-4.6.10)
+	pod : hippo-8fb6bd96-j87wq (Running) on gke-xxxx-default-pool-38e946bd-257w (1/1) (primary)
+		pvc: hippo (1Gi)
+	deployment : hippo
+	deployment : hippo-backrest-shared-repo
+	service : hippo - ClusterIP (10.0.56.86) - Ports (2022/TCP, 5432/TCP)
+	labels : pgo-version=4.6.10 name=hippo crunchy-pgha-scope=hippo deployment-name=hippo pg-cluster=hippo pgouser=admin vendor=crunchydata
+```
+Feel free to run other `pgo` CLI commands on the hippo cluster.
+
+### overlay
+As mentioned above, there are three overlays available in this example; these overlays modify the common base.
+#### development
+The development overlay will deploy a simple Crunchy PostgreSQL cluster with pgBouncer.
+
+Let's generate the Kustomize YAML for the dev overlay:
+```
+kustomize build overlay/dev/
+```
+If the YAML looks good, apply it:
+```
+kustomize build overlay/dev/ | kubectl apply -f -
+```
+You will see that these items are created after running the above command:
+```
+secret/dev-hippo-hippo-secret created
+secret/dev-hippo-postgres-secret created
+pgcluster.crunchydata.com/dev-hippo created
+```
+After the cluster is finished creating, let's take a look at it with the Crunchy PostgreSQL Operator:
+```
+pgo show cluster dev-hippo -n pgo
+```
+You will see something like this if successful:
+```
+cluster : dev-hippo (crunchy-postgres-ha:ubi8-13.10-4.6.10)
+	pod : dev-hippo-588d4cb746-bwrxb (Running) on gke-xxxx-default-pool-95cba91c-0ppp (1/1) (primary)
+		pvc: dev-hippo (1Gi)
+	deployment : dev-hippo
+	deployment : dev-hippo-backrest-shared-repo
+	deployment : dev-hippo-pgbouncer
+	service : dev-hippo - ClusterIP (10.0.62.87) - Ports (2022/TCP, 5432/TCP)
+	service : dev-hippo-pgbouncer - ClusterIP (10.0.48.120) - Ports (5432/TCP)
+	labels : crunchy-pgha-scope=dev-hippo name=dev-hippo pg-cluster=dev-hippo vendor=crunchydata deployment-name=dev-hippo environment=development pgo-version=4.6.10 pgouser=admin
+```
+#### staging
+The staging overlay will deploy a Crunchy PostgreSQL cluster with two replicas and annotations added.
+
+Let's generate the Kustomize YAML for the staging overlay:
+```
+kustomize build overlay/staging/
+```
+If the YAML looks good, apply it:
+```
+kustomize build overlay/staging/ | kubectl apply -f -
+```
+You will see that these items are created after running the above command:
+```
+secret/staging-hippo-hippo-secret created
+secret/staging-hippo-postgres-secret created
+pgcluster.crunchydata.com/staging-hippo created
+pgreplica.crunchydata.com/staging-hippo-rpl1 created
+```
+After the cluster is finished creating, let's take a look at it with the Crunchy
+#### development
+The development overlay will deploy a simple Crunchy PostgreSQL cluster with pgBouncer.
+
+Let's generate the kustomize YAML for the dev overlay:
+```
+kustomize build overlay/dev/
+```
+If the YAML looks good, let's apply it:
+```
+kustomize build overlay/dev/ | kubectl apply -f -
+```
+You will see these items created after running the above command:
+```
+secret/dev-hippo-hippo-secret created
+secret/dev-hippo-postgres-secret created
+pgcluster.crunchydata.com/dev-hippo created
+```
+After the cluster is finished creating, let's take a look at it with the Crunchy PostgreSQL Operator:
+```
+pgo show cluster dev-hippo -n pgo
+```
+You will see something like this if successful:
+```
+cluster : dev-hippo (crunchy-postgres-ha:ubi8-13.10-4.6.10)
+    pod : dev-hippo-588d4cb746-bwrxb (Running) on gke-xxxx-default-pool-95cba91c-0ppp (1/1) (primary)
+        pvc: dev-hippo (1Gi)
+    deployment : dev-hippo
+    deployment : dev-hippo-backrest-shared-repo
+    deployment : dev-hippo-pgbouncer
+    service : dev-hippo - ClusterIP (10.0.62.87) - Ports (2022/TCP, 5432/TCP)
+    service : dev-hippo-pgbouncer - ClusterIP (10.0.48.120) - Ports (5432/TCP)
+    labels : crunchy-pgha-scope=dev-hippo name=dev-hippo pg-cluster=dev-hippo vendor=crunchydata deployment-name=dev-hippo environment=development pgo-version=4.6.10 pgouser=admin
+```
+#### staging
+The staging overlay will deploy a Crunchy PostgreSQL cluster with two replicas and annotations added.
+
+Let's generate the kustomize YAML for the staging overlay:
+```
+kustomize build overlay/staging/
+```
+If the YAML looks good, let's apply it:
+```
+kustomize build overlay/staging/ | kubectl apply -f -
+```
+You will see these items created after running the above command:
+```
+secret/staging-hippo-hippo-secret created
+secret/staging-hippo-postgres-secret created
+pgcluster.crunchydata.com/staging-hippo created
+pgreplica.crunchydata.com/staging-hippo-rpl1 created
+```
+After the cluster is finished creating, let's take a look at it with the Crunchy PostgreSQL Operator:
+```
+pgo show cluster staging-hippo -n pgo
+```
+You will see something like this if successful (notice that one of the replicas is a different size):
+```
+cluster : staging-hippo (crunchy-postgres-ha:ubi8-13.10-4.6.10)
+    pod : staging-hippo-85cf6dcb65-9h748 (Running) on gke-xxxx-default-pool-95cba91c-0ppp (1/1) (primary)
+        pvc: staging-hippo (1Gi)
+    pod : staging-hippo-lnxw-cf47d8c8b-6r4wn (Running) on gke-xxxx-default-pool-21b7282d-rqkj (1/1) (replica)
+        pvc: staging-hippo-lnxw (1Gi)
+    pod : staging-hippo-rpl1-5d89d66f9b-44znd (Running) on gke-xxxx-default-pool-21b7282d-rqkj (1/1) (replica)
+        pvc: staging-hippo-rpl1 (2Gi)
+    deployment : staging-hippo
+    deployment : staging-hippo-backrest-shared-repo
+    deployment : staging-hippo-lnxw
+    deployment : staging-hippo-rpl1
+    service : staging-hippo - ClusterIP (10.0.56.253) - Ports (2022/TCP, 5432/TCP)
+    service : staging-hippo-replica - ClusterIP (10.0.56.57) - Ports (2022/TCP, 5432/TCP)
+    pgreplica : staging-hippo-lnxw
+    pgreplica : staging-hippo-rpl1
+    labels : deployment-name=staging-hippo environment=staging name=staging-hippo crunchy-pgha-scope=staging-hippo pg-cluster=staging-hippo pgo-version=4.6.10 pgouser=admin vendor=crunchydata
+```
+
+#### production
+The production overlay will deploy a Crunchy PostgreSQL cluster with one replica.
+
+Let's generate the kustomize YAML for the prod overlay:
+```
+kustomize build overlay/prod/
+```
+If the YAML looks good, let's apply it:
+```
+kustomize build overlay/prod/ | kubectl apply -f -
+```
+You will see these items created after running the above command:
+```
+secret/prod-hippo-hippo-secret created
+secret/prod-hippo-postgres-secret created
+pgcluster.crunchydata.com/prod-hippo created
+```
+After the cluster is finished creating, let's take a look at it with the Crunchy PostgreSQL Operator:
+```
+pgo show cluster prod-hippo -n pgo
+```
+You will see something like this if successful:
+```
+cluster : prod-hippo (crunchy-postgres-ha:ubi8-13.10-4.6.10)
+    pod : prod-hippo-5d6dd46497-rr67c (Running) on gke-xxxx-default-pool-21b7282d-rqkj (1/1) (primary)
+        pvc: prod-hippo (1Gi)
+    pod : prod-hippo-flty-84d97c8769-2pzbh (Running) on gke-xxxx-default-pool-95cba91c-0ppp (1/1) (replica)
+        pvc: prod-hippo-flty (1Gi)
+    deployment : prod-hippo
+    deployment : prod-hippo-backrest-shared-repo
+    deployment : prod-hippo-flty
+    service : prod-hippo - ClusterIP (10.0.56.18) - Ports (2022/TCP, 5432/TCP)
+    service : prod-hippo-replica - ClusterIP (10.0.56.101) - Ports (2022/TCP, 5432/TCP)
+    pgreplica : prod-hippo-flty
+    labels : pgo-version=4.6.10 deployment-name=prod-hippo environment=production pg-cluster=prod-hippo crunchy-pgha-scope=prod-hippo name=prod-hippo pgouser=admin vendor=crunchydata
+```
+### Delete the clusters
+To delete the clusters, run the following pgo CLI commands.
+
+To delete all the clusters in the `pgo` namespace, run:
+```
+pgo delete cluster --all -n pgo
+```
+Or delete each cluster individually:
+```
+pgo delete cluster hippo -n pgo
+pgo delete cluster dev-hippo -n pgo
+pgo delete cluster staging-hippo -n pgo
+pgo delete cluster prod-hippo -n pgo
+```
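+
+To verify that nothing was left behind, you can also query the operator's custom resources directly with kubectl. A sketch, assuming `pgclusters` and `pgreplicas` are the resource names backing the Pgcluster and Pgreplica manifests used in this example:
+```
+# both lists should be empty once the deletes have completed
+kubectl get pgclusters,pgreplicas -n pgo
+```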
diff --git a/examples/kustomize/createcluster/base/kustomization.yaml b/examples/kustomize/createcluster/base/kustomization.yaml new file mode 100644 index 0000000000..91fb2a0954 --- /dev/null +++ b/examples/kustomize/createcluster/base/kustomization.yaml @@ -0,0 +1,20 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +namespace: pgo
+commonLabels: + vendor: crunchydata +secretGenerator: + - name: hippo-hippo-secret + options: + disableNameSuffixHash: true + literals: + - username=hippo + - password=Moresecurepassword* + - name: hippo-postgres-secret + options: + disableNameSuffixHash: true + literals: + - username=postgres + - password=Supersecurepassword* +resources: +- pgcluster.yaml diff --git a/examples/kustomize/createcluster/base/pgcluster.yaml b/examples/kustomize/createcluster/base/pgcluster.yaml new file mode 100644 index 0000000000..2f91873da9 --- /dev/null +++ b/examples/kustomize/createcluster/base/pgcluster.yaml @@ -0,0 +1,72 @@ +apiVersion: crunchydata.com/v1 +kind: Pgcluster +metadata: + annotations: + current-primary: hippo + labels: + autofail: "true" + crunchy-pgbadger: "false" + crunchy-pgha-scope: hippo + deployment-name: hippo + name: hippo + pg-cluster: hippo + pgo-version: 4.6.10 + pgouser: admin + name: hippo + namespace: pgo +spec: + BackrestStorage: + accessmode: ReadWriteOnce + matchLabels: "" + name: "" + size: 1G + storageclass: "" + storagetype: dynamic + supplementalgroups: "" + PrimaryStorage: + accessmode: ReadWriteOnce + matchLabels: "" + name: hippo + size: 1G + storageclass: "" + storagetype: dynamic + supplementalgroups: "" + ReplicaStorage: + accessmode: ReadWriteOnce + matchLabels: "" + name: "" + size: 1G + storageclass: "" + storagetype: dynamic + supplementalgroups: "" + annotations: + global: {} + backrest: {} + pgBouncer: {} + postgres: {} + ccpimage: crunchy-postgres-ha + ccpimageprefix: registry.developers.crunchydata.com/crunchydata + ccpimagetag: ubi8-13.10-4.6.10 + clustername: hippo + customconfig: "" + database: hippo + exporterport: "9187" + limits: {} + name: hippo + namespace: pgo + pgBouncer: + resources: {} + pgDataSource: + restoreFrom: "" + restoreOpts: "" + pgbadgerport: "10000" + pgoimageprefix: registry.developers.crunchydata.com/crunchydata + podAntiAffinity: + default: preferred + pgBackRest: preferred + pgBouncer: preferred + policies: "" + port: "5432" + user: hippo + userlabels: + pgo-version: 4.6.10 diff --git a/examples/kustomize/createcluster/overlay/dev/bouncer.json b/examples/kustomize/createcluster/overlay/dev/bouncer.json new file mode 100644 index 0000000000..622283f1fe --- /dev/null +++ b/examples/kustomize/createcluster/overlay/dev/bouncer.json @@ -0,0 +1,4 @@ +[ + { "op": "add", "path": "/spec/pgBouncer/resources/memory", "value": "24Mi"}, + { "op": "add", "path": "/spec/pgBouncer/replicas", "value": 1 } +] \ No newline at end of file diff --git a/examples/kustomize/createcluster/overlay/dev/devhippo.json b/examples/kustomize/createcluster/overlay/dev/devhippo.json new file mode 100644 index 0000000000..843e9c2c80 --- /dev/null +++ b/examples/kustomize/createcluster/overlay/dev/devhippo.json @@ -0,0 +1,15 @@ +[ + { "op": "replace", "path": "/metadata/annotations/current-primary", "value": "dev-hippo" }, + { "op": "replace", "path": "/metadata/labels/crunchy-pgha-scope", "value": "dev-hippo" }, + { "op": "replace", "path": "/metadata/labels/deployment-name", "value": "dev-hippo" }, + { "op": "replace", "path": "/metadata/labels/name", "value": "dev-hippo" }, + { "op": "replace", "path": "/metadata/labels/pg-cluster", "value": "dev-hippo" }, + { "op": "replace", "path": "/metadata/name", "value": "dev-hippo" }, + + { "op": "replace", "path": "/spec/PrimaryStorage/name", "value": "dev-hippo" }, + { "op": "replace", "path": "/spec/clustername", "value": "dev-hippo" }, + { "op": "replace", "path": "/spec/PrimaryStorage/name", "value": "dev-hippo"
}, + { "op": "replace", "path": "/spec/clustername", "value": "dev-hippo" }, + { "op": "replace", "path": "/spec/database", "value": "dev-hippo" }, + { "op": "replace", "path": "/spec/name", "value": "dev-hippo" } +] diff --git a/examples/kustomize/createcluster/overlay/dev/kustomization.yaml b/examples/kustomize/createcluster/overlay/dev/kustomization.yaml new file mode 100644 index 0000000000..a78fe401af --- /dev/null +++ b/examples/kustomize/createcluster/overlay/dev/kustomization.yaml @@ -0,0 +1,22 @@ +resources: +- ../../base +namePrefix: dev- +namespace: pgo +commonLabels: + environment: development + +patchesJson6902: + - target: + group: crunchydata.com + version: v1 + namespace: pgo + kind: Pgcluster + name: dev-hippo + path: devhippo.json + - target: + group: crunchydata.com + version: v1 + namespace: pgo + kind: Pgcluster + name: dev-hippo + path: bouncer.json \ No newline at end of file diff --git a/examples/kustomize/createcluster/overlay/prod/kustomization.yaml b/examples/kustomize/createcluster/overlay/prod/kustomization.yaml new file mode 100644 index 0000000000..76e5756697 --- /dev/null +++ b/examples/kustomize/createcluster/overlay/prod/kustomization.yaml @@ -0,0 +1,15 @@ +resources: +- ../../base +namePrefix: prod- +namespace: pgo +commonLabels: + environment: production + +patchesJson6902: + - target: + group: crunchydata.com + version: v1 + namespace: pgo + kind: Pgcluster + name: prod-hippo + path: prodhippo.json \ No newline at end of file diff --git a/examples/kustomize/createcluster/overlay/prod/prodhippo.json b/examples/kustomize/createcluster/overlay/prod/prodhippo.json new file mode 100644 index 0000000000..76fd528ac0 --- /dev/null +++ b/examples/kustomize/createcluster/overlay/prod/prodhippo.json @@ -0,0 +1,16 @@ +[ + { "op": "replace", "path": "/metadata/annotations/current-primary", "value": "prod-hippo" }, + { "op": "replace", "path": "/metadata/labels/crunchy-pgha-scope", "value": "prod-hippo" }, + { "op": "replace", "path": "/metadata/labels/deployment-name", "value": "prod-hippo" }, + { "op": "replace", "path": "/metadata/labels/name", "value": "prod-hippo" }, + { "op": "replace", "path": "/metadata/labels/pg-cluster", "value": "prod-hippo" }, + { "op": "replace", "path": "/metadata/name", "value": "prod-hippo" }, + + { "op": "replace", "path": "/spec/PrimaryStorage/name", "value": "prod-hippo" }, + { "op": "replace", "path": "/spec/clustername", "value": "prod-hippo" }, + { "op": "replace", "path": "/spec/PrimaryStorage/name", "value": "prod-hippo" }, + { "op": "replace", "path": "/spec/clustername", "value": "prod-hippo" }, + { "op": "replace", "path": "/spec/database", "value": "prod-hippo" }, + { "op": "replace", "path": "/spec/name", "value": "prod-hippo" }, + { "op": "replace", "path": "/spec/replicas", "value": "1"} +] diff --git a/examples/kustomize/createcluster/overlay/staging/annotations.json b/examples/kustomize/createcluster/overlay/staging/annotations.json new file mode 100644 index 0000000000..34983a01c7 --- /dev/null +++ b/examples/kustomize/createcluster/overlay/staging/annotations.json @@ -0,0 +1,6 @@ +[ + { "op": "add", "path": "/spec/annotations/global/favorite", "value": "hippo"}, + { "op": "add", "path": "/spec/annotations/backrest/chair", "value": "comfy"}, + { "op": "add", "path": "/spec/annotations/pgBouncer/pool", "value": "swimming"}, + { "op": "add", "path": "/spec/annotations/postgres/elephant", "value": "cool"} +] \ No newline at end of file diff --git 
a/examples/kustomize/createcluster/overlay/staging/hippo-rpl1-pgreplica.yaml b/examples/kustomize/createcluster/overlay/staging/hippo-rpl1-pgreplica.yaml new file mode 100644 index 0000000000..4a9d910566 --- /dev/null +++ b/examples/kustomize/createcluster/overlay/staging/hippo-rpl1-pgreplica.yaml @@ -0,0 +1,23 @@ +apiVersion: crunchydata.com/v1 +kind: Pgreplica +metadata: + labels: + name: staging-hippo-rpl1 + pg-cluster: staging-hippo + pgouser: admin + name: hippo-rpl1 + namespace: pgo +spec: + clustername: staging-hippo + name: staging-hippo-rpl1 + namespace: pgo + replicastorage: + accessmode: ReadWriteOnce + matchLabels: "" + name: staging-hippo-rpl1 + size: 2G + storageclass: "" + storagetype: dynamic + supplementalgroups: "" + userlabels: + pgo-version: 4.6.10 diff --git a/examples/kustomize/createcluster/overlay/staging/kustomization.yaml b/examples/kustomize/createcluster/overlay/staging/kustomization.yaml new file mode 100644 index 0000000000..4fb92b8d16 --- /dev/null +++ b/examples/kustomize/createcluster/overlay/staging/kustomization.yaml @@ -0,0 +1,23 @@ +resources: +- ../../base +- hippo-rpl1-pgreplica.yaml +namePrefix: staging- +namespace: pgo +commonLabels: + environment: staging + +patchesJson6902: + - target: + group: crunchydata.com + version: v1 + namespace: pgo + kind: Pgcluster + name: staging-hippo + path: staginghippo.json + - target: + group: crunchydata.com + version: v1 + namespace: pgo + kind: Pgcluster + name: staging-hippo + path: annotations.json \ No newline at end of file diff --git a/examples/kustomize/createcluster/overlay/staging/staginghippo.json b/examples/kustomize/createcluster/overlay/staging/staginghippo.json new file mode 100644 index 0000000000..7a5a9ab23f --- /dev/null +++ b/examples/kustomize/createcluster/overlay/staging/staginghippo.json @@ -0,0 +1,16 @@ +[ + { "op": "replace", "path": "/metadata/annotations/current-primary", "value": "staging-hippo" }, + { "op": "replace", "path": "/metadata/labels/crunchy-pgha-scope", "value": "staging-hippo" }, + { "op": "replace", "path": "/metadata/labels/deployment-name", "value": "staging-hippo" }, + { "op": "replace", "path": "/metadata/labels/name", "value": "staging-hippo" }, + { "op": "replace", "path": "/metadata/labels/pg-cluster", "value": "staging-hippo" }, + { "op": "replace", "path": "/metadata/name", "value": "staging-hippo" }, + + { "op": "replace", "path": "/spec/PrimaryStorage/name", "value": "staging-hippo" }, + { "op": "replace", "path": "/spec/clustername", "value": "staging-hippo" }, + { "op": "replace", "path": "/spec/PrimaryStorage/name", "value": "staging-hippo" }, + { "op": "replace", "path": "/spec/clustername", "value": "staging-hippo" }, + { "op": "replace", "path": "/spec/database", "value": "staging-hippo" }, + { "op": "replace", "path": "/spec/name", "value": "staging-hippo" }, + { "op": "replace", "path": "/spec/replicas", "value": "1"} +] diff --git a/examples/pgo-bash-completion b/examples/pgo-bash-completion deleted file mode 100644 index 70271ccf0c..0000000000 --- a/examples/pgo-bash-completion +++ /dev/null @@ -1,2150 +0,0 @@ -# bash completion for pgo -*- shell-script -*- - -__pgo_debug() -{ - if [[ -n ${BASH_COMP_DEBUG_FILE} ]]; then - echo "$*" >> "${BASH_COMP_DEBUG_FILE}" - fi -} - -# Homebrew on Macs have version 1.3 of bash-completion which doesn't include -# _init_completion. This is a very minimal version of that function. 
-__pgo_init_completion() -{ - COMPREPLY=() - _get_comp_words_by_ref "$@" cur prev words cword -} - -__pgo_index_of_word() -{ - local w word=$1 - shift - index=0 - for w in "$@"; do - [[ $w = "$word" ]] && return - index=$((index+1)) - done - index=-1 -} - -__pgo_contains_word() -{ - local w word=$1; shift - for w in "$@"; do - [[ $w = "$word" ]] && return - done - return 1 -} - -__pgo_handle_reply() -{ - __pgo_debug "${FUNCNAME[0]}" - case $cur in - -*) - if [[ $(type -t compopt) = "builtin" ]]; then - compopt -o nospace - fi - local allflags - if [ ${#must_have_one_flag[@]} -ne 0 ]; then - allflags=("${must_have_one_flag[@]}") - else - allflags=("${flags[*]} ${two_word_flags[*]}") - fi - COMPREPLY=( $(compgen -W "${allflags[*]}" -- "$cur") ) - if [[ $(type -t compopt) = "builtin" ]]; then - [[ "${COMPREPLY[0]}" == *= ]] || compopt +o nospace - fi - - # complete after --flag=abc - if [[ $cur == *=* ]]; then - if [[ $(type -t compopt) = "builtin" ]]; then - compopt +o nospace - fi - - local index flag - flag="${cur%=*}" - __pgo_index_of_word "${flag}" "${flags_with_completion[@]}" - COMPREPLY=() - if [[ ${index} -ge 0 ]]; then - PREFIX="" - cur="${cur#*=}" - ${flags_completion[${index}]} - if [ -n "${ZSH_VERSION}" ]; then - # zsh completion needs --flag= prefix - eval "COMPREPLY=( \"\${COMPREPLY[@]/#/${flag}=}\" )" - fi - fi - fi - return 0; - ;; - esac - - # check if we are handling a flag with special work handling - local index - __pgo_index_of_word "${prev}" "${flags_with_completion[@]}" - if [[ ${index} -ge 0 ]]; then - ${flags_completion[${index}]} - return - fi - - # we are parsing a flag and don't have a special handler, no completion - if [[ ${cur} != "${words[cword]}" ]]; then - return - fi - - local completions - completions=("${commands[@]}") - if [[ ${#must_have_one_noun[@]} -ne 0 ]]; then - completions=("${must_have_one_noun[@]}") - fi - if [[ ${#must_have_one_flag[@]} -ne 0 ]]; then - completions+=("${must_have_one_flag[@]}") - fi - COMPREPLY=( $(compgen -W "${completions[*]}" -- "$cur") ) - - if [[ ${#COMPREPLY[@]} -eq 0 && ${#noun_aliases[@]} -gt 0 && ${#must_have_one_noun[@]} -ne 0 ]]; then - COMPREPLY=( $(compgen -W "${noun_aliases[*]}" -- "$cur") ) - fi - - if [[ ${#COMPREPLY[@]} -eq 0 ]]; then - declare -F __custom_func >/dev/null && __custom_func - fi - - # available in bash-completion >= 2, not always present on macOS - if declare -F __ltrim_colon_completions >/dev/null; then - __ltrim_colon_completions "$cur" - fi - - # If there is only 1 completion and it is a flag with an = it will be completed - # but we don't want a space after the = - if [[ "${#COMPREPLY[@]}" -eq "1" ]] && [[ $(type -t compopt) = "builtin" ]] && [[ "${COMPREPLY[0]}" == --*= ]]; then - compopt -o nospace - fi -} - -# The arguments should be in the form "ext1|ext2|extn" -__pgo_handle_filename_extension_flag() -{ - local ext="$1" - _filedir "@(${ext})" -} - -__pgo_handle_subdirs_in_dir_flag() -{ - local dir="$1" - pushd "${dir}" >/dev/null 2>&1 && _filedir -d && popd >/dev/null 2>&1 -} - -__pgo_handle_flag() -{ - __pgo_debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}" - - # if a command required a flag, and we found it, unset must_have_one_flag() - local flagname=${words[c]} - local flagvalue - # if the word contained an = - if [[ ${words[c]} == *"="* ]]; then - flagvalue=${flagname#*=} # take in as flagvalue after the = - flagname=${flagname%=*} # strip everything after the = - flagname="${flagname}=" # but put the = back - fi - __pgo_debug "${FUNCNAME[0]}: looking for ${flagname}" - if 
__pgo_contains_word "${flagname}" "${must_have_one_flag[@]}"; then - must_have_one_flag=() - fi - - # if you set a flag which only applies to this command, don't show subcommands - if __pgo_contains_word "${flagname}" "${local_nonpersistent_flags[@]}"; then - commands=() - fi - - # keep flag value with flagname as flaghash - # flaghash variable is an associative array which is only supported in bash > 3. - if [[ -z "${BASH_VERSION}" || "${BASH_VERSINFO[0]}" -gt 3 ]]; then - if [ -n "${flagvalue}" ] ; then - flaghash[${flagname}]=${flagvalue} - elif [ -n "${words[ $((c+1)) ]}" ] ; then - flaghash[${flagname}]=${words[ $((c+1)) ]} - else - flaghash[${flagname}]="true" # pad "true" for bool flag - fi - fi - - # skip the argument to a two word flag - if __pgo_contains_word "${words[c]}" "${two_word_flags[@]}"; then - c=$((c+1)) - # if we are looking for a flags value, don't show commands - if [[ $c -eq $cword ]]; then - commands=() - fi - fi - - c=$((c+1)) - -} - -__pgo_handle_noun() -{ - __pgo_debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}" - - if __pgo_contains_word "${words[c]}" "${must_have_one_noun[@]}"; then - must_have_one_noun=() - elif __pgo_contains_word "${words[c]}" "${noun_aliases[@]}"; then - must_have_one_noun=() - fi - - nouns+=("${words[c]}") - c=$((c+1)) -} - -__pgo_handle_command() -{ - __pgo_debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}" - - local next_command - if [[ -n ${last_command} ]]; then - next_command="_${last_command}_${words[c]//:/__}" - else - if [[ $c -eq 0 ]]; then - next_command="_pgo_root_command" - else - next_command="_${words[c]//:/__}" - fi - fi - c=$((c+1)) - __pgo_debug "${FUNCNAME[0]}: looking for ${next_command}" - declare -F "$next_command" >/dev/null && $next_command -} - -__pgo_handle_word() -{ - if [[ $c -ge $cword ]]; then - __pgo_handle_reply - return - fi - __pgo_debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}" - if [[ "${words[c]}" == -* ]]; then - __pgo_handle_flag - elif __pgo_contains_word "${words[c]}" "${commands[@]}"; then - __pgo_handle_command - elif [[ $c -eq 0 ]]; then - __pgo_handle_command - elif __pgo_contains_word "${words[c]}" "${command_aliases[@]}"; then - # aliashash variable is an associative array which is only supported in bash > 3. 
- if [[ -z "${BASH_VERSION}" || "${BASH_VERSINFO[0]}" -gt 3 ]]; then - words[c]=${aliashash[${words[c]}]} - __pgo_handle_command - else - __pgo_handle_noun - fi - else - __pgo_handle_noun - fi - __pgo_handle_word -} - -_pgo_apply() -{ - last_command="pgo_apply" - - command_aliases=() - - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--selector=") - two_word_flags+=("-s") - local_nonpersistent_flags+=("--selector=") - flags+=("--apiserver-url=") - flags+=("--debug") - flags+=("--namespace=") - two_word_flags+=("-n") - flags+=("--pgo-ca-cert=") - flags+=("--pgo-client-cert=") - flags+=("--pgo-client-key=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_pgo_backup() -{ - last_command="pgo_backup" - - command_aliases=() - - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--backup-opts=") - local_nonpersistent_flags+=("--backup-opts=") - flags+=("--backup-type=") - local_nonpersistent_flags+=("--backup-type=") - flags+=("--pgbackrest-storage-type=") - local_nonpersistent_flags+=("--pgbackrest-storage-type=") - flags+=("--pvc-name=") - local_nonpersistent_flags+=("--pvc-name=") - flags+=("--selector=") - two_word_flags+=("-s") - local_nonpersistent_flags+=("--selector=") - flags+=("--storage-config=") - local_nonpersistent_flags+=("--storage-config=") - flags+=("--apiserver-url=") - flags+=("--debug") - flags+=("--namespace=") - two_word_flags+=("-n") - flags+=("--pgo-ca-cert=") - flags+=("--pgo-client-cert=") - flags+=("--pgo-client-key=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_pgo_cat() -{ - last_command="pgo_cat" - - command_aliases=() - - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--apiserver-url=") - flags+=("--debug") - flags+=("--namespace=") - two_word_flags+=("-n") - flags+=("--pgo-ca-cert=") - flags+=("--pgo-client-cert=") - flags+=("--pgo-client-key=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_pgo_create_cluster() -{ - last_command="pgo_create_cluster" - - command_aliases=() - - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--autofail") - local_nonpersistent_flags+=("--autofail") - flags+=("--ccp-image=") - local_nonpersistent_flags+=("--ccp-image=") - flags+=("--ccp-image-tag=") - two_word_flags+=("-c") - local_nonpersistent_flags+=("--ccp-image-tag=") - flags+=("--custom-config=") - local_nonpersistent_flags+=("--custom-config=") - flags+=("--labels=") - two_word_flags+=("-l") - local_nonpersistent_flags+=("--labels=") - flags+=("--metrics") - local_nonpersistent_flags+=("--metrics") - flags+=("--node-label=") - local_nonpersistent_flags+=("--node-label=") - flags+=("--password=") - two_word_flags+=("-w") - local_nonpersistent_flags+=("--password=") - flags+=("--pgbackrest=") - local_nonpersistent_flags+=("--pgbackrest=") - flags+=("--pgbackrest-storage-type=") - local_nonpersistent_flags+=("--pgbackrest-storage-type=") - flags+=("--pgbadger") - local_nonpersistent_flags+=("--pgbadger") - flags+=("--pgbouncer") - local_nonpersistent_flags+=("--pgbouncer") - flags+=("--pgbouncer-pass=") - local_nonpersistent_flags+=("--pgbouncer-pass=") - flags+=("--policies=") 
- two_word_flags+=("-z") - local_nonpersistent_flags+=("--policies=") - flags+=("--replica-count=") - local_nonpersistent_flags+=("--replica-count=") - flags+=("--replica-storage-config=") - local_nonpersistent_flags+=("--replica-storage-config=") - flags+=("--secret-from=") - two_word_flags+=("-s") - local_nonpersistent_flags+=("--secret-from=") - two_word_flags+=("-e") - flags+=("--service-type=") - local_nonpersistent_flags+=("--service-type=") - flags+=("--storage-config=") - local_nonpersistent_flags+=("--storage-config=") - flags+=("--apiserver-url=") - flags+=("--debug") - flags+=("--namespace=") - two_word_flags+=("-n") - flags+=("--pgo-ca-cert=") - flags+=("--pgo-client-cert=") - flags+=("--pgo-client-key=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_pgo_create_namespace() -{ - last_command="pgo_create_namespace" - - command_aliases=() - - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--apiserver-url=") - flags+=("--debug") - flags+=("--namespace=") - two_word_flags+=("-n") - flags+=("--pgo-ca-cert=") - flags+=("--pgo-client-cert=") - flags+=("--pgo-client-key=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_pgo_create_pgbouncer() -{ - last_command="pgo_create_pgbouncer" - - command_aliases=() - - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--pgbouncer-pass=") - local_nonpersistent_flags+=("--pgbouncer-pass=") - flags+=("--selector=") - two_word_flags+=("-s") - local_nonpersistent_flags+=("--selector=") - flags+=("--apiserver-url=") - flags+=("--debug") - flags+=("--namespace=") - two_word_flags+=("-n") - flags+=("--pgo-ca-cert=") - flags+=("--pgo-client-cert=") - flags+=("--pgo-client-key=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_pgo_create_pgorole() -{ - last_command="pgo_create_pgorole" - - command_aliases=() - - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--permissions=") - local_nonpersistent_flags+=("--permissions=") - flags+=("--apiserver-url=") - flags+=("--debug") - flags+=("--namespace=") - two_word_flags+=("-n") - flags+=("--pgo-ca-cert=") - flags+=("--pgo-client-cert=") - flags+=("--pgo-client-key=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_pgo_create_pgouser() -{ - last_command="pgo_create_pgouser" - - command_aliases=() - - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--all-namespaces") - local_nonpersistent_flags+=("--all-namespaces") - flags+=("--pgouser-namespaces=") - local_nonpersistent_flags+=("--pgouser-namespaces=") - flags+=("--pgouser-password=") - local_nonpersistent_flags+=("--pgouser-password=") - flags+=("--pgouser-roles=") - local_nonpersistent_flags+=("--pgouser-roles=") - flags+=("--apiserver-url=") - flags+=("--debug") - flags+=("--namespace=") - two_word_flags+=("-n") - flags+=("--pgo-ca-cert=") - flags+=("--pgo-client-cert=") - flags+=("--pgo-client-key=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_pgo_create_policy() -{ - last_command="pgo_create_policy" - - command_aliases=() - - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - 
flags+=("--in-file=") - two_word_flags+=("-i") - local_nonpersistent_flags+=("--in-file=") - flags+=("--url=") - two_word_flags+=("-u") - local_nonpersistent_flags+=("--url=") - flags+=("--apiserver-url=") - flags+=("--debug") - flags+=("--namespace=") - two_word_flags+=("-n") - flags+=("--pgo-ca-cert=") - flags+=("--pgo-client-cert=") - flags+=("--pgo-client-key=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_pgo_create_schedule() -{ - last_command="pgo_create_schedule" - - command_aliases=() - - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--ccp-image-tag=") - two_word_flags+=("-c") - local_nonpersistent_flags+=("--ccp-image-tag=") - flags+=("--database=") - local_nonpersistent_flags+=("--database=") - flags+=("--pgbackrest-backup-type=") - local_nonpersistent_flags+=("--pgbackrest-backup-type=") - flags+=("--pgbackrest-storage-type=") - local_nonpersistent_flags+=("--pgbackrest-storage-type=") - flags+=("--policy=") - local_nonpersistent_flags+=("--policy=") - flags+=("--pvc-name=") - local_nonpersistent_flags+=("--pvc-name=") - flags+=("--schedule=") - local_nonpersistent_flags+=("--schedule=") - flags+=("--schedule-opts=") - local_nonpersistent_flags+=("--schedule-opts=") - flags+=("--schedule-type=") - local_nonpersistent_flags+=("--schedule-type=") - flags+=("--secret=") - local_nonpersistent_flags+=("--secret=") - flags+=("--selector=") - two_word_flags+=("-s") - local_nonpersistent_flags+=("--selector=") - flags+=("--apiserver-url=") - flags+=("--debug") - flags+=("--namespace=") - two_word_flags+=("-n") - flags+=("--pgo-ca-cert=") - flags+=("--pgo-client-cert=") - flags+=("--pgo-client-key=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_pgo_create_user() -{ - last_command="pgo_create_user" - - command_aliases=() - - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--managed") - local_nonpersistent_flags+=("--managed") - flags+=("--password=") - local_nonpersistent_flags+=("--password=") - flags+=("--password-length=") - local_nonpersistent_flags+=("--password-length=") - flags+=("--selector=") - two_word_flags+=("-s") - local_nonpersistent_flags+=("--selector=") - flags+=("--username=") - local_nonpersistent_flags+=("--username=") - flags+=("--valid-days=") - local_nonpersistent_flags+=("--valid-days=") - flags+=("--apiserver-url=") - flags+=("--debug") - flags+=("--namespace=") - two_word_flags+=("-n") - flags+=("--pgo-ca-cert=") - flags+=("--pgo-client-cert=") - flags+=("--pgo-client-key=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_pgo_create() -{ - last_command="pgo_create" - - command_aliases=() - - commands=() - commands+=("cluster") - commands+=("namespace") - commands+=("pgbouncer") - commands+=("pgorole") - commands+=("pgouser") - commands+=("policy") - commands+=("schedule") - commands+=("user") - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--apiserver-url=") - flags+=("--debug") - flags+=("--namespace=") - two_word_flags+=("-n") - flags+=("--pgo-ca-cert=") - flags+=("--pgo-client-cert=") - flags+=("--pgo-client-key=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_pgo_delete_backup() -{ - last_command="pgo_delete_backup" - - command_aliases=() - - commands=() - - flags=() - two_word_flags=() - 
local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--apiserver-url=") - flags+=("--debug") - flags+=("--namespace=") - two_word_flags+=("-n") - flags+=("--pgo-ca-cert=") - flags+=("--pgo-client-cert=") - flags+=("--pgo-client-key=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_pgo_delete_cluster() -{ - last_command="pgo_delete_cluster" - - command_aliases=() - - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--all") - local_nonpersistent_flags+=("--all") - flags+=("--delete-backups") - flags+=("-b") - local_nonpersistent_flags+=("--delete-backups") - flags+=("--delete-data") - flags+=("-d") - local_nonpersistent_flags+=("--delete-data") - flags+=("--no-prompt") - local_nonpersistent_flags+=("--no-prompt") - flags+=("--selector=") - two_word_flags+=("-s") - local_nonpersistent_flags+=("--selector=") - flags+=("--apiserver-url=") - flags+=("--debug") - flags+=("--namespace=") - two_word_flags+=("-n") - flags+=("--pgo-ca-cert=") - flags+=("--pgo-client-cert=") - flags+=("--pgo-client-key=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_pgo_delete_label() -{ - last_command="pgo_delete_label" - - command_aliases=() - - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--label=") - local_nonpersistent_flags+=("--label=") - flags+=("--selector=") - two_word_flags+=("-s") - local_nonpersistent_flags+=("--selector=") - flags+=("--apiserver-url=") - flags+=("--debug") - flags+=("--namespace=") - two_word_flags+=("-n") - flags+=("--pgo-ca-cert=") - flags+=("--pgo-client-cert=") - flags+=("--pgo-client-key=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_pgo_delete_namespace() -{ - last_command="pgo_delete_namespace" - - command_aliases=() - - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--apiserver-url=") - flags+=("--debug") - flags+=("--namespace=") - two_word_flags+=("-n") - flags+=("--pgo-ca-cert=") - flags+=("--pgo-client-cert=") - flags+=("--pgo-client-key=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_pgo_delete_pgbouncer() -{ - last_command="pgo_delete_pgbouncer" - - command_aliases=() - - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--no-prompt") - local_nonpersistent_flags+=("--no-prompt") - flags+=("--selector=") - two_word_flags+=("-s") - local_nonpersistent_flags+=("--selector=") - flags+=("--apiserver-url=") - flags+=("--debug") - flags+=("--namespace=") - two_word_flags+=("-n") - flags+=("--pgo-ca-cert=") - flags+=("--pgo-client-cert=") - flags+=("--pgo-client-key=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_pgo_delete_pgorole() -{ - last_command="pgo_delete_pgorole" - - command_aliases=() - - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--all") - local_nonpersistent_flags+=("--all") - flags+=("--no-prompt") - local_nonpersistent_flags+=("--no-prompt") - flags+=("--apiserver-url=") - flags+=("--debug") - flags+=("--namespace=") - two_word_flags+=("-n") - flags+=("--pgo-ca-cert=") - flags+=("--pgo-client-cert=") - flags+=("--pgo-client-key=") - - 
must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_pgo_delete_pgouser() -{ - last_command="pgo_delete_pgouser" - - command_aliases=() - - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--all") - local_nonpersistent_flags+=("--all") - flags+=("--no-prompt") - local_nonpersistent_flags+=("--no-prompt") - flags+=("--apiserver-url=") - flags+=("--debug") - flags+=("--namespace=") - two_word_flags+=("-n") - flags+=("--pgo-ca-cert=") - flags+=("--pgo-client-cert=") - flags+=("--pgo-client-key=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_pgo_delete_policy() -{ - last_command="pgo_delete_policy" - - command_aliases=() - - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--all") - local_nonpersistent_flags+=("--all") - flags+=("--no-prompt") - local_nonpersistent_flags+=("--no-prompt") - flags+=("--apiserver-url=") - flags+=("--debug") - flags+=("--namespace=") - two_word_flags+=("-n") - flags+=("--pgo-ca-cert=") - flags+=("--pgo-client-cert=") - flags+=("--pgo-client-key=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_pgo_delete_schedule() -{ - last_command="pgo_delete_schedule" - - command_aliases=() - - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--no-prompt") - local_nonpersistent_flags+=("--no-prompt") - flags+=("--schedule-name=") - local_nonpersistent_flags+=("--schedule-name=") - flags+=("--selector=") - two_word_flags+=("-s") - local_nonpersistent_flags+=("--selector=") - flags+=("--apiserver-url=") - flags+=("--debug") - flags+=("--namespace=") - two_word_flags+=("-n") - flags+=("--pgo-ca-cert=") - flags+=("--pgo-client-cert=") - flags+=("--pgo-client-key=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_pgo_delete_user() -{ - last_command="pgo_delete_user" - - command_aliases=() - - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--all") - local_nonpersistent_flags+=("--all") - flags+=("--no-prompt") - local_nonpersistent_flags+=("--no-prompt") - flags+=("--selector=") - two_word_flags+=("-s") - local_nonpersistent_flags+=("--selector=") - flags+=("--username=") - local_nonpersistent_flags+=("--username=") - flags+=("--apiserver-url=") - flags+=("--debug") - flags+=("--namespace=") - two_word_flags+=("-n") - flags+=("--pgo-ca-cert=") - flags+=("--pgo-client-cert=") - flags+=("--pgo-client-key=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_pgo_delete() -{ - last_command="pgo_delete" - - command_aliases=() - - commands=() - commands+=("backup") - commands+=("cluster") - commands+=("label") - commands+=("namespace") - commands+=("pgbouncer") - commands+=("pgorole") - commands+=("pgouser") - commands+=("policy") - commands+=("schedule") - commands+=("user") - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--apiserver-url=") - flags+=("--debug") - flags+=("--namespace=") - two_word_flags+=("-n") - flags+=("--pgo-ca-cert=") - flags+=("--pgo-client-cert=") - flags+=("--pgo-client-key=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_pgo_df() -{ - last_command="pgo_df" - - command_aliases=() - - 
commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--selector=") - two_word_flags+=("-s") - local_nonpersistent_flags+=("--selector=") - flags+=("--apiserver-url=") - flags+=("--debug") - flags+=("--namespace=") - two_word_flags+=("-n") - flags+=("--pgo-ca-cert=") - flags+=("--pgo-client-cert=") - flags+=("--pgo-client-key=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_pgo_failover() -{ - last_command="pgo_failover" - - command_aliases=() - - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--autofail-replace-replica=") - local_nonpersistent_flags+=("--autofail-replace-replica=") - flags+=("--no-prompt") - local_nonpersistent_flags+=("--no-prompt") - flags+=("--query") - local_nonpersistent_flags+=("--query") - flags+=("--target=") - local_nonpersistent_flags+=("--target=") - flags+=("--apiserver-url=") - flags+=("--debug") - flags+=("--namespace=") - two_word_flags+=("-n") - flags+=("--pgo-ca-cert=") - flags+=("--pgo-client-cert=") - flags+=("--pgo-client-key=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_pgo_label() -{ - last_command="pgo_label" - - command_aliases=() - - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--dry-run") - local_nonpersistent_flags+=("--dry-run") - flags+=("--label=") - local_nonpersistent_flags+=("--label=") - flags+=("--selector=") - two_word_flags+=("-s") - local_nonpersistent_flags+=("--selector=") - flags+=("--apiserver-url=") - flags+=("--debug") - flags+=("--namespace=") - two_word_flags+=("-n") - flags+=("--pgo-ca-cert=") - flags+=("--pgo-client-cert=") - flags+=("--pgo-client-key=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_pgo_ls() -{ - last_command="pgo_ls" - - command_aliases=() - - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--apiserver-url=") - flags+=("--debug") - flags+=("--namespace=") - two_word_flags+=("-n") - flags+=("--pgo-ca-cert=") - flags+=("--pgo-client-cert=") - flags+=("--pgo-client-key=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_pgo_reload() -{ - last_command="pgo_reload" - - command_aliases=() - - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--no-prompt") - local_nonpersistent_flags+=("--no-prompt") - flags+=("--selector=") - two_word_flags+=("-s") - local_nonpersistent_flags+=("--selector=") - flags+=("--apiserver-url=") - flags+=("--debug") - flags+=("--namespace=") - two_word_flags+=("-n") - flags+=("--pgo-ca-cert=") - flags+=("--pgo-client-cert=") - flags+=("--pgo-client-key=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_pgo_restore() -{ - last_command="pgo_restore" - - command_aliases=() - - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--backup-opts=") - local_nonpersistent_flags+=("--backup-opts=") - flags+=("--backup-path=") - local_nonpersistent_flags+=("--backup-path=") - flags+=("--backup-pvc=") - local_nonpersistent_flags+=("--backup-pvc=") - flags+=("--backup-type=") - local_nonpersistent_flags+=("--backup-type=") - 
flags+=("--no-prompt") - local_nonpersistent_flags+=("--no-prompt") - flags+=("--node-label=") - local_nonpersistent_flags+=("--node-label=") - flags+=("--pgbackrest-storage-type=") - local_nonpersistent_flags+=("--pgbackrest-storage-type=") - flags+=("--pitr-target=") - local_nonpersistent_flags+=("--pitr-target=") - flags+=("--restore-to-pvc=") - local_nonpersistent_flags+=("--restore-to-pvc=") - flags+=("--apiserver-url=") - flags+=("--debug") - flags+=("--namespace=") - two_word_flags+=("-n") - flags+=("--pgo-ca-cert=") - flags+=("--pgo-client-cert=") - flags+=("--pgo-client-key=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_pgo_scale() -{ - last_command="pgo_scale" - - command_aliases=() - - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--ccp-image-tag=") - local_nonpersistent_flags+=("--ccp-image-tag=") - flags+=("--no-prompt") - local_nonpersistent_flags+=("--no-prompt") - flags+=("--node-label=") - local_nonpersistent_flags+=("--node-label=") - flags+=("--replica-count=") - local_nonpersistent_flags+=("--replica-count=") - flags+=("--service-type=") - local_nonpersistent_flags+=("--service-type=") - flags+=("--storage-config=") - local_nonpersistent_flags+=("--storage-config=") - flags+=("--apiserver-url=") - flags+=("--debug") - flags+=("--namespace=") - two_word_flags+=("-n") - flags+=("--pgo-ca-cert=") - flags+=("--pgo-client-cert=") - flags+=("--pgo-client-key=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_pgo_scaledown() -{ - last_command="pgo_scaledown" - - command_aliases=() - - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--delete-data") - flags+=("-d") - local_nonpersistent_flags+=("--delete-data") - flags+=("--no-prompt") - local_nonpersistent_flags+=("--no-prompt") - flags+=("--query") - local_nonpersistent_flags+=("--query") - flags+=("--target=") - local_nonpersistent_flags+=("--target=") - flags+=("--apiserver-url=") - flags+=("--debug") - flags+=("--namespace=") - two_word_flags+=("-n") - flags+=("--pgo-ca-cert=") - flags+=("--pgo-client-cert=") - flags+=("--pgo-client-key=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_pgo_show_backup() -{ - last_command="pgo_show_backup" - - command_aliases=() - - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--backup-type=") - local_nonpersistent_flags+=("--backup-type=") - flags+=("--apiserver-url=") - flags+=("--debug") - flags+=("--namespace=") - two_word_flags+=("-n") - flags+=("--pgo-ca-cert=") - flags+=("--pgo-client-cert=") - flags+=("--pgo-client-key=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_pgo_show_cluster() -{ - last_command="pgo_show_cluster" - - command_aliases=() - - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--all") - local_nonpersistent_flags+=("--all") - flags+=("--ccp-image-tag=") - local_nonpersistent_flags+=("--ccp-image-tag=") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--selector=") - two_word_flags+=("-s") - local_nonpersistent_flags+=("--selector=") - flags+=("--apiserver-url=") - flags+=("--debug") - flags+=("--namespace=") - two_word_flags+=("-n") - 
flags+=("--pgo-ca-cert=") - flags+=("--pgo-client-cert=") - flags+=("--pgo-client-key=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_pgo_show_config() -{ - last_command="pgo_show_config" - - command_aliases=() - - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--apiserver-url=") - flags+=("--debug") - flags+=("--namespace=") - two_word_flags+=("-n") - flags+=("--pgo-ca-cert=") - flags+=("--pgo-client-cert=") - flags+=("--pgo-client-key=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_pgo_show_namespace() -{ - last_command="pgo_show_namespace" - - command_aliases=() - - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--all") - local_nonpersistent_flags+=("--all") - flags+=("--apiserver-url=") - flags+=("--debug") - flags+=("--namespace=") - two_word_flags+=("-n") - flags+=("--pgo-ca-cert=") - flags+=("--pgo-client-cert=") - flags+=("--pgo-client-key=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_pgo_show_pgorole() -{ - last_command="pgo_show_pgorole" - - command_aliases=() - - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--all") - local_nonpersistent_flags+=("--all") - flags+=("--apiserver-url=") - flags+=("--debug") - flags+=("--namespace=") - two_word_flags+=("-n") - flags+=("--pgo-ca-cert=") - flags+=("--pgo-client-cert=") - flags+=("--pgo-client-key=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_pgo_show_pgouser() -{ - last_command="pgo_show_pgouser" - - command_aliases=() - - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--all") - local_nonpersistent_flags+=("--all") - flags+=("--apiserver-url=") - flags+=("--debug") - flags+=("--namespace=") - two_word_flags+=("-n") - flags+=("--pgo-ca-cert=") - flags+=("--pgo-client-cert=") - flags+=("--pgo-client-key=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_pgo_show_policy() -{ - last_command="pgo_show_policy" - - command_aliases=() - - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--all") - local_nonpersistent_flags+=("--all") - flags+=("--apiserver-url=") - flags+=("--debug") - flags+=("--namespace=") - two_word_flags+=("-n") - flags+=("--pgo-ca-cert=") - flags+=("--pgo-client-cert=") - flags+=("--pgo-client-key=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_pgo_show_pvc() -{ - last_command="pgo_show_pvc" - - command_aliases=() - - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--all") - local_nonpersistent_flags+=("--all") - flags+=("--node-label=") - local_nonpersistent_flags+=("--node-label=") - flags+=("--pvc-root=") - local_nonpersistent_flags+=("--pvc-root=") - flags+=("--apiserver-url=") - flags+=("--debug") - flags+=("--namespace=") - two_word_flags+=("-n") - flags+=("--pgo-ca-cert=") - flags+=("--pgo-client-cert=") - flags+=("--pgo-client-key=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_pgo_show_schedule() -{ - last_command="pgo_show_schedule" - - command_aliases=() - - commands=() 
- - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--no-prompt") - local_nonpersistent_flags+=("--no-prompt") - flags+=("--schedule-name=") - local_nonpersistent_flags+=("--schedule-name=") - flags+=("--selector=") - two_word_flags+=("-s") - local_nonpersistent_flags+=("--selector=") - flags+=("--apiserver-url=") - flags+=("--debug") - flags+=("--namespace=") - two_word_flags+=("-n") - flags+=("--pgo-ca-cert=") - flags+=("--pgo-client-cert=") - flags+=("--pgo-client-key=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_pgo_show_user() -{ - last_command="pgo_show_user" - - command_aliases=() - - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--all") - local_nonpersistent_flags+=("--all") - flags+=("--expired=") - local_nonpersistent_flags+=("--expired=") - flags+=("--selector=") - two_word_flags+=("-s") - local_nonpersistent_flags+=("--selector=") - flags+=("--apiserver-url=") - flags+=("--debug") - flags+=("--namespace=") - two_word_flags+=("-n") - flags+=("--pgo-ca-cert=") - flags+=("--pgo-client-cert=") - flags+=("--pgo-client-key=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_pgo_show_workflow() -{ - last_command="pgo_show_workflow" - - command_aliases=() - - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--apiserver-url=") - flags+=("--debug") - flags+=("--namespace=") - two_word_flags+=("-n") - flags+=("--pgo-ca-cert=") - flags+=("--pgo-client-cert=") - flags+=("--pgo-client-key=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_pgo_show() -{ - last_command="pgo_show" - - command_aliases=() - - commands=() - commands+=("backup") - commands+=("cluster") - commands+=("config") - commands+=("namespace") - commands+=("pgorole") - commands+=("pgouser") - commands+=("policy") - commands+=("pvc") - commands+=("schedule") - commands+=("user") - commands+=("workflow") - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--apiserver-url=") - flags+=("--debug") - flags+=("--namespace=") - two_word_flags+=("-n") - flags+=("--pgo-ca-cert=") - flags+=("--pgo-client-cert=") - flags+=("--pgo-client-key=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_pgo_status() -{ - last_command="pgo_status" - - command_aliases=() - - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--apiserver-url=") - flags+=("--debug") - flags+=("--namespace=") - two_word_flags+=("-n") - flags+=("--pgo-ca-cert=") - flags+=("--pgo-client-cert=") - flags+=("--pgo-client-key=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_pgo_test() -{ - last_command="pgo_test" - - command_aliases=() - - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--all") - local_nonpersistent_flags+=("--all") - flags+=("--output=") - two_word_flags+=("-o") - local_nonpersistent_flags+=("--output=") - flags+=("--selector=") - two_word_flags+=("-s") - local_nonpersistent_flags+=("--selector=") - flags+=("--apiserver-url=") - 
flags+=("--debug") - flags+=("--namespace=") - two_word_flags+=("-n") - flags+=("--pgo-ca-cert=") - flags+=("--pgo-client-cert=") - flags+=("--pgo-client-key=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_pgo_update_cluster() -{ - last_command="pgo_update_cluster" - - command_aliases=() - - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--all") - local_nonpersistent_flags+=("--all") - flags+=("--autofail") - local_nonpersistent_flags+=("--autofail") - flags+=("--no-prompt") - local_nonpersistent_flags+=("--no-prompt") - flags+=("--selector=") - two_word_flags+=("-s") - local_nonpersistent_flags+=("--selector=") - flags+=("--apiserver-url=") - flags+=("--debug") - flags+=("--namespace=") - two_word_flags+=("-n") - flags+=("--pgo-ca-cert=") - flags+=("--pgo-client-cert=") - flags+=("--pgo-client-key=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_pgo_update_namespace() -{ - last_command="pgo_update_namespace" - - command_aliases=() - - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--apiserver-url=") - flags+=("--debug") - flags+=("--namespace=") - two_word_flags+=("-n") - flags+=("--pgo-ca-cert=") - flags+=("--pgo-client-cert=") - flags+=("--pgo-client-key=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_pgo_update_pgorole() -{ - last_command="pgo_update_pgorole" - - command_aliases=() - - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--no-prompt") - local_nonpersistent_flags+=("--no-prompt") - flags+=("--permissions=") - local_nonpersistent_flags+=("--permissions=") - flags+=("--apiserver-url=") - flags+=("--debug") - flags+=("--namespace=") - two_word_flags+=("-n") - flags+=("--pgo-ca-cert=") - flags+=("--pgo-client-cert=") - flags+=("--pgo-client-key=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_pgo_update_pgouser() -{ - last_command="pgo_update_pgouser" - - command_aliases=() - - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--all-namespaces") - local_nonpersistent_flags+=("--all-namespaces") - flags+=("--no-prompt") - local_nonpersistent_flags+=("--no-prompt") - flags+=("--pgouser-namespaces=") - local_nonpersistent_flags+=("--pgouser-namespaces=") - flags+=("--pgouser-password=") - local_nonpersistent_flags+=("--pgouser-password=") - flags+=("--pgouser-roles=") - local_nonpersistent_flags+=("--pgouser-roles=") - flags+=("--apiserver-url=") - flags+=("--debug") - flags+=("--namespace=") - two_word_flags+=("-n") - flags+=("--pgo-ca-cert=") - flags+=("--pgo-client-cert=") - flags+=("--pgo-client-key=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_pgo_update_user() -{ - last_command="pgo_update_user" - - command_aliases=() - - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--all") - local_nonpersistent_flags+=("--all") - flags+=("--expire-user") - local_nonpersistent_flags+=("--expire-user") - flags+=("--expired=") - local_nonpersistent_flags+=("--expired=") - flags+=("--password=") - local_nonpersistent_flags+=("--password=") - flags+=("--password-length=") - 
local_nonpersistent_flags+=("--password-length=") - flags+=("--selector=") - two_word_flags+=("-s") - local_nonpersistent_flags+=("--selector=") - flags+=("--username=") - local_nonpersistent_flags+=("--username=") - flags+=("--valid-days=") - local_nonpersistent_flags+=("--valid-days=") - flags+=("--apiserver-url=") - flags+=("--debug") - flags+=("--namespace=") - two_word_flags+=("-n") - flags+=("--pgo-ca-cert=") - flags+=("--pgo-client-cert=") - flags+=("--pgo-client-key=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_pgo_update() -{ - last_command="pgo_update" - - command_aliases=() - - commands=() - commands+=("cluster") - commands+=("namespace") - commands+=("pgorole") - commands+=("pgouser") - commands+=("user") - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--apiserver-url=") - flags+=("--debug") - flags+=("--namespace=") - two_word_flags+=("-n") - flags+=("--pgo-ca-cert=") - flags+=("--pgo-client-cert=") - flags+=("--pgo-client-key=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_pgo_upgrade() -{ - last_command="pgo_upgrade" - - command_aliases=() - - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--ccp-image-tag=") - local_nonpersistent_flags+=("--ccp-image-tag=") - flags+=("--apiserver-url=") - flags+=("--debug") - flags+=("--namespace=") - two_word_flags+=("-n") - flags+=("--pgo-ca-cert=") - flags+=("--pgo-client-cert=") - flags+=("--pgo-client-key=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_pgo_version() -{ - last_command="pgo_version" - - command_aliases=() - - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--help") - flags+=("-h") - local_nonpersistent_flags+=("--help") - flags+=("--apiserver-url=") - flags+=("--debug") - flags+=("--namespace=") - two_word_flags+=("-n") - flags+=("--pgo-ca-cert=") - flags+=("--pgo-client-cert=") - flags+=("--pgo-client-key=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_pgo_watch() -{ - last_command="pgo_watch" - - command_aliases=() - - commands=() - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--pgo-event-address=") - two_word_flags+=("-a") - local_nonpersistent_flags+=("--pgo-event-address=") - flags+=("--apiserver-url=") - flags+=("--debug") - flags+=("--namespace=") - two_word_flags+=("-n") - flags+=("--pgo-ca-cert=") - flags+=("--pgo-client-cert=") - flags+=("--pgo-client-key=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -_pgo_root_command() -{ - last_command="pgo" - - command_aliases=() - - commands=() - commands+=("apply") - commands+=("backup") - commands+=("cat") - commands+=("create") - commands+=("delete") - commands+=("df") - commands+=("failover") - commands+=("label") - commands+=("load") - commands+=("ls") - commands+=("reload") - commands+=("restore") - commands+=("scale") - commands+=("scaledown") - commands+=("show") - commands+=("status") - commands+=("test") - commands+=("update") - commands+=("upgrade") - commands+=("version") - commands+=("watch") - - flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - - flags+=("--apiserver-url=") - flags+=("--debug") - flags+=("--namespace=") 
- two_word_flags+=("-n") - flags+=("--pgo-ca-cert=") - flags+=("--pgo-client-cert=") - flags+=("--pgo-client-key=") - - must_have_one_flag=() - must_have_one_noun=() - noun_aliases=() -} - -__start_pgo() -{ - local cur prev words cword - declare -A flaghash 2>/dev/null || : - declare -A aliashash 2>/dev/null || : - if declare -F _init_completion >/dev/null 2>&1; then - _init_completion -s || return - else - __pgo_init_completion -n "=" || return - fi - - local c=0 - local flags=() - local two_word_flags=() - local local_nonpersistent_flags=() - local flags_with_completion=() - local flags_completion=() - local commands=("pgo") - local must_have_one_flag=() - local must_have_one_noun=() - local last_command - local nouns=() - - __pgo_handle_word -} - -if [[ $(type -t compopt) = "builtin" ]]; then - complete -o default -F __start_pgo pgo -else - complete -o default -o nospace -F __start_pgo pgo -fi - -# ex: ts=4 sw=4 et filetype=sh diff --git a/go.mod b/go.mod index 0147d04c01..c2d5341893 100644 --- a/go.mod +++ b/go.mod @@ -1,27 +1,58 @@ module github.com/crunchydata/postgres-operator -go 1.15 +go 1.19 require ( - github.com/docker/spdystream v0.0.0-20181023171402-6480d4af844c // indirect github.com/fatih/color v1.9.0 - github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e // indirect - github.com/google/go-cmp v0.4.1 // indirect github.com/gorilla/mux v1.7.4 - github.com/konsorten/go-windows-terminal-sequences v1.0.2 // indirect - github.com/mattn/go-colorable v0.1.6 // indirect github.com/nsqio/go-nsq v1.0.8 github.com/robfig/cron/v3 v3.0.1 github.com/sirupsen/logrus v1.5.0 github.com/spf13/cobra v0.0.5 github.com/spf13/pflag v1.0.5 github.com/xdg/stringprep v1.0.0 - golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 - golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d // indirect - golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a - k8s.io/api v0.19.2 - k8s.io/apimachinery v0.19.2 - k8s.io/client-go v0.19.2 - sigs.k8s.io/controller-runtime v0.6.3 + golang.org/x/crypto v0.6.0 + golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9 + k8s.io/api v0.21.10 + k8s.io/apimachinery v0.21.10 + k8s.io/client-go v0.21.10 + sigs.k8s.io/controller-runtime v0.6.4 sigs.k8s.io/yaml v1.2.0 ) + +require ( + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/evanphx/json-patch v4.9.0+incompatible // indirect + github.com/go-logr/logr v0.4.0 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/golang/protobuf v1.5.0 // indirect + github.com/golang/snappy v0.0.1 // indirect + github.com/google/go-cmp v0.5.5 // indirect + github.com/google/gofuzz v1.1.0 // indirect + github.com/googleapis/gnostic v0.4.1 // indirect + github.com/hashicorp/golang-lru v0.5.4 // indirect + github.com/imdario/mergo v0.3.9 // indirect + github.com/inconshreveable/mousetrap v1.0.0 // indirect + github.com/json-iterator/go v1.1.10 // indirect + github.com/konsorten/go-windows-terminal-sequences v1.0.2 // indirect + github.com/mattn/go-colorable v0.1.6 // indirect + github.com/mattn/go-isatty v0.0.12 // indirect + github.com/moby/spdystream v0.2.0 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.1 // indirect + github.com/pkg/errors v0.9.1 // indirect + golang.org/x/net v0.7.0 // indirect + golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43 // indirect + golang.org/x/sys v0.5.0 // indirect + golang.org/x/term v0.5.0 // indirect + golang.org/x/text v0.7.0 // indirect + golang.org/x/time 
v0.0.0-20210220033141-f8bda1e9f3ba // indirect + google.golang.org/appengine v1.6.6 // indirect + google.golang.org/protobuf v1.26.0 // indirect + gopkg.in/inf.v0 v0.9.1 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect + k8s.io/klog/v2 v2.9.0 // indirect + k8s.io/kube-openapi v0.0.0-20211110012726-3cc51fd1e909 // indirect + k8s.io/utils v0.0.0-20210521133846-da695404a2bc // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.2.1 // indirect +) diff --git a/go.sum b/go.sum index 9be2bc52bd..6152c044e7 100644 --- a/go.sum +++ b/go.sum @@ -5,24 +5,47 @@ cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6A cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= -cloud.google.com/go v0.51.0/go.mod h1:hWtGJ6gnXH+KgDv+V0zFGDvpi07n3z8ZNj3T1RW0Gcw= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= +github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= 
 github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI=
-github.com/Azure/go-autorest/autorest v0.9.6/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUdAmGFqi0vUdVNNx8q630=
+github.com/Azure/go-autorest/autorest v0.11.12/go.mod h1:eipySxLmqSyC5s5k1CLupqet0PSENBEDP93LQ9a8QYw=
 github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0=
-github.com/Azure/go-autorest/autorest/adal v0.8.2/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q=
+github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A=
 github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA=
-github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g=
+github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74=
 github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
 github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
-github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM=
+github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
 github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc=
+github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
 github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk=
+github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
 github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
 github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
@@ -47,6 +70,7 @@ github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWR
 github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
 github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
 github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
+github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
 github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
 github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
 github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
@@ -57,9 +81,9 @@ github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7
 github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
 github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
 github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
-github.com/cpuguy83/go-md2man v1.0.10 h1:BSKMNlYxDvnunlTymqtgONjNnaRV1sTpcovwwjF22jk=
 github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
 github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
+github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@@ -68,8 +92,6 @@ github.com/docker/docker v0.7.3-0.20190327010347-be7ac8be2ae0/go.mod h1:eEKB0N0r
 github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
 github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
 github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
-github.com/docker/spdystream v0.0.0-20181023171402-6480d4af844c h1:ZfSZ3P3BedhKGUhzj7BQlPSU4OvT6tfOKe3DVHzOA7s=
-github.com/docker/spdystream v0.0.0-20181023171402-6480d4af844c/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
 github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
 github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
 github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
@@ -77,7 +99,9 @@ github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153 h1:yUdfgN0XgIJw7fo
 github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
 github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
 github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
+github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
 github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
+github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
 github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
 github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
 github.com/evanphx/json-patch v4.5.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
@@ -86,7 +110,7 @@ github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLi
 github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
 github.com/fatih/color v1.9.0 h1:8xPHl4/q1VyqGIPif1F+1V3Y3lSmrq01EabUW3CoW5s=
 github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
-github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
+github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
 github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
 github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
 github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
@@ -94,13 +118,14 @@ github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2H
 github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
 github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q=
 github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q=
+github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
 github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
+github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
 github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
 github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
-github.com/go-logr/logr v0.1.0 h1:M1Tv3VzNlEHg6uyACnRdtrploV2P7wZqH8BoQMtz0cg=
 github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
-github.com/go-logr/logr v0.2.0 h1:QvGt2nLcHH0WK9orKa+ppBPAxREcH364nPUedEpK0TY=
-github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
+github.com/go-logr/logr v0.4.0 h1:K7/B1jt6fIBQVd4Owv2MqGQClcgf0R266+7C/QjRcLc=
+github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
 github.com/go-logr/zapr v0.1.0/go.mod h1:tabnROwaDl0UNxkVeFRbY8bwB37GwRv0P8lg6aAiEnk=
 github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI=
 github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik=
@@ -148,8 +173,9 @@ github.com/go-openapi/validate v0.19.5/go.mod h1:8DJv2CVJQ6kGNpFW6eV9N3JviE1C85n
 github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
 github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
 github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
-github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls=
 github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
+github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
+github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
 github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
 github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
@@ -160,19 +186,26 @@ github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4er
 github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
 github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
 github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
+github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
 github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
 github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
 github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
 github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
 github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
+github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
+github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
 github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
 github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
 github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
 github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
 github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
 github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
-github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0=
 github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
+github.com/golang/protobuf v1.5.0 h1:LUVKkCeviFUMKqHa4tXIIij/lbhnMbP7Fn5wKdKkRh4=
+github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
 github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4=
 github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
 github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
@@ -181,19 +214,28 @@ github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5a
 github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
 github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
 github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.4.1 h1:/exdXoGamhu5ONeUJH0deniYLWYvQwW66yvlfiiKTu0=
 github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU=
+github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
 github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g=
 github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
 github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
+github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
 github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
 github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
 github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
 github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
 github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY=
 github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y=
+github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
 github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
 github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
@@ -206,6 +248,7 @@ github.com/gorilla/mux v1.7.4 h1:VuZ8uybHlWmqV03+zRzdwKL4tUnIp1MAQtp1mIFE1bc=
 github.com/gorilla/mux v1.7.4/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
 github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
 github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
+github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
 github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
 github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
 github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
@@ -233,19 +276,19 @@ github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/X
 github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
 github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
 github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
+github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
 github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
 github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
 github.com/konsorten/go-windows-terminal-sequences v1.0.2 h1:DB17ag19krx9CFsz4o3enTrPXyIXCl+2iCXH/aMAp9s=
 github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
 github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
-github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
 github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
-github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs=
 github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
 github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
 github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA=
-github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
 github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
+github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
 github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
 github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
 github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
@@ -263,10 +306,11 @@ github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOA
 github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY=
 github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
 github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
-github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
 github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
 github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
 github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
+github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8=
+github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c=
 github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
 github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
 github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
@@ -277,6 +321,8 @@ github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8m
 github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
 github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
 github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
+github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs=
+github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
 github.com/nsqio/go-nsq v1.0.8 h1:3L2F8tNLlwXXlp2slDUrUWSBn2O3nMh8R1/KEDFTHPk=
 github.com/nsqio/go-nsq v1.0.8/go.mod h1:vKq36oyeVXgsS5Q8YEO7WghqidAVXQlcFxzQbQTuDEY=
 github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78=
@@ -303,24 +349,19 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
 github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA=
 github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
-github.com/prometheus/client_golang v1.0.0 h1:vrDKnkGzuGvhNAL56c7DBz29ZL+KxnoR0x7enabFceM=
 github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
 github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
 github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
 github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M=
 github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/common v0.4.1 h1:K0MGApIoQvMw27RTdJkPbr3JZ7DNbtxQNyi5STVM6Kw=
 github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
 github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
 github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
-github.com/prometheus/procfs v0.0.11 h1:DhHlBtkHWPYi8O2y31JkK0TF+DGM+51OopZjH/Ia5qI=
 github.com/prometheus/procfs v0.0.11/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
 github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=
 github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
 github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
 github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
-github.com/russross/blackfriday v1.5.2 h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNueLj0oo=
 github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
 github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
 github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
@@ -346,8 +387,9 @@ github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+
 github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
 github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
 github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
-github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
 github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
+github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0=
+github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
 github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
 github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
 github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
@@ -357,6 +399,10 @@ github.com/xdg/stringprep v1.0.0 h1:d9X0esnoa3dFsV0FG35rAT0RIhYFlPq7MiP+DW89La0=
 github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y=
 github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
 github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
+github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
 go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
 go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg=
 go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
@@ -365,6 +411,8 @@ go.mongodb.org/mongo-driver v1.1.2/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qL
 go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
 go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
 go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
 go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
 go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
 go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
@@ -379,15 +427,22 @@ golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8U
 golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
 golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
 golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
 golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI=
 golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
+golang.org/x/crypto v0.6.0 h1:qfktjS5LUO+fFKeJXZ+ikTRijMmljikvG68fpMMruSc=
+golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=
 golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
 golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
+golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
+golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
 golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
+golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
+golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
+golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
 golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
 golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
 golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
@@ -396,12 +451,18 @@ golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTk
 golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
 golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
 golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
 golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
+golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
 golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
 golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
 golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
 golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
 golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
+golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
+golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -419,29 +480,47 @@ golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn
 golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
 golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7 h1:AeiKBIuRw3UomYXSbLy0Mc2dDLfdtbT/IVn4keq83P0=
+golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
 golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200707034311-ab3426394381 h1:VXak5I6aEWmAXeQjA+QSZzlgNrpq9mjcfDemuexIKsU=
+golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
 golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g=
+golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
 golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
 golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
 golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
 golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw=
 golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43 h1:ld7aEMNHoBnnDAX15v1T6z31v8HwR2A9FYOuAhWqkwc=
+golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
 golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a h1:WXEvlFVvvGxCJLG6REjsT03iWnKLEWinaScsxF2Vm2o=
 golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9 h1:SQFwaSi55rU7vdNs9Yr0Z324VNlrF+0wMqRXT4St8ck=
+golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -461,8 +540,10 @@ golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7w
 golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -470,22 +551,46 @@ golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7w
 golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4 h1:5/PjkGUjvEU5Gl6BxmvKRPpqo2uNMv4rcHBMwzk/st8=
-golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU=
+golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
+golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
+golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
+golang.org/x/term v0.5.0 h1:n2a8QNdAb0sZNpU9R1ALUXBbY+w51fCQDN+7EdxNBsY=
+golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
 golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
-golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k=
 golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo=
+golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
 golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20191024005414-555d28b269f0 h1:/5xXl8Y5W96D+TtHSlonuFqGHIWVuyCkGJLwGh9JJFs=
 golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba h1:O8mE0/t419eoIwhTFpKVkHiTs/Igowgfkj25AcZrtiE=
+golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
 golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
 golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@@ -508,25 +613,62 @@ golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtn
 golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
 golang.org/x/tools v0.0.0-20190920225731-5eefd052ad72/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
 golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
 golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
 golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
+golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
+golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
+golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
+golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
+golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
+golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
 golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
 golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-gomodules.xyz/jsonpatch/v2 v2.0.1 h1:xyiBuvkD2g5n7cYzx6u2sxQvsAy4QJsZFCzGVdzOXZ0=
+gomodules.xyz/jsonpatch/v2 v2.0.1 h1:xyiBuvkD2g5n7cYzx6u2sxQvsAy4QJsZFCzGVdzOXZ0=
+golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
+golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 gomodules.xyz/jsonpatch/v2 v2.0.1/go.mod h1:IhYNNY4jnS53ZnfE4PAmpKtDpTCj1JFXc+3mwe7XcUU=
 google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
 google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
 google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
 google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
+google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
+google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
 google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
+google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
+google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
+google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
+google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
 google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
 google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
 google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
 google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
-google.golang.org/appengine v1.6.5 h1:tycE03LOZYQNhDpS27tcQdAzLCVMaj7QT2SXxebnpCM=
 google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
+google.golang.org/appengine v1.6.6 h1:lMO5rYAqUxkmaj76jAkRUvt5JZgFymx/+Q5Mzfivuhc=
+google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
 google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
 google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
 google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
@@ -535,31 +677,59 @@ google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRn
 google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
 google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
 google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
+google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
 google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA=
+google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U=
 google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
+google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
+google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
 google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
 google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
 google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
 google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
 google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
+google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
 google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
 google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60=
+google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
+google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
+google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
 google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
 google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
 google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
 google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
 google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
 google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.23.0 h1:4MY060fB1DLGMB/7MBTLnwQUY6+F09GEiz6SsrNqyzM=
 google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
 google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.24.0 h1:UhZDfRO8JRQru4/+LlLE0BRKGF8L+PICnvYZmx/fEGA=
 google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
+google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
+google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
+google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk=
+google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
 gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
 gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU=
+gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
 gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
 gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
@@ -575,26 +745,30 @@ gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
 gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
 gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
 gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU=
 gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
+gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
 gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
 honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
 honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
 honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
 honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
 honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
+honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
+honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
 k8s.io/api v0.18.6/go.mod h1:eeyxr+cwCjMdLAmr2W3RyDI0VvTawSg/3RFFBEnmZGI=
-k8s.io/api v0.19.2 h1:q+/krnHWKsL7OBZg/rxnycsl9569Pud76UJ77MvKXms=
-k8s.io/api v0.19.2/go.mod h1:IQpK0zFQ1xc5iNIQPqzgoOwuFugaYHK4iCknlAQP9nI=
-k8s.io/apiextensions-apiserver v0.18.6 h1:vDlk7cyFsDyfwn2rNAO2DbmUbvXy5yT5GE3rrqOzaMo=
+k8s.io/api v0.21.10 h1:WKcYyNBZNMrE9yejBs0Lx70jGsOW8uUwkiA4ioxkz1Q=
+k8s.io/api v0.21.10/go.mod h1:5kqv2pCXwcrOvV12WhVAtLZUKaM0kyrZ6nHObw8SojA=
 k8s.io/apiextensions-apiserver v0.18.6/go.mod h1:lv89S7fUysXjLZO7ke783xOwVTm6lKizADfvUM/SS/M=
 k8s.io/apimachinery v0.18.6/go.mod h1:OaXp26zu/5J7p0f92ASynJa1pZo06YlV9fG7BoWbCko=
-k8s.io/apimachinery v0.19.2 h1:5Gy9vQpAGTKHPVOh5c4plE274X8D/6cuEiTO2zve7tc=
-k8s.io/apimachinery v0.19.2/go.mod h1:DnPGDnARWFvYa3pMHgSxtbZb7gpzzAZ1pTfaUNDVlmA=
+k8s.io/apimachinery v0.21.10 h1:mOStSZoCrsxnAMIm5UtCNn6P328cJAhtzJToQYFsylc=
+k8s.io/apimachinery v0.21.10/go.mod h1:USs+ifLG6ZUgHGA/9lGxjdHzCB3hUO3fG1VBOwi0IHo=
 k8s.io/apiserver v0.18.6/go.mod h1:Zt2XvTHuaZjBz6EFYzpp+X4hTmgWGy8AthNVnTdm3Wg=
 k8s.io/client-go v0.18.6/go.mod h1:/fwtGLjYMS1MaM5oi+eXhKwG+1UHidUEXRh6cNsdO0Q=
-k8s.io/client-go v0.19.2 h1:gMJuU3xJZs86L1oQ99R4EViAADUPMHHtS9jFshasHSc=
-k8s.io/client-go v0.19.2/go.mod h1:S5wPhCqyDNAlzM9CnEdgTGV4OqhsW3jGO1UM1epwfJA=
+k8s.io/client-go v0.21.10 h1:/AKJEgLpQDWvZbq7cq2vEx0bpqpAlOOHitOrctSV8bI=
+k8s.io/client-go v0.21.10/go.mod h1:nAGhVCjwhbDP2whk65n3STSCn24H/VGp1pKSk9UszU8=
 k8s.io/code-generator v0.18.6/go.mod h1:TgNEVx9hCyPGpdtCWA34olQYLkh3ok9ar7XfSsr8b6c=
 k8s.io/component-base v0.18.6/go.mod h1:knSVsibPR5K6EW2XOjEHik6sdU5nCvKMrzMt2D4In14=
 k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
@@ -602,27 +776,28 @@ k8s.io/gengo v0.0.0-20200114144118-36b2048a9120/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8
 k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
 k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
 k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
-k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8=
 k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I=
-k8s.io/klog/v2 v2.0.0 h1:Foj74zO6RbjjP4hBEKjnYtjjAhGg4jNynUdYF6fJrok=
 k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
-k8s.io/klog/v2 v2.2.0 h1:XRvcwJozkgZ1UQJmfMGpvRthQHOvihEhYtDfAaxMz/A=
-k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
+k8s.io/klog/v2 v2.9.0 h1:D7HV+n1V57XeZ0m6tdRkfknthUaM06VFbWldOFh8kzM=
+k8s.io/klog/v2 v2.9.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec=
 k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E=
-k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6 h1:+WnxoVtG8TMiudHBSEtrVL1egv36TkkJm+bA8AxicmQ=
-k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o=
+k8s.io/kube-openapi v0.0.0-20211110012726-3cc51fd1e909
h1:s77MRc/+/eQjsF89MB12JssAlsoi9mnNoaacRqibeAU= +k8s.io/kube-openapi v0.0.0-20211110012726-3cc51fd1e909/go.mod h1:wXW5VT87nVfh/iLV8FpR2uDvrFyomxbtb1KivDbvPTE= k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= k8s.io/utils v0.0.0-20200603063816-c1c6865ac451/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -k8s.io/utils v0.0.0-20200729134348-d5654de09c73 h1:uJmqzgNWG7XyClnU/mLPBWwfKKF1K8Hf8whTseBgJcg= -k8s.io/utils v0.0.0-20200729134348-d5654de09c73/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20210521133846-da695404a2bc h1:dx6VGe+PnOW/kD/2UV4aUSsRfJGd7+lcqgJ6Xg0HwUs= +k8s.io/utils v0.0.0-20210521133846-da695404a2bc/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.7/go.mod h1:PHgbrJT7lCHcxMU+mDHEm+nx46H4zuuHZkDP6icnhu0= -sigs.k8s.io/controller-runtime v0.6.3 h1:SBbr+inLPEKhvlJtrvDcwIpm+uhDvp63Bl72xYJtoOE= -sigs.k8s.io/controller-runtime v0.6.3/go.mod h1:WlZNXcM0++oyaQt4B7C2lEE5JYRs8vJUzRP4N4JpdAY= +sigs.k8s.io/controller-runtime v0.6.4 h1:4013CKsBs5bEqo+LevzDett+LLxag/FjQWG94nVZ/9g= +sigs.k8s.io/controller-runtime v0.6.4/go.mod h1:WlZNXcM0++oyaQt4B7C2lEE5JYRs8vJUzRP4N4JpdAY= sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= sigs.k8s.io/structured-merge-diff/v3 v3.0.0/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= -sigs.k8s.io/structured-merge-diff/v4 v4.0.1 h1:YXTMot5Qz/X1iBRJhAt+vI+HVttY0WkSqqhKxQ0xVbA= -sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= +sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= +sigs.k8s.io/structured-merge-diff/v4 v4.2.1 h1:bKCqE9GvQ5tiVHn5rfn1r+yao3aLQEaLzkkmAkf+A6Y= +sigs.k8s.io/structured-merge-diff/v4 v4.2.1/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q= sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= diff --git a/hack/boilerplate.go.txt b/hack/boilerplate.go.txt index 8aabc9a12b..82f4fd7c7a 100644 --- a/hack/boilerplate.go.txt +++ b/hack/boilerplate.go.txt @@ -1,5 +1,5 @@ /* -Copyright 2020 Crunchy Data Solutions, Inc. +Copyright 2020 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at diff --git a/hack/config_sync.sh b/hack/config_sync.sh index cab45b023b..7a8cb042b1 100755 --- a/hack/config_sync.sh +++ b/hack/config_sync.sh @@ -1,6 +1,6 @@ #!/bin/bash -# Copyright 2020 Crunchy Data Solutions, Inc. +# Copyright 2020 - 2023 Crunchy Data Solutions, Inc. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at diff --git a/hack/update-codegen.sh b/hack/update-codegen.sh index f81fc33be6..ca0a09a112 100755 --- a/hack/update-codegen.sh +++ b/hack/update-codegen.sh @@ -1,6 +1,6 @@ #!/usr/bin/env bash -# Copyright 2020 Crunchy Data Solutions, Inc. +# Copyright 2020 - 2023 Crunchy Data Solutions, Inc. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at diff --git a/installers/ansible/README.md b/installers/ansible/README.md index a9f0babd16..836da40b76 100644 --- a/installers/ansible/README.md +++ b/installers/ansible/README.md @@ -1,15 +1,15 @@ -# Crunchy Data PostgreSQL Operator Playbook +# PGO: Postgres Operator Playbook

- Crunchy Data + PGO: The Postgres Operator from Crunchy Data

-Latest Release: 4.5.0 +Latest Release: 4.6.10 ## General -This repository contains Ansible Roles for deploying the Crunchy PostgreSQL Operator -for Kubernetes and OpenShift. +This repository contains Ansible Roles for deploying PGO: the Postgres Operator +from [Crunchy Data](https://www.crunchydata.com) for Kubernetes and OpenShift. See the [official documentation for more information](https://crunchydata.github.io/postgres-operator/stable/) on installing Crunchy PostgreSQL Operator. diff --git a/installers/ansible/roles/pgo-operator/defaults/main.yml b/installers/ansible/roles/pgo-operator/defaults/main.yml index 39fb88c679..e30ef6eda7 100644 --- a/installers/ansible/roles/pgo-operator/defaults/main.yml +++ b/installers/ansible/roles/pgo-operator/defaults/main.yml @@ -16,9 +16,7 @@ service_type: "ClusterIP" cleanup: "false" common_name: "crunchydata" crunchy_debug: "false" -enable_crunchyadm: "false" disable_replica_start_fail_reinit: "false" -disable_fsgroup: "false" default_instance_memory: "" default_pgbackrest_memory: "" diff --git a/installers/ansible/roles/pgo-operator/files/crds/pgclusters-crd.yaml b/installers/ansible/roles/pgo-operator/files/crds/pgclusters-crd.yaml index bea777b436..86c19ccdc3 100644 --- a/installers/ansible/roles/pgo-operator/files/crds/pgclusters-crd.yaml +++ b/installers/ansible/roles/pgo-operator/files/crds/pgclusters-crd.yaml @@ -3,6 +3,8 @@ apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition metadata: name: pgclusters.crunchydata.com + labels: + vendor: crunchydata spec: group: crunchydata.com names: @@ -24,12 +26,9 @@ spec: exporterport: { type: string } name: { type: string } pgbadgerport: { type: string } - primarysecretname: { type: string } PrimaryStorage: { type: object } port: { type: string } - rootsecretname: { type: string } userlabels: { type: object } - usersecretname: { type: string } status: properties: state: { type: string } diff --git a/installers/ansible/roles/pgo-operator/files/crds/pgpolicies-crd.yaml b/installers/ansible/roles/pgo-operator/files/crds/pgpolicies-crd.yaml index 32e0d2014c..73d84f0173 100644 --- a/installers/ansible/roles/pgo-operator/files/crds/pgpolicies-crd.yaml +++ b/installers/ansible/roles/pgo-operator/files/crds/pgpolicies-crd.yaml @@ -3,6 +3,8 @@ apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition metadata: name: pgpolicies.crunchydata.com + labels: + vendor: crunchydata spec: group: crunchydata.com names: diff --git a/installers/ansible/roles/pgo-operator/files/crds/pgreplicas-crd.yaml b/installers/ansible/roles/pgo-operator/files/crds/pgreplicas-crd.yaml index 303f77f1ce..167474a41f 100644 --- a/installers/ansible/roles/pgo-operator/files/crds/pgreplicas-crd.yaml +++ b/installers/ansible/roles/pgo-operator/files/crds/pgreplicas-crd.yaml @@ -3,6 +3,8 @@ apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition metadata: name: pgreplicas.crunchydata.com + labels: + vendor: crunchydata spec: group: crunchydata.com names: diff --git a/installers/ansible/roles/pgo-operator/files/crds/pgtasks-crd.yaml b/installers/ansible/roles/pgo-operator/files/crds/pgtasks-crd.yaml index 20fce21e7a..14ae07386d 100644 --- a/installers/ansible/roles/pgo-operator/files/crds/pgtasks-crd.yaml +++ b/installers/ansible/roles/pgo-operator/files/crds/pgtasks-crd.yaml @@ -3,6 +3,8 @@ apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition metadata: name: pgtasks.crunchydata.com + labels: + vendor: crunchydata spec: group: crunchydata.com names: diff --git 
a/installers/ansible/roles/pgo-operator/files/pgo-backrest-repo/aws-s3-ca.crt b/installers/ansible/roles/pgo-operator/files/pgo-backrest-repo/aws-s3-ca.crt index 519028c63b..419c8de459 100644 --- a/installers/ansible/roles/pgo-operator/files/pgo-backrest-repo/aws-s3-ca.crt +++ b/installers/ansible/roles/pgo-operator/files/pgo-backrest-repo/aws-s3-ca.crt @@ -19,3 +19,127 @@ Epn3o0WC4zxe9Z2etciefC7IpJ5OCBRLbf1wbWsaY71k5h+3zvDyny67G7fyUIhz ksLi4xaNmjICq44Y3ekQEe5+NauQrz4wlHrQMz2nZQ/1/I6eYs9HRCwBXbsdtTLS R9I4LtD+gdwyah617jzV/OeBHRnDJELqYzmp -----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDQTCCAimgAwIBAgITBmyfz5m/jAo54vB4ikPmljZbyjANBgkqhkiG9w0BAQsF +ADA5MQswCQYDVQQGEwJVUzEPMA0GA1UEChMGQW1hem9uMRkwFwYDVQQDExBBbWF6 +b24gUm9vdCBDQSAxMB4XDTE1MDUyNjAwMDAwMFoXDTM4MDExNzAwMDAwMFowOTEL +MAkGA1UEBhMCVVMxDzANBgNVBAoTBkFtYXpvbjEZMBcGA1UEAxMQQW1hem9uIFJv +b3QgQ0EgMTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALJ4gHHKeNXj +ca9HgFB0fW7Y14h29Jlo91ghYPl0hAEvrAIthtOgQ3pOsqTQNroBvo3bSMgHFzZM +9O6II8c+6zf1tRn4SWiw3te5djgdYZ6k/oI2peVKVuRF4fn9tBb6dNqcmzU5L/qw +IFAGbHrQgLKm+a/sRxmPUDgH3KKHOVj4utWp+UhnMJbulHheb4mjUcAwhmahRWa6 +VOujw5H5SNz/0egwLX0tdHA114gk957EWW67c4cX8jJGKLhD+rcdqsq08p8kDi1L +93FcXmn/6pUCyziKrlA4b9v7LWIbxcceVOF34GfID5yHI9Y/QCB/IIDEgEw+OyQm +jgSubJrIqg0CAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC +AYYwHQYDVR0OBBYEFIQYzIU07LwMlJQuCFmcx7IQTgoIMA0GCSqGSIb3DQEBCwUA +A4IBAQCY8jdaQZChGsV2USggNiMOruYou6r4lK5IpDB/G/wkjUu0yKGX9rbxenDI +U5PMCCjjmCXPI6T53iHTfIUJrU6adTrCC2qJeHZERxhlbI1Bjjt/msv0tadQ1wUs +N+gDS63pYaACbvXy8MWy7Vu33PqUXHeeE6V/Uq2V8viTO96LXFvKWlJbYK8U90vv +o/ufQJVtMVT8QtPHRh8jrdkPSHCa2XV4cdFyQzR1bldZwgJcJmApzyMZFo6IQ6XU +5MsI+yMRQ+hDKXJioaldXgjUkK642M4UwtBV8ob2xJNDd2ZhwLnoQdeXeGADbkpy +rqXRfboQnoZsG4q5WTP468SQvvG5 +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIFQTCCAymgAwIBAgITBmyf0pY1hp8KD+WGePhbJruKNzANBgkqhkiG9w0BAQwF +ADA5MQswCQYDVQQGEwJVUzEPMA0GA1UEChMGQW1hem9uMRkwFwYDVQQDExBBbWF6 +b24gUm9vdCBDQSAyMB4XDTE1MDUyNjAwMDAwMFoXDTQwMDUyNjAwMDAwMFowOTEL +MAkGA1UEBhMCVVMxDzANBgNVBAoTBkFtYXpvbjEZMBcGA1UEAxMQQW1hem9uIFJv +b3QgQ0EgMjCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAK2Wny2cSkxK +gXlRmeyKy2tgURO8TW0G/LAIjd0ZEGrHJgw12MBvIITplLGbhQPDW9tK6Mj4kHbZ +W0/jTOgGNk3Mmqw9DJArktQGGWCsN0R5hYGCrVo34A3MnaZMUnbqQ523BNFQ9lXg +1dKmSYXpN+nKfq5clU1Imj+uIFptiJXZNLhSGkOQsL9sBbm2eLfq0OQ6PBJTYv9K +8nu+NQWpEjTj82R0Yiw9AElaKP4yRLuH3WUnAnE72kr3H9rN9yFVkE8P7K6C4Z9r +2UXTu/Bfh+08LDmG2j/e7HJV63mjrdvdfLC6HM783k81ds8P+HgfajZRRidhW+me +z/CiVX18JYpvL7TFz4QuK/0NURBs+18bvBt+xa47mAExkv8LV/SasrlX6avvDXbR +8O70zoan4G7ptGmh32n2M8ZpLpcTnqWHsFcQgTfJU7O7f/aS0ZzQGPSSbtqDT6Zj +mUyl+17vIWR6IF9sZIUVyzfpYgwLKhbcAS4y2j5L9Z469hdAlO+ekQiG+r5jqFoz +7Mt0Q5X5bGlSNscpb/xVA1wf+5+9R+vnSUeVC06JIglJ4PVhHvG/LopyboBZ/1c6 ++XUyo05f7O0oYtlNc/LMgRdg7c3r3NunysV+Ar3yVAhU/bQtCSwXVEqY0VThUWcI +0u1ufm8/0i2BWSlmy5A5lREedCf+3euvAgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMB +Af8wDgYDVR0PAQH/BAQDAgGGMB0GA1UdDgQWBBSwDPBMMPQFWAJI/TPlUq9LhONm +UjANBgkqhkiG9w0BAQwFAAOCAgEAqqiAjw54o+Ci1M3m9Zh6O+oAA7CXDpO8Wqj2 +LIxyh6mx/H9z/WNxeKWHWc8w4Q0QshNabYL1auaAn6AFC2jkR2vHat+2/XcycuUY ++gn0oJMsXdKMdYV2ZZAMA3m3MSNjrXiDCYZohMr/+c8mmpJ5581LxedhpxfL86kS +k5Nrp+gvU5LEYFiwzAJRGFuFjWJZY7attN6a+yb3ACfAXVU3dJnJUH/jWS5E4ywl +7uxMMne0nxrpS10gxdr9HIcWxkPo1LsmmkVwXqkLN1PiRnsn/eBG8om3zEK2yygm +btmlyTrIQRNg91CMFa6ybRoVGld45pIq2WWQgj9sAq+uEjonljYE1x2igGOpm/Hl +urR8FLBOybEfdF849lHqm/osohHUqS0nGkWxr7JOcQ3AWEbWaQbLU8uz/mtBzUF+ +fUwPfHJ5elnNXkoOrJupmHN5fLT0zLm4BwyydFy4x2+IoZCn9Kr5v2c69BoVYh63 +n749sSmvZ6ES8lgQGVMDMBu4Gon2nL2XA46jCfMdiyHxtN/kHNGfZQIG6lzWE7OE 
+76KlXIx3KadowGuuQNKotOrN8I1LOJwZmhsoVLiJkO/KdYE+HvJkJMcYr07/R54H +9jVlpNMKVv/1F2Rs76giJUmTtt8AF9pYfl3uxRuw0dFfIRDH+fO6AgonB8Xx1sfT +4PsJYGw= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIBtjCCAVugAwIBAgITBmyf1XSXNmY/Owua2eiedgPySjAKBggqhkjOPQQDAjA5 +MQswCQYDVQQGEwJVUzEPMA0GA1UEChMGQW1hem9uMRkwFwYDVQQDExBBbWF6b24g +Um9vdCBDQSAzMB4XDTE1MDUyNjAwMDAwMFoXDTQwMDUyNjAwMDAwMFowOTELMAkG +A1UEBhMCVVMxDzANBgNVBAoTBkFtYXpvbjEZMBcGA1UEAxMQQW1hem9uIFJvb3Qg +Q0EgMzBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABCmXp8ZBf8ANm+gBG1bG8lKl +ui2yEujSLtf6ycXYqm0fc4E7O5hrOXwzpcVOho6AF2hiRVd9RFgdszflZwjrZt6j +QjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgGGMB0GA1UdDgQWBBSr +ttvXBp43rDCGB5Fwx5zEGbF4wDAKBggqhkjOPQQDAgNJADBGAiEA4IWSoxe3jfkr +BqWTrBqYaGFy+uGh0PsceGCmQ5nFuMQCIQCcAu/xlJyzlvnrxir4tiz+OpAUFteM +YyRIHN8wfdVoOw== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIB8jCCAXigAwIBAgITBmyf18G7EEwpQ+Vxe3ssyBrBDjAKBggqhkjOPQQDAzA5 +MQswCQYDVQQGEwJVUzEPMA0GA1UEChMGQW1hem9uMRkwFwYDVQQDExBBbWF6b24g +Um9vdCBDQSA0MB4XDTE1MDUyNjAwMDAwMFoXDTQwMDUyNjAwMDAwMFowOTELMAkG +A1UEBhMCVVMxDzANBgNVBAoTBkFtYXpvbjEZMBcGA1UEAxMQQW1hem9uIFJvb3Qg +Q0EgNDB2MBAGByqGSM49AgEGBSuBBAAiA2IABNKrijdPo1MN/sGKe0uoe0ZLY7Bi +9i0b2whxIdIA6GO9mif78DluXeo9pcmBqqNbIJhFXRbb/egQbeOc4OO9X4Ri83Bk +M6DLJC9wuoihKqB1+IGuYgbEgds5bimwHvouXKNCMEAwDwYDVR0TAQH/BAUwAwEB +/zAOBgNVHQ8BAf8EBAMCAYYwHQYDVR0OBBYEFNPsxzplbszh2naaVvuc84ZtV+WB +MAoGCCqGSM49BAMDA2gAMGUCMDqLIfG9fhGt0O9Yli/W651+kI0rz2ZVwyzjKKlw +CkcO8DdZEv8tmZQoTipPNU0zWgIxAOp1AE47xDqUEpHJWEadIRNyp4iciuRMStuW +1KyLa2tJElMzrdfkviT8tQp21KW8EA== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIID7zCCAtegAwIBAgIBADANBgkqhkiG9w0BAQsFADCBmDELMAkGA1UEBhMCVVMx +EDAOBgNVBAgTB0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxJTAjBgNVBAoT +HFN0YXJmaWVsZCBUZWNobm9sb2dpZXMsIEluYy4xOzA5BgNVBAMTMlN0YXJmaWVs +ZCBTZXJ2aWNlcyBSb290IENlcnRpZmljYXRlIEF1dGhvcml0eSAtIEcyMB4XDTA5 +MDkwMTAwMDAwMFoXDTM3MTIzMTIzNTk1OVowgZgxCzAJBgNVBAYTAlVTMRAwDgYD +VQQIEwdBcml6b25hMRMwEQYDVQQHEwpTY290dHNkYWxlMSUwIwYDVQQKExxTdGFy +ZmllbGQgVGVjaG5vbG9naWVzLCBJbmMuMTswOQYDVQQDEzJTdGFyZmllbGQgU2Vy +dmljZXMgUm9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkgLSBHMjCCASIwDQYJKoZI +hvcNAQEBBQADggEPADCCAQoCggEBANUMOsQq+U7i9b4Zl1+OiFOxHz/Lz58gE20p +OsgPfTz3a3Y4Y9k2YKibXlwAgLIvWX/2h/klQ4bnaRtSmpDhcePYLQ1Ob/bISdm2 +8xpWriu2dBTrz/sm4xq6HZYuajtYlIlHVv8loJNwU4PahHQUw2eeBGg6345AWh1K +Ts9DkTvnVtYAcMtS7nt9rjrnvDH5RfbCYM8TWQIrgMw0R9+53pBlbQLPLJGmpufe +hRhJfGZOozptqbXuNC66DQO4M99H67FrjSXZm86B0UVGMpZwh94CDklDhbZsc7tk +6mFBrMnUVN+HL8cisibMn1lUaJ/8viovxFUcdUBgF4UCVTmLfwUCAwEAAaNCMEAw +DwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFJxfAN+q +AdcwKziIorhtSpzyEZGDMA0GCSqGSIb3DQEBCwUAA4IBAQBLNqaEd2ndOxmfZyMI +bw5hyf2E3F/YNoHN2BtBLZ9g3ccaaNnRbobhiCPPE95Dz+I0swSdHynVv/heyNXB +ve6SbzJ08pGCL72CQnqtKrcgfU28elUSwhXqvfdqlS5sdJ/PHLTyxQGjhdByPq1z +qwubdQxtRbeOlKyWN7Wg0I8VRw7j6IPdj/3vQQF3zCepYoUz8jcI73HPdwbeyBkd +iEDPfUYd/x7H4c7/I9vG+o1VTqkC50cRRj70/b17KSa7qWFiNyi2LSr2EIZkyXCn +0q23KXB56jzaYyWf/Wi3MOxw+3WKt21gZ7IeyLnp2KhvAotnDU0mV3HaIPzBSlCN +sSi6 +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIEDzCCAvegAwIBAgIBADANBgkqhkiG9w0BAQUFADBoMQswCQYDVQQGEwJVUzEl +MCMGA1UEChMcU3RhcmZpZWxkIFRlY2hub2xvZ2llcywgSW5jLjEyMDAGA1UECxMp +U3RhcmZpZWxkIENsYXNzIDIgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDQw +NjI5MTczOTE2WhcNMzQwNjI5MTczOTE2WjBoMQswCQYDVQQGEwJVUzElMCMGA1UE +ChMcU3RhcmZpZWxkIFRlY2hub2xvZ2llcywgSW5jLjEyMDAGA1UECxMpU3RhcmZp +ZWxkIENsYXNzIDIgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwggEgMA0GCSqGSIb3 +DQEBAQUAA4IBDQAwggEIAoIBAQC3Msj+6XGmBIWtDBFk385N78gDGIc/oav7PKaf 
+8MOh2tTYbitTkPskpD6E8J7oX+zlJ0T1KKY/e97gKvDIr1MvnsoFAZMej2YcOadN ++lq2cwQlZut3f+dZxkqZJRRU6ybH838Z1TBwj6+wRir/resp7defqgSHo9T5iaU0 +X9tDkYI22WY8sbi5gv2cOj4QyDvvBmVmepsZGD3/cVE8MC5fvj13c7JdBmzDI1aa +K4UmkhynArPkPw2vCHmCuDY96pzTNbO8acr1zJ3o/WSNF4Azbl5KXZnJHoe0nRrA +1W4TNSNe35tfPe/W93bC6j67eA0cQmdrBNj41tpvi/JEoAGrAgEDo4HFMIHCMB0G +A1UdDgQWBBS/X7fRzt0fhvRbVazc1xDCDqmI5zCBkgYDVR0jBIGKMIGHgBS/X7fR +zt0fhvRbVazc1xDCDqmI56FspGowaDELMAkGA1UEBhMCVVMxJTAjBgNVBAoTHFN0 +YXJmaWVsZCBUZWNobm9sb2dpZXMsIEluYy4xMjAwBgNVBAsTKVN0YXJmaWVsZCBD +bGFzcyAyIENlcnRpZmljYXRpb24gQXV0aG9yaXR5ggEAMAwGA1UdEwQFMAMBAf8w +DQYJKoZIhvcNAQEFBQADggEBAAWdP4id0ckaVaGsafPzWdqbAYcaT1epoXkJKtv3 +L7IezMdeatiDh6GX70k1PncGQVhiv45YuApnP+yz3SFmH8lU+nLMPUxA2IGvd56D +eruix/U0F47ZEUD0/CwqTRV/p2JdLiXTAAsgGh1o+Re49L2L7ShZ3U0WixeDyLJl +xy16paq8U4Zt3VekyvggQQto8PT7dL5WXXp59fkdheMtlb71cZBDzI0fmgAKhynp +VSJYACPq4xJDKVtHCN2MQWplBqjlIapBtJUhlbl90TSrE9atvNziPTnNvT51cKEY +WQPJIrSPnNVeKtelttQKbfi3QBFGmh95DmK/D5fs4C8fF5Q= +-----END CERTIFICATE----- diff --git a/installers/ansible/roles/pgo-operator/files/pgo-backrest-repo/sshd_config b/installers/ansible/roles/pgo-operator/files/pgo-backrest-repo/sshd_config index 3a96f209da..5a0f61e8f9 100644 --- a/installers/ansible/roles/pgo-operator/files/pgo-backrest-repo/sshd_config +++ b/installers/ansible/roles/pgo-operator/files/pgo-backrest-repo/sshd_config @@ -80,18 +80,9 @@ ChallengeResponseAuthentication yes #GSSAPIKeyExchange no #GSSAPIEnablek5users no -# Set this to 'yes' to enable PAM authentication, account processing, -# and session processing. If this is enabled, PAM authentication will -# be allowed through the ChallengeResponseAuthentication and -# PasswordAuthentication. Depending on your PAM configuration, -# PAM authentication via ChallengeResponseAuthentication may bypass -# the setting of "PermitRootLogin without-password". -# If you just want the PAM account and session checks to run without -# PAM authentication, then enable this but set PasswordAuthentication -# and ChallengeResponseAuthentication to 'no'. -# WARNING: 'UsePAM no' is not supported in Red Hat Enterprise Linux and may cause several -# problems. -UsePAM yes +# This is set explicitly to *no* as we are only using pubkey authentication and +# because each container is isolated to only an unprivileged user. 
+UsePAM no #AllowAgentForwarding yes #AllowTcpForwarding yes diff --git a/installers/ansible/roles/pgo-operator/files/pgo-configs/affinity.json b/installers/ansible/roles/pgo-operator/files/pgo-configs/affinity.json deleted file mode 100644 index a247bd9bb4..0000000000 --- a/installers/ansible/roles/pgo-operator/files/pgo-configs/affinity.json +++ /dev/null @@ -1,14 +0,0 @@ - "nodeAffinity": { - "preferredDuringSchedulingIgnoredDuringExecution": [{ - "weight": 10, - "preference": { - "matchExpressions": [{ - "key": "{{.NodeLabelKey}}", - "operator": "{{.OperatorValue}}", - "values": [ - "{{.NodeLabelValue}}" - ] - }] - } - }] - } diff --git a/installers/ansible/roles/pgo-operator/files/pgo-configs/backrest-job.json b/installers/ansible/roles/pgo-operator/files/pgo-configs/backrest-job.json index 82b326c7cf..cb0821d40f 100644 --- a/installers/ansible/roles/pgo-operator/files/pgo-configs/backrest-job.json +++ b/installers/ansible/roles/pgo-operator/files/pgo-configs/backrest-job.json @@ -29,9 +29,16 @@ ], "securityContext": {{.SecurityContext}}, "serviceAccountName": "pgo-backrest", + {{ if .Tolerations }} + "tolerations": {{ .Tolerations }}, + {{ end }} "containers": [{ "name": "backrest", - "image": "{{.PGOImagePrefix}}/pgo-backrest:{{.PGOImageTag}}", + "image": "{{.CCPImagePrefix}}/crunchy-pgbackrest:{{.CCPImageTag}}", + "securityContext": { + "allowPrivilegeEscalation": false, + "privileged": false + }, "volumeMounts": [ {{.PgbackrestRestoreVolumeMounts}} ], @@ -39,6 +46,9 @@ "name": "COMMAND", "value": "{{.Command}}" }, { + "name": "MODE", + "value": "pgbackrest" + },{ "name": "COMMAND_OPTS", "value": "{{.CommandOpts}}" }, { @@ -54,11 +64,11 @@ "name": "PGBACKREST_DB_PATH", "value": "{{.PgbackrestDBPath}}" }, { - "name": "PGBACKREST_REPO_PATH", - "value": "{{.PgbackrestRepoPath}}" + "name": "PGBACKREST_REPO1_PATH", + "value": "{{.PgbackrestRepo1Path}}" }, { - "name": "PGBACKREST_REPO_TYPE", - "value": "{{.PgbackrestRepoType}}" + "name": "PGBACKREST_REPO1_TYPE", + "value": "{{.PgbackrestRepo1Type}}" },{ "name": "PGHA_PGBACKREST_LOCAL_S3_STORAGE", "value": "{{.BackrestLocalAndS3Storage}}" diff --git a/installers/ansible/roles/pgo-operator/files/pgo-configs/cluster-bootstrap-job.json b/installers/ansible/roles/pgo-operator/files/pgo-configs/cluster-bootstrap-job.json index ecd2cf735a..2afd0605f8 100644 --- a/installers/ansible/roles/pgo-operator/files/pgo-configs/cluster-bootstrap-job.json +++ b/installers/ansible/roles/pgo-operator/files/pgo-configs/cluster-bootstrap-job.json @@ -23,9 +23,16 @@ "spec": { "securityContext": {{.SecurityContext}}, "serviceAccountName": "pgo-pg", + {{ if .Tolerations }} + "tolerations": {{ .Tolerations }}, + {{ end }} "containers": [{ "name": "database", "image": "{{.CCPImagePrefix}}/{{.CCPImage}}:{{.CCPImageTag}}", + "securityContext": { + "allowPrivilegeEscalation": false, + "privileged": false + }, {{.ContainerResources}} "env": [{ "name": "PGHA_PG_PORT", @@ -57,9 +64,6 @@ { "name": "PGHA_DATABASE", "value": "{{.Database}}" - }, { - "name": "PGHA_CRUNCHYADM", - "value": "true" }, { "name": "PGHA_REPLICA_REINIT_ON_START_FAIL", "value": "{{.ReplicaReinitOnStartFail}}" @@ -137,9 +141,6 @@ }, { "mountPath": "/etc/pgbackrest/conf.d", "name": "pgbackrest-config" - }, { - "mountPath": "/crunchyadm", - "name": "crunchyadm" } {{.TablespaceVolumeMounts}} ], @@ -166,8 +167,7 @@ }, { "name": "sshd", "secret": { - "secretName": "{{.RestoreFrom}}-backrest-repo-config", - "defaultMode": 511 + "secretName": "{{.RestoreFrom}}-backrest-repo-config" } }, {{if .TLSEnabled}} 
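(A pattern worth calling out in the template hunks above and below: each job and deployment spec gains the same conditional stanza, `{{ if .Tolerations }} "tolerations": {{ .Tolerations }}, {{ end }}`. These JSON files are inputs to Go's text/template, so an empty Tolerations string is falsy and the whole key disappears, keeping the rendered JSON valid either way. A minimal, runnable sketch of that behavior; the jobFields struct and the field values are illustrative, not the operator's actual types:

    package main

    import (
    	"log"
    	"os"
    	"text/template"
    )

    // Illustrative stand-in for the operator's template fields; Tolerations
    // carries a pre-marshaled JSON array, exactly as these templates expect.
    type jobFields struct {
    	Name        string
    	Tolerations string
    }

    const stanza = `{
      "name": "{{ .Name }}",
      {{ if .Tolerations }}
      "tolerations": {{ .Tolerations }},
      {{ end }}
      "restartPolicy": "Never"
    }`

    func main() {
    	t := template.Must(template.New("job").Parse(stanza))
    	// Non-empty string: the "tolerations" key is emitted.
    	if err := t.Execute(os.Stdout, jobFields{
    		Name:        "backrest",
    		Tolerations: `[{"key": "zone", "operator": "Exists", "effect": "NoSchedule"}]`,
    	}); err != nil {
    		log.Fatal(err)
    	}
    	// Empty string: the key is skipped entirely.
    	if err := t.Execute(os.Stdout, jobFields{Name: "backrest"}); err != nil {
    		log.Fatal(err)
    	}
    }

The same if-guard explains the affinity rework in these hunks: wrapping the value as `"nodeAffinity": {{.NodeSelector}}` only when NodeSelector is set avoids emitting a dangling key now that the old affinity.json fragment is deleted.)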
@@ -190,10 +190,6 @@ } }, {{ end }} - { - "name": "crunchyadm", - "emptyDir": {} - }, { "name": "dshm", "emptyDir": { @@ -226,7 +222,9 @@ } {{.TablespaceVolumes}}], "affinity": { - {{.NodeSelector}} + {{if .NodeSelector}} + "nodeAffinity": {{.NodeSelector}} + {{ end }} {{if and .NodeSelector .PodAntiAffinity}},{{end}} {{.PodAntiAffinity}} }, diff --git a/installers/ansible/roles/pgo-operator/files/pgo-configs/cluster-deployment.json b/installers/ansible/roles/pgo-operator/files/pgo-configs/cluster-deployment.json index 4a44785b27..70164d94be 100644 --- a/installers/ansible/roles/pgo-operator/files/pgo-configs/cluster-deployment.json +++ b/installers/ansible/roles/pgo-operator/files/pgo-configs/cluster-deployment.json @@ -12,10 +12,12 @@ "spec": { "replicas": {{.Replicas}}, "selector": { - "matchLabels": { + "matchLabels": { "vendor": "crunchydata", - {{.DeploymentLabels }} - } + "pg-cluster": "{{.ClusterName}}", + "pgo-pg-database": "true", + "deployment-name": "{{.Name}}" + } }, "template": { "metadata": { @@ -26,20 +28,28 @@ "name": "{{.Name}}", "vendor": "crunchydata", "pgo-pg-database": "true", + "{{.PodAntiAffinityLabelName}}": "{{.PodAntiAffinityLabelValue}}", {{.PodLabels }} } }, "spec": { "securityContext": {{.SecurityContext}}, "serviceAccountName": "pgo-pg", + {{ if .Tolerations }} + "tolerations": {{ .Tolerations }}, + {{ end }} "containers": [ { "name": "database", "image": "{{.CCPImagePrefix}}/{{.CCPImage}}:{{.CCPImageTag}}", + "securityContext": { + "allowPrivilegeEscalation": false, + "privileged": false + }, "readinessProbe": { "exec": { "command": [ - "/opt/cpm/bin/health/pgha-readiness.sh" + "/opt/crunchy/bin/postgres-ha/health/pgha-readiness.sh" ] }, "initialDelaySeconds": 15 @@ -47,7 +57,7 @@ "livenessProbe": { "exec": { "command": [ - "/opt/cpm/bin/health/pgha-liveness.sh" + "/opt/crunchy/bin/postgres-ha/health/pgha-liveness.sh" ] }, "initialDelaySeconds": 30, @@ -56,6 +66,10 @@ }, {{.ContainerResources }} "env": [{ + "name": "MODE", + "value": "postgres" + }, + { "name": "PGHA_PG_PORT", "value": "{{.Port}}" }, { @@ -89,9 +103,6 @@ { "name": "PGHA_DATABASE", "value": "{{.Database}}" - }, { - "name": "PGHA_CRUNCHYADM", - "value": "true" }, { "name": "PGHA_REPLICA_REINIT_ON_START_FAIL", "value": "{{.ReplicaReinitOnStartFail}}" @@ -169,9 +180,6 @@ }, { "mountPath": "/pgconf", "name": "pgconf-volume" - }, { - "mountPath": "/recover", - "name": "recover-volume" }, { "mountPath": "/dev/shm", @@ -181,10 +189,6 @@ "mountPath": "/etc/pgbackrest/conf.d", "name": "pgbackrest-config" }, - { - "mountPath": "/crunchyadm", - "name": "crunchyadm" - }, { "mountPath": "/etc/podinfo", "name": "podinfo" @@ -200,41 +204,13 @@ "protocol": "TCP" }], "imagePullPolicy": "IfNotPresent" - }{{if .EnableCrunchyadm}}, - { - "name": "crunchyadm", - "image": "{{.CCPImagePrefix}}/crunchy-admin:{{.CCPImageTag}}", - "securityContext": { - "runAsUser": 17 - }, - "readinessProbe": { - "exec": { - "command": [ - "/opt/cpm/bin/crunchyadm-readiness.sh" - ] - }, - "initialDelaySeconds": 30, - "timeoutSeconds": 10 - }, - "env": [ - { - "name": "PGHOST", - "value": "/crunchyadm" - } - ], - "volumeMounts": [ - { - "mountPath": "/crunchyadm", - "name": "crunchyadm" - } - ], - "imagePullPolicy": "IfNotPresent" - }{{ end }} - - {{.ExporterAddon }} - - {{.BadgerAddon }} - + } + {{ if .ExporterAddon }} + ,{{.ExporterAddon }} + {{ end }} + {{ if .BadgerAddon }} + ,{{.BadgerAddon }} + {{ end }} ], "volumes": [{ "name": "pgdata", @@ -252,8 +228,7 @@ }, { "name": "sshd", "secret": { - "secretName": 
"{{.ClusterName}}-backrest-repo-config", - "defaultMode": 511 + "secretName": "{{.ClusterName}}-backrest-repo-config" } }, { "name": "root-volume", @@ -308,14 +283,11 @@ {{ end }} {{ end }} { - "name": "recover-volume", - "emptyDir": { "medium": "Memory" } - }, { "name": "report", - "emptyDir": { "medium": "Memory" } - }, { - "name": "crunchyadm", - "emptyDir": {} + "emptyDir": { + "medium": "Memory", + "sizeLimit": "64Mi" + } }, { "name": "dshm", @@ -402,9 +374,11 @@ {{.TablespaceVolumes}} ], "affinity": { - {{.NodeSelector}} - {{if and .NodeSelector .PodAntiAffinity}},{{end}} - {{.PodAntiAffinity}} + {{if .NodeSelector}} + "nodeAffinity": {{ .NodeSelector }} + {{ end }} + {{if and .NodeSelector .PodAntiAffinity}},{{end}} + {{.PodAntiAffinity}} }, "restartPolicy": "Always", "dnsPolicy": "ClusterFirst" diff --git a/installers/ansible/roles/pgo-operator/files/pgo-configs/exporter.json b/installers/ansible/roles/pgo-operator/files/pgo-configs/exporter.json index c40a26e5ef..8473963a55 100644 --- a/installers/ansible/roles/pgo-operator/files/pgo-configs/exporter.json +++ b/installers/ansible/roles/pgo-operator/files/pgo-configs/exporter.json @@ -1,6 +1,10 @@ -,{ +{ "name": "exporter", "image": "{{.PGOImagePrefix}}/crunchy-postgres-exporter:{{.PGOImageTag}}", + "securityContext": { + "allowPrivilegeEscalation": false, + "privileged": false + }, "ports": [{ "containerPort": {{.ExporterPort}}, "protocol": "TCP" @@ -35,7 +39,7 @@ "name": "EXPORTER_PG_USER", "valueFrom": { "secretKeyRef": { - "name": "{{.CollectSecretName}}", + "name": "{{.ExporterSecretName}}", "key": "username" } } @@ -44,7 +48,7 @@ "name": "EXPORTER_PG_PASSWORD", "valueFrom": { "secretKeyRef": { - "name": "{{.CollectSecretName}}", + "name": "{{.ExporterSecretName}}", "key": "password" } } diff --git a/installers/ansible/roles/pgo-operator/files/pgo-configs/pgadmin-template.json b/installers/ansible/roles/pgo-operator/files/pgo-configs/pgadmin-template.json index 5ea1d44249..dabb807e1e 100644 --- a/installers/ansible/roles/pgo-operator/files/pgo-configs/pgadmin-template.json +++ b/installers/ansible/roles/pgo-operator/files/pgo-configs/pgadmin-template.json @@ -34,14 +34,19 @@ }, "spec": { "serviceAccountName": "pgo-default", - {{ if not .DisableFSGroup }} "securityContext": { - "fsGroup": 2 + {{ if not .DisableFSGroup }} + "fsGroup": 2, + {{ end }} + "runAsNonRoot": true }, - {{ end }} "containers": [{ "name": "pgadminweb", "image": "{{.CCPImagePrefix}}/crunchy-pgadmin4:{{.CCPImageTag}}", + "securityContext": { + "allowPrivilegeEscalation": false, + "privileged": false + }, "ports": [{ "containerPort": {{.Port}}, "protocol": "TCP" diff --git a/installers/ansible/roles/pgo-operator/files/pgo-configs/pgbadger.json b/installers/ansible/roles/pgo-operator/files/pgo-configs/pgbadger.json index d9b04daa73..26214a6bdd 100644 --- a/installers/ansible/roles/pgo-operator/files/pgo-configs/pgbadger.json +++ b/installers/ansible/roles/pgo-operator/files/pgo-configs/pgbadger.json @@ -1,41 +1,45 @@ - ,{ - "name": "pgbadger", - "image": "{{.CCPImagePrefix}}/crunchy-pgbadger:{{.CCPImageTag}}", - "ports": [ { - "containerPort": {{.PGBadgerPort}}, - "protocol": "TCP" - } - ], - "readinessProbe": { - "tcpSocket": { - "port": {{.PGBadgerPort}} - }, - "initialDelaySeconds": 20, - "periodSeconds": 10 - }, - "env": [ { - "name": "BADGER_TARGET", - "value": "{{.BadgerTarget}}" - }, { - "name": "PGBADGER_SERVICE_PORT", - "value": "{{.PGBadgerPort}}" - } ], - "resources": { - "limits": { - "cpu": "500m", - "memory": "64Mi" - } - }, - "volumeMounts": [ - 
{ - "mountPath": "/pgdata", - "name": "pgdata", - "readOnly": true - }, - { - "mountPath": "/report", - "name": "report", - "readOnly": false - } - ] - } +{ + "name": "pgbadger", + "image": "{{.CCPImagePrefix}}/crunchy-pgbadger:{{.CCPImageTag}}", + "securityContext": { + "allowPrivilegeEscalation": false, + "privileged": false + }, + "ports": [ { + "containerPort": {{.PGBadgerPort}}, + "protocol": "TCP" + } + ], + "readinessProbe": { + "tcpSocket": { + "port": {{.PGBadgerPort}} + }, + "initialDelaySeconds": 20, + "periodSeconds": 10 + }, + "env": [ { + "name": "BADGER_TARGET", + "value": "{{.BadgerTarget}}" + }, { + "name": "PGBADGER_SERVICE_PORT", + "value": "{{.PGBadgerPort}}" + } ], + "resources": { + "limits": { + "cpu": "500m", + "memory": "64Mi" + } + }, + "volumeMounts": [ + { + "mountPath": "/pgdata", + "name": "pgdata", + "readOnly": true + }, + { + "mountPath": "/report", + "name": "report", + "readOnly": false + } + ] +} diff --git a/installers/ansible/roles/pgo-operator/files/pgo-configs/pgbouncer-template.json b/installers/ansible/roles/pgo-operator/files/pgo-configs/pgbouncer-template.json index 38202a7464..3f797120fb 100644 --- a/installers/ansible/roles/pgo-operator/files/pgo-configs/pgbouncer-template.json +++ b/installers/ansible/roles/pgo-operator/files/pgo-configs/pgbouncer-template.json @@ -39,9 +39,19 @@ }, "spec": { "serviceAccountName": "pgo-default", + "securityContext": { + {{ if not .DisableFSGroup }} + "fsGroup": 2, + {{ end }} + "runAsNonRoot": true + }, "containers": [{ "name": "pgbouncer", "image": "{{.CCPImagePrefix}}/crunchy-pgbouncer:{{.CCPImageTag}}", + "securityContext": { + "allowPrivilegeEscalation": false, + "privileged": false + }, "ports": [{ "containerPort": {{.Port}}, "protocol": "TCP" @@ -59,13 +69,41 @@ "name": "PG_PRIMARY_SERVICE_NAME", "value": "{{.PrimaryServiceName}}" }], - "volumeMounts": [{ + "volumeMounts": [ + {{if .TLSEnabled}} + { + "mountPath": "/pgconf/tls/pgbouncer", + "name": "tls-pgbouncer" + }, + {{ end }} + { "name": "pgbouncer-conf", "mountPath": "/pgconf/", "readOnly": false - }] + } + ] }], "volumes": [ + {{if .TLSEnabled}} + { + "name": "tls-pgbouncer", + "defaultMode": 288, + "projected": { + "sources": [ + { + "secret": { + "name": "{{.TLSSecret}}" + } + }, + { + "secret": { + "name": "{{.CASecret}}" + } + } + ] + } + }, + {{ end }} { "name": "pgbouncer-conf", "projected": { @@ -78,7 +116,7 @@ { "secret": { "name": "{{.PGBouncerSecret}}", - "defaultMode": 511 + "defaultMode": 288 } } ] diff --git a/installers/ansible/roles/pgo-operator/files/pgo-configs/pgbouncer.ini b/installers/ansible/roles/pgo-operator/files/pgo-configs/pgbouncer.ini index 157f9a96e1..5310692c37 100644 --- a/installers/ansible/roles/pgo-operator/files/pgo-configs/pgbouncer.ini +++ b/installers/ansible/roles/pgo-operator/files/pgo-configs/pgbouncer.ini @@ -20,3 +20,11 @@ reserve_pool_size = 0 reserve_pool_timeout = 5 query_timeout = 0 ignore_startup_parameters = extra_float_digits +{{ if .TLSEnabled }} +client_tls_sslmode = require +client_tls_key_file = /pgconf/tls/pgbouncer/tls.key +client_tls_cert_file = /pgconf/tls/pgbouncer/tls.crt +client_tls_ca_file = /pgconf/tls/pgbouncer/ca.crt +server_tls_sslmode = verify-ca +server_tls_ca_file = /pgconf/tls/pgbouncer/ca.crt +{{ end }} diff --git a/installers/ansible/roles/pgo-operator/files/pgo-configs/pgbouncer_hba.conf b/installers/ansible/roles/pgo-operator/files/pgo-configs/pgbouncer_hba.conf index 824c82705e..aee753cd1a 100644 --- 
a/installers/ansible/roles/pgo-operator/files/pgo-configs/pgbouncer_hba.conf +++ b/installers/ansible/roles/pgo-operator/files/pgo-configs/pgbouncer_hba.conf @@ -1 +1,5 @@ +{{ if .TLSEnabled }} +hostssl all all 0.0.0.0/0 md5 +{{ else }} host all all 0.0.0.0/0 md5 +{{ end }} diff --git a/installers/ansible/roles/pgo-operator/files/pgo-configs/pgdump-job.json b/installers/ansible/roles/pgo-operator/files/pgo-configs/pgdump-job.json index 3b827ecaac..3749b4ed20 100644 --- a/installers/ansible/roles/pgo-operator/files/pgo-configs/pgdump-job.json +++ b/installers/ansible/roles/pgo-operator/files/pgo-configs/pgdump-job.json @@ -31,9 +31,18 @@ ], "securityContext": {{.SecurityContext}}, "serviceAccountName": "pgo-default", + {{ if .Tolerations }} + "tolerations": {{ .Tolerations }}, + {{ end }} "containers": [{ "name": "pgdump", - "image": "{{.CCPImagePrefix}}/crunchy-pgdump:{{.CCPImageTag}}", + "image": "{{.CCPImagePrefix}}/crunchy-postgres-ha:{{.CCPImageTag}}", + "securityContext": { + "allowPrivilegeEscalation": false, + "privileged": false + }, + "command": ["/opt/crunchy/bin/uid_postgres.sh"], + "args": ["/opt/crunchy/bin/start.sh"], "volumeMounts": [ { "mountPath": "/pgdata", @@ -42,6 +51,10 @@ } ], "env": [ + { + "name": "MODE", + "value": "pgdump" + }, { "name": "PGDUMP_HOST", "value": "{{.PgDumpHost}}" diff --git a/installers/ansible/roles/pgo-operator/files/pgo-configs/pgo-backrest-repo-template.json b/installers/ansible/roles/pgo-operator/files/pgo-configs/pgo-backrest-repo-template.json index 5f9e5d5049..9f5f6a88b0 100644 --- a/installers/ansible/roles/pgo-operator/files/pgo-configs/pgo-backrest-repo-template.json +++ b/installers/ansible/roles/pgo-operator/files/pgo-configs/pgo-backrest-repo-template.json @@ -46,9 +46,16 @@ "spec": { "securityContext": {{.SecurityContext}}, "serviceAccountName": "pgo-default", + {{ if .Tolerations }} + "tolerations": {{ .Tolerations }}, + {{ end }} "containers": [{ "name": "database", - "image": "{{.PGOImagePrefix}}/pgo-backrest-repo:{{.PGOImageTag}}", + "image": "{{.CCPImagePrefix}}/crunchy-pgbackrest-repo:{{.CCPImageTag}}", + "securityContext": { + "allowPrivilegeEscalation": false, + "privileged": false + }, "ports": [{ "containerPort": {{.SshdPort}}, "protocol": "TCP" @@ -57,20 +64,20 @@ "env": [ {{.PgbackrestS3EnvVars}} { - "name": "PGBACKREST_STANZA", - "value": "{{.PgbackrestStanza}}" + "name": "MODE", + "value": "pgbackrest-repo" }, { - "name": "SSHD_PORT", - "value": "{{.SshdPort}}" + "name": "PGBACKREST_STANZA", + "value": "{{.PgbackrestStanza}}" }, { "name": "PGBACKREST_DB_PATH", "value": "{{.PgbackrestDBPath}}" }, { - "name": "PGBACKREST_REPO_PATH", - "value": "{{.PgbackrestRepoPath}}" + "name": "PGBACKREST_REPO1_PATH", + "value": "{{.PgbackrestRepo1Path}}" }, { "name": "PGBACKREST_PG1_PORT", @@ -102,8 +109,7 @@ "volumes": [{ "name": "sshd", "secret": { - "secretName": "{{.SshdSecretsName}}", - "defaultMode": 511 + "secretName": "{{.SshdSecretsName}}" } }, { "name": "backrestrepo", diff --git a/installers/ansible/roles/pgo-operator/files/pgo-configs/pgo-backrest-role-binding.json b/installers/ansible/roles/pgo-operator/files/pgo-configs/pgo-backrest-role-binding.json index 84f1c031fc..5c4163b892 100644 --- a/installers/ansible/roles/pgo-operator/files/pgo-configs/pgo-backrest-role-binding.json +++ b/installers/ansible/roles/pgo-operator/files/pgo-configs/pgo-backrest-role-binding.json @@ -3,7 +3,10 @@ "kind": "RoleBinding", "metadata": { "name": "pgo-backrest-role-binding", - "namespace": "{{.TargetNamespace}}" + "namespace": 
"{{.TargetNamespace}}", + "labels": { + "vendor": "crunchydata" + } }, "roleRef": { "apiGroup": "rbac.authorization.k8s.io", diff --git a/installers/ansible/roles/pgo-operator/files/pgo-configs/pgo-backrest-role.json b/installers/ansible/roles/pgo-operator/files/pgo-configs/pgo-backrest-role.json index ca1c5b4e0b..f14634c7c1 100644 --- a/installers/ansible/roles/pgo-operator/files/pgo-configs/pgo-backrest-role.json +++ b/installers/ansible/roles/pgo-operator/files/pgo-configs/pgo-backrest-role.json @@ -3,7 +3,10 @@ "kind": "Role", "metadata": { "name": "pgo-backrest-role", - "namespace": "{{.TargetNamespace}}" + "namespace": "{{.TargetNamespace}}", + "labels": { + "vendor": "crunchydata" + } }, "rules": [ { diff --git a/installers/ansible/roles/pgo-operator/files/pgo-configs/pgo-backrest-sa.json b/installers/ansible/roles/pgo-operator/files/pgo-configs/pgo-backrest-sa.json index d3d8d19c4b..cf5607a504 100644 --- a/installers/ansible/roles/pgo-operator/files/pgo-configs/pgo-backrest-sa.json +++ b/installers/ansible/roles/pgo-operator/files/pgo-configs/pgo-backrest-sa.json @@ -3,6 +3,9 @@ "kind": "ServiceAccount", "metadata": { "name": "pgo-backrest", - "namespace": "{{.TargetNamespace}}" + "namespace": "{{.TargetNamespace}}", + "labels": { + "vendor": "crunchydata" + } } } diff --git a/installers/ansible/roles/pgo-operator/files/pgo-configs/pgo-default-sa.json b/installers/ansible/roles/pgo-operator/files/pgo-configs/pgo-default-sa.json index 5a8a52865c..f35dd542bd 100644 --- a/installers/ansible/roles/pgo-operator/files/pgo-configs/pgo-default-sa.json +++ b/installers/ansible/roles/pgo-operator/files/pgo-configs/pgo-default-sa.json @@ -3,7 +3,10 @@ "kind": "ServiceAccount", "metadata": { "name": "pgo-default", - "namespace": "{{.TargetNamespace}}" + "namespace": "{{.TargetNamespace}}", + "labels": { + "vendor": "crunchydata" + } }, "automountServiceAccountToken": false } diff --git a/installers/ansible/roles/pgo-operator/files/pgo-configs/pgo-target-role-binding.json b/installers/ansible/roles/pgo-operator/files/pgo-configs/pgo-target-role-binding.json index df279ee347..5b23bcd927 100644 --- a/installers/ansible/roles/pgo-operator/files/pgo-configs/pgo-target-role-binding.json +++ b/installers/ansible/roles/pgo-operator/files/pgo-configs/pgo-target-role-binding.json @@ -3,7 +3,10 @@ "kind": "RoleBinding", "metadata": { "name": "pgo-target-role-binding", - "namespace": "{{.TargetNamespace}}" + "namespace": "{{.TargetNamespace}}", + "labels": { + "vendor": "crunchydata" + } }, "roleRef": { "apiGroup": "rbac.authorization.k8s.io", diff --git a/installers/ansible/roles/pgo-operator/files/pgo-configs/pgo-target-role.json b/installers/ansible/roles/pgo-operator/files/pgo-configs/pgo-target-role.json index 1cb6a31cc5..612307356d 100644 --- a/installers/ansible/roles/pgo-operator/files/pgo-configs/pgo-target-role.json +++ b/installers/ansible/roles/pgo-operator/files/pgo-configs/pgo-target-role.json @@ -3,7 +3,10 @@ "kind": "Role", "metadata": { "name": "pgo-target-role", - "namespace": "{{.TargetNamespace}}" + "namespace": "{{.TargetNamespace}}", + "labels": { + "vendor": "crunchydata" + } }, "rules": [ { @@ -15,8 +18,6 @@ "endpoints", "pods", "pods/exec", - "pods/log", - "replicasets", "secrets", "services", "persistentvolumeclaims" @@ -32,12 +33,26 @@ "deletecollection" ] }, + { + "apiGroups": [ + "" + ], + "resources": [ + "pods/log" + ], + "verbs":[ + "get", + "list", + "watch" + ] + }, { "apiGroups": [ "apps" ], "resources": [ - "deployments" + "deployments", + "replicasets" ], "verbs":[ 
"get", diff --git a/installers/ansible/roles/pgo-operator/files/pgo-configs/pgo-target-sa.json b/installers/ansible/roles/pgo-operator/files/pgo-configs/pgo-target-sa.json index 5d31bd4441..28cfb06565 100644 --- a/installers/ansible/roles/pgo-operator/files/pgo-configs/pgo-target-sa.json +++ b/installers/ansible/roles/pgo-operator/files/pgo-configs/pgo-target-sa.json @@ -3,6 +3,9 @@ "kind": "ServiceAccount", "metadata": { "name": "pgo-target", - "namespace": "{{.TargetNamespace}}" + "namespace": "{{.TargetNamespace}}", + "labels": { + "vendor": "crunchydata" + } } } diff --git a/installers/ansible/roles/pgo-operator/files/pgo-configs/pgo.sqlrunner-template.json b/installers/ansible/roles/pgo-operator/files/pgo-configs/pgo.sqlrunner-template.json index 56dbf8b035..b0493ba6d9 100644 --- a/installers/ansible/roles/pgo-operator/files/pgo-configs/pgo.sqlrunner-template.json +++ b/installers/ansible/roles/pgo-operator/files/pgo-configs/pgo.sqlrunner-template.json @@ -21,11 +21,27 @@ }, "spec": { "serviceAccountName": "pgo-default", + "securityContext": { + "runAsNonRoot": true + }, + {{ if .Tolerations }} + "tolerations": {{ .Tolerations }}, + {{ end }} "containers": [ { "name": "sqlrunner", - "image": "{{.PGOImagePrefix}}/pgo-sqlrunner:{{.PGOImageTag}}", + "image": "{{.CCPImagePrefix}}/crunchy-postgres-ha:{{.CCPImageTag}}", + "securityContext": { + "allowPrivilegeEscalation": false, + "privileged": false + }, + "command": ["/opt/crunchy/bin/uid_postgres.sh"], + "args": ["/opt/crunchy/bin/start.sh"], "env": [ + { + "name": "MODE", + "value": "sqlrunner" + }, { "name": "PG_HOST", "value": "{{.PGHost}}" diff --git a/installers/ansible/roles/pgo-operator/files/pgo-configs/pgrestore-job.json b/installers/ansible/roles/pgo-operator/files/pgo-configs/pgrestore-job.json index 4dae8fda14..477c9e5305 100644 --- a/installers/ansible/roles/pgo-operator/files/pgo-configs/pgrestore-job.json +++ b/installers/ansible/roles/pgo-operator/files/pgo-configs/pgrestore-job.json @@ -31,10 +31,19 @@ ], "securityContext": {{.SecurityContext}}, "serviceAccountName": "pgo-default", + {{ if .Tolerations }} + "tolerations": {{ .Tolerations }}, + {{ end }} "containers": [ { "name": "pgrestore", - "image": "{{.CCPImagePrefix}}/crunchy-pgrestore:{{.CCPImageTag}}", + "image": "{{.CCPImagePrefix}}/crunchy-postgres-ha:{{.CCPImageTag}}", + "securityContext": { + "allowPrivilegeEscalation": false, + "privileged": false + }, + "command": ["/opt/crunchy/bin/uid_postgres.sh"], + "args": ["/opt/crunchy/bin/start.sh"], "volumeMounts": [ { "mountPath": "/pgdata", @@ -43,6 +52,10 @@ } ], "env": [ + { + "name": "MODE", + "value": "pgrestore" + }, { "name": "PGRESTORE_USER", "valueFrom": { @@ -84,8 +97,12 @@ ] } ], - {{.NodeSelector}} - "restartPolicy": "Never" + {{if .NodeSelector}} + "affinity": { + "nodeAffinity": {{.NodeSelector}} + }, + {{ end }} + "restartPolicy": "Never" } } } diff --git a/installers/ansible/roles/pgo-operator/files/pgo-configs/rmdata-job.json b/installers/ansible/roles/pgo-operator/files/pgo-configs/rmdata-job.json index b5f169fa4a..a9dbafe96a 100644 --- a/installers/ansible/roles/pgo-operator/files/pgo-configs/rmdata-job.json +++ b/installers/ansible/roles/pgo-operator/files/pgo-configs/rmdata-job.json @@ -21,9 +21,19 @@ }, "spec": { "serviceAccountName": "pgo-target", + "securityContext": { + "runAsNonRoot": true + }, + {{ if .Tolerations }} + "tolerations": {{ .Tolerations }}, + {{ end }} "containers": [{ "name": "rmdata", "image": "{{.PGOImagePrefix}}/pgo-rmdata:{{.PGOImageTag}}", + "securityContext": { + 
"allowPrivilegeEscalation": false, + "privileged": false + }, "env": [{ "name": "PG_CLUSTER", "value": "{{.ClusterName}}" diff --git a/installers/ansible/roles/pgo-operator/tasks/certs.yml b/installers/ansible/roles/pgo-operator/tasks/certs.yml deleted file mode 100644 index 4c66e89892..0000000000 --- a/installers/ansible/roles/pgo-operator/tasks/certs.yml +++ /dev/null @@ -1,54 +0,0 @@ ---- -- name: Ensure directory exists for local self-signed TLS certs. - file: - path: '{{ output_dir }}' - state: directory - tags: - - install - -- name: Generate RSA Key - command: openssl genrsa -out "{{ output_dir }}/server.key" 2048 - args: - creates: "{{ output_dir }}/server.key" - tags: - - install - -- name: Generate CSR - command: openssl req \ - -new \ - -subj '/C=US/ST=SC/L=Charleston/O=CrunchyData/CN=pg-operator' \ - -key "{{ output_dir }}/server.key" \ - -out "{{ output_dir }}/server.csr" - args: - creates: "{{ output_dir }}/server.csr" - tags: - - install - -- name: Generate Self-signed Certificate - command: openssl req \ - -x509 \ - -days 1825 \ - -key "{{ output_dir }}/server.key" \ - -in "{{ output_dir }}/server.csr" \ - -out "{{ output_dir }}/server.crt" - args: - creates: "{{ output_dir }}/server.crt" - tags: - - install - -- name: Ensure {{ pgo_keys_dir }} Directory Exists - file: - path: '{{ pgo_keys_dir }}' - state: directory - tags: - - install - -- name: Copy certificates to {{ pgo_keys_dir }} - command: "cp {{ output_dir }}/server.crt {{ pgo_keys_dir }}/client.crt" - tags: - - install - -- name: Copy keys to {{ pgo_keys_dir }} - command: "cp {{ output_dir }}/server.key {{ pgo_keys_dir }}/client.key" - tags: - - install diff --git a/installers/ansible/roles/pgo-operator/tasks/main.yml b/installers/ansible/roles/pgo-operator/tasks/main.yml index c9fc36e6a0..aafe7b6679 100644 --- a/installers/ansible/roles/pgo-operator/tasks/main.yml +++ b/installers/ansible/roles/pgo-operator/tasks/main.yml @@ -44,10 +44,6 @@ tags: - uninstall -- include_tasks: certs.yml - tags: - - install - - name: Use kubectl or oc set_fact: kubectl_or_oc: "{{ openshift_oc_bin if openshift_oc_bin is defined else 'kubectl' }}" @@ -106,7 +102,7 @@ when: pgorole_pgoadmin_result.rc == 1 - name: PGO Service Account - when: + when: - create_rbac|bool tags: - install @@ -128,7 +124,7 @@ when: pgo_service_account_result.rc == 1 - name: Cluster RBAC (namespace_mode 'dynamic') - when: + when: - create_rbac|bool - namespace_mode == "dynamic" tags: @@ -151,7 +147,7 @@ when: cluster_rbac_result.rc == 1 - name: Cluster RBAC (namespace_mode 'readonly') - when: + when: - create_rbac|bool - namespace_mode == "readonly" tags: @@ -179,7 +175,7 @@ tags: - install - update - when: + when: - create_rbac|bool - namespace_mode == "disabled" @@ -219,7 +215,8 @@ command: | {{ kubectl_or_oc }} create clusterrolebinding pgo-cluster-admin \ --clusterrole cluster-admin \ - --serviceaccount "{{ pgo_operator_namespace }}:postgres-operator" + --serviceaccount "{{ pgo_operator_namespace }}:postgres-operator" && \ + {{ kubectl_or_oc }} label clusterrolebinding pgo-cluster-admin vendor=crunchydata when: pgo_cluster_admin_result.rc == 1 @@ -266,31 +263,13 @@ - name: Create PGO BackRest Repo Secret command: | {{ kubectl_or_oc }} create secret generic pgo-backrest-repo-config \ - --from-file=config='{{ role_path }}/files/pgo-backrest-repo/config' \ - --from-file=sshd_config='{{ role_path }}/files/pgo-backrest-repo/sshd_config' \ - --from-file=aws-s3-ca.crt='{{ role_path }}/files/pgo-backrest-repo/aws-s3-ca.crt' \ --from-literal=aws-s3-key='{{ 
backrest_aws_s3_key }}' \ --from-literal=aws-s3-key-secret='{{ backrest_aws_s3_secret }}' \ -n {{ pgo_operator_namespace }} - when: pgo_backrest_repo_config_result.rc == 1 - - - name: PGO API Secret - tags: - - install - - update - block: - - name: Check PGO API Secret - shell: "{{ kubectl_or_oc }} get secret pgo.tls -n {{ pgo_operator_namespace }}" - register: pgo_tls_result - failed_when: false - - - name: Create PGO API Secret - command: | - {{ kubectl_or_oc }} create secret tls pgo.tls \ - --cert='{{ output_dir }}/server.crt' \ - --key='{{ output_dir }}/server.key' \ - -n {{ pgo_operator_namespace }} - when: pgo_tls_result.rc == 1 + when: + - pgo_backrest_repo_config_result.rc == 1 + - (backrest_aws_s3_key | default('') != '') or + (backrest_aws_s3_secret | default('') != '') - name: PGO ConfigMap tags: @@ -307,7 +286,7 @@ shell: "{{ kubectl_or_oc }} get configmap pgo-config -n {{ pgo_operator_namespace }}" register: pgo_config_result failed_when: false - + - name: Create PGO ConfigMap command: | {{ kubectl_or_oc }} create configmap pgo-config \ @@ -361,6 +340,8 @@ - name: Wait for PGO to finish deploying command: "{{ kubectl_or_oc }} rollout status deployment/postgres-operator -n {{ pgo_operator_namespace }}" async: 600 + vars: + ansible_async_dir: /tmp/.ansible_async - name: PGO Client tags: @@ -403,8 +384,8 @@ shell: "{{ kubectl_or_oc }} get -f {{ output_dir }}/pgo-client.json" register: pgo_client_json_result failed_when: false - + - name: Create PGO-Client deployment command: | {{ kubectl_or_oc }} create --filename='{{ output_dir }}/pgo-client.json' - when: pgo_client_json_result.rc == 1 \ No newline at end of file + when: pgo_client_json_result.rc == 1 diff --git a/installers/ansible/roles/pgo-operator/templates/add-targeted-namespace.sh.j2 b/installers/ansible/roles/pgo-operator/templates/add-targeted-namespace.sh.j2 index 380a8a80b7..ccaa310903 100644 --- a/installers/ansible/roles/pgo-operator/templates/add-targeted-namespace.sh.j2 +++ b/installers/ansible/roles/pgo-operator/templates/add-targeted-namespace.sh.j2 @@ -1,5 +1,5 @@ #!/bin/bash -# Copyright 2017 - 2020 Crunchy Data Solutions, Inc. +# Copyright 2017 - 2023 Crunchy Data Solutions, Inc. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at diff --git a/installers/ansible/roles/pgo-operator/templates/cluster-rbac-readonly.yaml.j2 b/installers/ansible/roles/pgo-operator/templates/cluster-rbac-readonly.yaml.j2 index 3021d4a058..f34e1b9579 100644 --- a/installers/ansible/roles/pgo-operator/templates/cluster-rbac-readonly.yaml.j2 +++ b/installers/ansible/roles/pgo-operator/templates/cluster-rbac-readonly.yaml.j2 @@ -3,6 +3,8 @@ kind: ClusterRole apiVersion: rbac.authorization.k8s.io/v1 metadata: name: pgo-cluster-role + labels: + vendor: crunchydata rules: - apiGroups: - '' @@ -17,6 +19,8 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: name: pgo-cluster-role + labels: + vendor: crunchydata roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole diff --git a/installers/ansible/roles/pgo-operator/templates/cluster-rbac.yaml.j2 b/installers/ansible/roles/pgo-operator/templates/cluster-rbac.yaml.j2 index 771080042e..03783810da 100644 --- a/installers/ansible/roles/pgo-operator/templates/cluster-rbac.yaml.j2 +++ b/installers/ansible/roles/pgo-operator/templates/cluster-rbac.yaml.j2 @@ -3,6 +3,8 @@ kind: ClusterRole apiVersion: rbac.authorization.k8s.io/v1 metadata: name: pgo-cluster-role + labels: + vendor: crunchydata rules: - apiGroups: - '' @@ -42,8 +44,6 @@ rules: - endpoints - pods - pods/exec - - pods/log - - replicasets - secrets - services - persistentvolumeclaims @@ -56,10 +56,19 @@ rules: - update - delete - deletecollection + - apiGroups: + - '' + resources: + - pods/log + verbs: + - get + - list + - watch - apiGroups: - apps resources: - deployments + - replicasets verbs: - get - list @@ -104,6 +113,8 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: name: pgo-cluster-role + labels: + vendor: crunchydata roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole diff --git a/installers/ansible/roles/pgo-operator/templates/deployment.json.j2 b/installers/ansible/roles/pgo-operator/templates/deployment.json.j2 index b94ab4fc42..adeb88f8a3 100644 --- a/installers/ansible/roles/pgo-operator/templates/deployment.json.j2 +++ b/installers/ansible/roles/pgo-operator/templates/deployment.json.j2 @@ -24,6 +24,9 @@ }, "spec": { "serviceAccountName": "postgres-operator", + "securityContext": { + "runAsNonRoot": true + }, "containers": [ { "name": "apiserver", @@ -31,6 +34,10 @@ {%- else %}{{ pgo_image_prefix }}/pgo-apiserver:{{ pgo_image_tag }} {%- endif %}", "imagePullPolicy": "IfNotPresent", + "securityContext": { + "allowPrivilegeEscalation": false, + "privileged": false + }, "ports": [ { "containerPort": {{ pgo_apiserver_port }} } ], @@ -113,6 +120,10 @@ {%- else %}{{ pgo_image_prefix }}/postgres-operator:{{ pgo_image_tag }} {%- endif %}", "imagePullPolicy": "IfNotPresent", + "securityContext": { + "allowPrivilegeEscalation": false, + "privileged": false + }, "readinessProbe": { "exec": { "command": [ @@ -167,6 +178,10 @@ "image": "{% if pgo_scheduler_image | default('') != '' %}{{ pgo_scheduler_image }} {%- else %}{{ pgo_image_prefix }}/pgo-scheduler:{{ pgo_image_tag }} {%- endif %}", + "securityContext": { + "allowPrivilegeEscalation": false, + "privileged": false + }, "livenessProbe": { "exec": { "command": [ @@ -216,6 +231,10 @@ "image": "{% if pgo_event_image | default('') != '' %}{{ pgo_event_image }} {%- else %}{{ pgo_image_prefix }}/pgo-event:{{ pgo_image_tag }} {%- endif %}", + "securityContext": { + "allowPrivilegeEscalation": false, + "privileged": false + }, "livenessProbe": { "httpGet": { "path": "/ping", 
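(The deployment template above applies the same two-level hardening this patch stamps into every container spec: runAsNonRoot on the pod-level securityContext, plus allowPrivilegeEscalation: false and privileged: false on each container. Expressed with client-go's core/v1 types, the equivalent looks roughly like the sketch below; hardenedPodSpec and boolPtr are illustrative helpers, not names from this codebase:

    package main

    import (
    	"fmt"

    	corev1 "k8s.io/api/core/v1"
    )

    func boolPtr(b bool) *bool { return &b }

    // hardenedPodSpec mirrors the securityContext additions in these hunks:
    // non-root at the pod level, no privilege escalation and no privileged
    // mode per container.
    func hardenedPodSpec(image string) corev1.PodSpec {
    	return corev1.PodSpec{
    		ServiceAccountName: "postgres-operator",
    		SecurityContext: &corev1.PodSecurityContext{
    			RunAsNonRoot: boolPtr(true),
    		},
    		Containers: []corev1.Container{{
    			Name:  "apiserver",
    			Image: image,
    			SecurityContext: &corev1.SecurityContext{
    				AllowPrivilegeEscalation: boolPtr(false),
    				Privileged:               boolPtr(false),
    			},
    		}},
    	}
    }

    func main() {
    	spec := hardenedPodSpec("registry.developers.crunchydata.com/crunchydata/pgo-apiserver:ubi8-4.6.10")
    	fmt.Println(*spec.SecurityContext.RunAsNonRoot)             // true
    	fmt.Println(*spec.Containers[0].SecurityContext.Privileged) // false
    }

Note that fsGroup stays behind the DisableFSGroup toggle elsewhere in this patch rather than being set unconditionally, presumably because restricted SCCs on OpenShift-style clusters manage fsGroup themselves and reject pods that pin it.)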
diff --git a/installers/ansible/roles/pgo-operator/templates/local-namespace-rbac.yaml.j2 b/installers/ansible/roles/pgo-operator/templates/local-namespace-rbac.yaml.j2 index 4a878395ae..6eb3fe6144 100644 --- a/installers/ansible/roles/pgo-operator/templates/local-namespace-rbac.yaml.j2 +++ b/installers/ansible/roles/pgo-operator/templates/local-namespace-rbac.yaml.j2 @@ -3,6 +3,8 @@ kind: Role apiVersion: rbac.authorization.k8s.io/v1 metadata: name: pgo-local-ns + labels: + vendor: crunchydata rules: - apiGroups: - '' @@ -28,6 +30,8 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: name: pgo-local-ns + labels: + vendor: crunchydata roleRef: apiGroup: rbac.authorization.k8s.io kind: Role @@ -41,6 +45,8 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: name: pgo-target-role-binding + labels: + vendor: crunchydata roleRef: apiGroup: rbac.authorization.k8s.io kind: Role diff --git a/installers/ansible/roles/pgo-operator/templates/pgo-role-rbac.yaml.j2 b/installers/ansible/roles/pgo-operator/templates/pgo-role-rbac.yaml.j2 index 76af49dbcd..62ecb0a1b3 100644 --- a/installers/ansible/roles/pgo-operator/templates/pgo-role-rbac.yaml.j2 +++ b/installers/ansible/roles/pgo-operator/templates/pgo-role-rbac.yaml.j2 @@ -4,6 +4,8 @@ apiVersion: rbac.authorization.k8s.io/v1 metadata: name: pgo-role namespace: {{ pgo_operator_namespace }} + labels: + vendor: crunchydata rules: - apiGroups: - '' @@ -28,6 +30,8 @@ kind: RoleBinding metadata: name: pgo-role namespace: {{ pgo_operator_namespace }} + labels: + vendor: crunchydata roleRef: apiGroup: rbac.authorization.k8s.io kind: Role diff --git a/installers/ansible/roles/pgo-operator/templates/pgo-service-account.yaml.j2 b/installers/ansible/roles/pgo-operator/templates/pgo-service-account.yaml.j2 index b8a8de6a95..3baaa4a9f8 100644 --- a/installers/ansible/roles/pgo-operator/templates/pgo-service-account.yaml.j2 +++ b/installers/ansible/roles/pgo-operator/templates/pgo-service-account.yaml.j2 @@ -4,6 +4,8 @@ kind: ServiceAccount metadata: name: postgres-operator namespace: {{ pgo_operator_namespace }} + labels: + vendor: crunchydata imagePullSecrets: {% if ccp_image_pull_secret %} - name: {{ ccp_image_pull_secret }} diff --git a/installers/ansible/roles/pgo-operator/templates/pgo.yaml.j2 b/installers/ansible/roles/pgo-operator/templates/pgo.yaml.j2 index f1b21fbbcb..5f70f1d41f 100644 --- a/installers/ansible/roles/pgo-operator/templates/pgo.yaml.j2 +++ b/installers/ansible/roles/pgo-operator/templates/pgo.yaml.j2 @@ -18,19 +18,19 @@ Cluster: PasswordAgeDays: {{ db_password_age_days }} PasswordLength: {{ db_password_length }} Replicas: {{ db_replicas }} - ArchiveMode: {{ archive_mode }} ServiceType: {{ service_type }} - EnableCrunchyadm: {{ enable_crunchyadm }} DisableReplicaStartFailReinit: {{ disable_replica_start_fail_reinit }} PodAntiAffinity: {{ pod_anti_affinity }} PodAntiAffinityPgBackRest: {{ pod_anti_affinity_pgbackrest }} PodAntiAffinityPgBouncer: {{ pod_anti_affinity_pgbouncer }} SyncReplication: {{ sync_replication }} +{% if disable_fsgroup is defined %} + DisableFSGroup: {{ disable_fsgroup }} +{% endif %} DefaultInstanceMemory: {{ default_instance_memory }} DefaultBackrestMemory: {{ default_pgbackrest_memory }} DefaultPgBouncerMemory: {{ default_pgbouncer_memory }} DefaultExporterMemory: {{ default_exporter_memory }} - DisableFSGroup: {{ disable_fsgroup }} PrimaryStorage: {{ primary_storage }} WALStorage: {{ wal_storage }} BackupStorage: {{ backup_storage }} diff --git 
a/installers/ansible/roles/pgo-operator/templates/service.json.j2 b/installers/ansible/roles/pgo-operator/templates/service.json.j2 index 766a060a72..b50985fa92 100644 --- a/installers/ansible/roles/pgo-operator/templates/service.json.j2 +++ b/installers/ansible/roles/pgo-operator/templates/service.json.j2 @@ -4,7 +4,8 @@ "metadata": { "name": "postgres-operator", "labels": { - "name": "postgres-operator" + "name": "postgres-operator", + "vendor": "crunchydata" } }, "spec": { diff --git a/installers/ansible/values.yaml b/installers/ansible/values.yaml index 4eb672bcec..50ba5c8055 100644 --- a/installers/ansible/values.yaml +++ b/installers/ansible/values.yaml @@ -17,7 +17,7 @@ badger: "false" ccp_image_prefix: "registry.developers.crunchydata.com/crunchydata" ccp_image_pull_secret: "" ccp_image_pull_secret_manifest: "" -ccp_image_tag: "centos7-12.4-4.5.0" +ccp_image_tag: "ubi8-13.10-4.6.10" create_rbac: "true" crunchy_debug: "false" db_name: "" @@ -33,7 +33,6 @@ default_exporter_memory: "24Mi" delete_operator_namespace: "false" delete_watched_namespaces: "false" disable_auto_failover: "false" -disable_fsgroup: "false" reconcile_rbac: "true" exporterport: "9187" metrics: "false" @@ -50,14 +49,14 @@ pgo_apiserver_url: "https://postgres-operator" pgo_client_cert_secret: "pgo.tls" pgo_client_container_install: "false" pgo_client_install: "true" -pgo_client_version: "4.5.0" +pgo_client_version: "4.6.10" pgo_cluster_admin: "false" pgo_disable_eventing: "false" pgo_disable_tls: "false" pgo_image_prefix: "registry.developers.crunchydata.com/crunchydata" pgo_image_pull_secret: "" pgo_image_pull_secret_manifest: "" -pgo_image_tag: "centos7-4.5.0" +pgo_image_tag: "ubi8-4.6.10" pgo_installation_name: "devtest" pgo_noauth_routes: "" pgo_operator_namespace: "pgo" diff --git a/installers/gcp-marketplace/Dockerfile b/installers/gcp-marketplace/Dockerfile index adf85a355a..464e7d74fd 100644 --- a/installers/gcp-marketplace/Dockerfile +++ b/installers/gcp-marketplace/Dockerfile @@ -20,22 +20,24 @@ RUN apt-get update \ && apt-get install -y --no-install-recommends ansible=2.9.* openssh-client \ && rm -rf /var/lib/apt/lists/* -COPY installers/ansible/* \ +COPY ansible/* \ /opt/postgres-operator/ansible/ -COPY installers/favicon.png \ - installers/gcp-marketplace/install-job.yaml \ - installers/gcp-marketplace/install.sh \ - installers/gcp-marketplace/values.yaml \ +COPY favicon.png \ + gcp-marketplace/install-job.yaml \ + gcp-marketplace/install.sh \ + gcp-marketplace/values.yaml \ /opt/postgres-operator/ -COPY installers/gcp-marketplace/install-hook.sh \ +COPY gcp-marketplace/install-hook.sh \ /bin/create_manifests.sh -COPY installers/gcp-marketplace/schema.yaml \ +COPY gcp-marketplace/schema.yaml \ /data/ -COPY installers/gcp-marketplace/application.yaml \ +COPY gcp-marketplace/application.yaml \ /data/manifest/ -COPY installers/gcp-marketplace/test-pod.yaml \ +COPY gcp-marketplace/test-pod.yaml \ /data-test/manifest/ +COPY gcp-marketplace/test-schema.yaml \ + /data-test/schema.yaml ARG PGO_VERSION RUN for file in \ diff --git a/installers/gcp-marketplace/Makefile b/installers/gcp-marketplace/Makefile index 5f4f0c6eb1..e5a0fca7a7 100644 --- a/installers/gcp-marketplace/Makefile +++ b/installers/gcp-marketplace/Makefile @@ -6,7 +6,7 @@ MARKETPLACE_TOOLS ?= gcr.io/cloud-marketplace-tools/k8s/dev:$(MARKETPLACE_VERSIO MARKETPLACE_VERSION ?= 0.9.4 KUBECONFIG ?= $(HOME)/.kube/config PARAMETERS ?= {} -PGO_VERSION ?= 4.5.0 +PGO_VERSION ?= 4.6.10 IMAGE_BUILD_ARGS = --build-arg 
MARKETPLACE_VERSION='$(MARKETPLACE_VERSION)' \ --build-arg PGO_VERSION='$(PGO_VERSION)' @@ -37,12 +37,12 @@ image: image-$(IMAGE_BUILDER) .PHONY: image-buildah image-buildah: ## Build the deployer image with Buildah - sudo buildah bud --file Dockerfile --tag '$(DEPLOYER_IMAGE)' $(IMAGE_BUILD_ARGS) --layers ../.. + sudo buildah bud --file Dockerfile --tag '$(DEPLOYER_IMAGE)' $(IMAGE_BUILD_ARGS) --layers .. sudo buildah push '$(DEPLOYER_IMAGE)' docker-daemon:'$(DEPLOYER_IMAGE)' .PHONY: image-docker image-docker: ## Build the deployer image with Docker - docker build --file Dockerfile --tag '$(DEPLOYER_IMAGE)' $(IMAGE_BUILD_ARGS) ../.. + docker build --file Dockerfile --tag '$(DEPLOYER_IMAGE)' $(IMAGE_BUILD_ARGS) .. # PARAMETERS='{"OPERATOR_NAMESPACE": "", "OPERATOR_NAME": "", "OPERATOR_ADMIN_PASSWORD": ""}' .PHONY: install diff --git a/installers/gcp-marketplace/README.md b/installers/gcp-marketplace/README.md index fd686764ad..62255ffe3f 100644 --- a/installers/gcp-marketplace/README.md +++ b/installers/gcp-marketplace/README.md @@ -1,11 +1,11 @@ - This directory contains the files that are used to install [Crunchy PostgreSQL for GKE][gcp-details], -which uses the PostgreSQL Operator, from the Google Cloud Marketplace. +which uses PGO: the PostgreSQL Operator from [Crunchy Data][crunchy-data], from the Google Cloud Marketplace. The integration centers around a container [image](./Dockerfile) that contains an installation [schema](./schema.yaml) and an [Application][k8s-app] [manifest](./application.yaml). Consult the [technical requirements][gcp-k8s-requirements] when making changes. +[crunchy-data]: https://www.crunchydata.com [k8s-app]: https://github.com/kubernetes-sigs/application/ [gcp-k8s]: https://cloud.google.com/marketplace/docs/kubernetes-apps/ [gcp-k8s-requirements]: https://cloud.google.com/marketplace/docs/partners/kubernetes-solutions/create-app-package @@ -59,7 +59,7 @@ Google Cloud Marketplace. 
```shell IMAGE_REPOSITORY=gcr.io/crunchydata-public/postgres-operator - export PGO_VERSION=4.5.0 + export PGO_VERSION=4.6.10 export INSTALLER_IMAGE=${IMAGE_REPOSITORY}/deployer:${PGO_VERSION} export OPERATOR_IMAGE=${IMAGE_REPOSITORY}:${PGO_VERSION} export OPERATOR_IMAGE_API=${IMAGE_REPOSITORY}/pgo-apiserver:${PGO_VERSION} diff --git a/installers/gcp-marketplace/install.sh b/installers/gcp-marketplace/install.sh index 6dc770b993..cbe6d6890d 100755 --- a/installers/gcp-marketplace/install.sh +++ b/installers/gcp-marketplace/install.sh @@ -37,16 +37,35 @@ resources=( clusterrolebinding/pgo-cluster-role configmap/pgo-config deployment/postgres-operator + role/pgo-backrest-role + role/pgo-pg-role role/pgo-role + role/pgo-target-role + rolebinding/pgo-backrest-role-binding + rolebinding/pgo-pg-role-binding rolebinding/pgo-role + rolebinding/pgo-target-role-binding secret/pgo.tls - secret/pgo-backrest-repo-config secret/pgorole-pgoadmin secret/pgouser-admin service/postgres-operator + serviceaccount/pgo-backrest + serviceaccount/pgo-default + serviceaccount/pgo-pg + serviceaccount/pgo-target serviceaccount/postgres-operator ) for resource in "${resources[@]}"; do + kind="${resource%/*}" + name="${resource#*/}" + + for _ in $(seq 5); do + if [ "$( kc get "$kind" --field-selector="metadata.name=$name" --output=name )" ] + then break + else sleep 1s + fi + done + kc patch "$resource" --type=strategic --patch="$application_ownership" done diff --git a/installers/gcp-marketplace/schema.yaml b/installers/gcp-marketplace/schema.yaml index 6f0ec5320f..6b7e3df965 100644 --- a/installers/gcp-marketplace/schema.yaml +++ b/installers/gcp-marketplace/schema.yaml @@ -11,13 +11,13 @@ properties: INSTALLER_SERVICE_ACCOUNT: # This key appears in the ClusterRoleBinding name. title: Cluster Admin Service Account - description: >- - Name of a service account in the target namespace that has cluster-admin permissions. - This is used by the operator installer to create Custom Resource Definitions. type: string x-google-marketplace: type: SERVICE_ACCOUNT serviceAccount: + description: >- + Name of a service account in the target namespace that has cluster-admin permissions. + This is used by the operator installer to create Custom Resource Definitions. 
roles:
          - type: ClusterRole
            rulesType: PREDEFINED
diff --git a/installers/gcp-marketplace/test-schema.yaml b/installers/gcp-marketplace/test-schema.yaml
new file mode 100644
index 0000000000..5dae182d7e
--- /dev/null
+++ b/installers/gcp-marketplace/test-schema.yaml
@@ -0,0 +1,6 @@
+properties:
+  OPERATOR_ADMIN_PASSWORD:
+    type: string
+    default: insecure
+    x-google-marketplace:
+      type: MASKED_FIELD
diff --git a/installers/gcp-marketplace/values.yaml b/installers/gcp-marketplace/values.yaml
index cb0840b35b..abcba17b68 100644
--- a/installers/gcp-marketplace/values.yaml
+++ b/installers/gcp-marketplace/values.yaml
@@ -10,7 +10,7 @@ badger: "false"
 ccp_image_prefix: "registry.developers.crunchydata.com/crunchydata"
 ccp_image_pull_secret: ""
 ccp_image_pull_secret_manifest: ""
-ccp_image_tag: "centos7-12.4-4.5.0"
+ccp_image_tag: "ubi8-13.10-4.6.10"
 create_rbac: "true"
 db_name: ""
 db_password_age_days: "0"
@@ -32,9 +32,9 @@ pgo_admin_role_name: "pgoadmin"
 pgo_admin_username: "admin"
 pgo_client_container_install: "false"
 pgo_client_install: 'false'
-pgo_client_version: "4.5.0"
+pgo_client_version: "4.6.10"
 pgo_image_prefix: "registry.developers.crunchydata.com/crunchydata"
-pgo_image_tag: "centos7-4.5.0"
+pgo_image_tag: "ubi8-4.6.10"
 pgo_installation_name: '${OPERATOR_NAME}'
 pgo_operator_namespace: '${OPERATOR_NAMESPACE}'
 scheduler_timeout: "3600"
diff --git a/installers/helm/Chart.yaml b/installers/helm/Chart.yaml
index 6d7ffeaa30..a6df235a01 100644
--- a/installers/helm/Chart.yaml
+++ b/installers/helm/Chart.yaml
@@ -1,11 +1,11 @@
 apiVersion: v2
 name: postgres-operator
-description: Crunchy PostgreSQL Operator Helm chart for Kubernetes
+description: 'PGO: The Postgres Operator from Crunchy Data Helm Chart for Kubernetes'
 type: application
-version: 0.1.0
-appVersion: 4.5.0
+version: 0.2.0
+appVersion: 4.6.10
 home: https://github.com/CrunchyData/postgres-operator
-icon: https://github.com/CrunchyData/postgres-operator/raw/master/crunchy_logo.png
+icon: https://github.com/CrunchyData/postgres-operator/raw/master/docs/static/logos/pgo.svg
 keywords:
   - PostgreSQL
   - Operator
@@ -13,4 +13,4 @@ keywords:
   - Postgres
   - SQL
   - NoSQL
-  - RDBMS
\ No newline at end of file
+  - RDBMS
diff --git a/installers/helm/README.md b/installers/helm/README.md
index 6f06abd9bb..0dbe9ac5a3 100644
--- a/installers/helm/README.md
+++ b/installers/helm/README.md
@@ -1,10 +1,10 @@
-# Crunchy PostgreSQL Operator
+# PGO: The Postgres Operator from Crunchy Data
 
-This Helm chart installs the Crunchy PostgreSQL Operator by using its “pgo-deployer”
-container. Helm will setup the ServiceAccount, RBAC, and ConfigMap needed to run
-the container as a Kubernetes Job. Then a job will be created based on `helm`
-`install`, `upgrade`, or `uninstall`. After the job has completed the RBAC will
-be cleaned up.
+This Helm chart installs PGO: the Postgres Operator from Crunchy Data by using
+its “pgo-deployer” container. Helm will set up the ServiceAccount, RBAC, and
+ConfigMap needed to run the container as a Kubernetes Job. Then a job will
+be created based on `helm` `install`, `upgrade`, or `uninstall`. After the
+job has completed, the RBAC will be cleaned up.
 
 ## Prerequisites
 
@@ -39,10 +39,10 @@ cd postgres-operator/installers/helm
 helm uninstall postgres-operator -n pgo
 ```
 
-## Configuration 
+## Configuration
 
 The following shows the configurable parameters that are relevant to the Helm
-Chart.
A full list of all PGO configuration options can be found in the [documentation](https://access.crunchydata.com/documentation/postgres-operator/latest/installation/configuration/). | Name | Default | Description | diff --git a/installers/helm/templates/postgres-operator-install.yaml b/installers/helm/templates/postgres-operator-install.yaml index 43b0604c3b..8196dfb2a9 100644 --- a/installers/helm/templates/postgres-operator-install.yaml +++ b/installers/helm/templates/postgres-operator-install.yaml @@ -3,7 +3,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: pgo-deploy + name: pgo-deploy-install namespace: {{ .Release.Namespace }} labels: {{ include "postgres-operator.labels" . | indent 4 }} diff --git a/installers/helm/templates/postgres-operator-uninstall.yaml b/installers/helm/templates/postgres-operator-uninstall.yaml index 0b7553b0e7..945295836b 100644 --- a/installers/helm/templates/postgres-operator-uninstall.yaml +++ b/installers/helm/templates/postgres-operator-uninstall.yaml @@ -3,7 +3,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: pgo-deploy + name: pgo-deploy-uninstall namespace: {{ .Release.Namespace }} labels: {{ include "postgres-operator.labels" . | indent 4 }} diff --git a/installers/helm/templates/postgres-operator-upgrade.yaml b/installers/helm/templates/postgres-operator-upgrade.yaml index 4ba8954b14..30a450c8d4 100644 --- a/installers/helm/templates/postgres-operator-upgrade.yaml +++ b/installers/helm/templates/postgres-operator-upgrade.yaml @@ -3,7 +3,7 @@ apiVersion: batch/v1 kind: Job metadata: - name: pgo-deploy + name: pgo-deploy-upgrade namespace: {{ .Release.Namespace }} labels: {{ include "postgres-operator.labels" . | indent 4 }} diff --git a/installers/helm/templates/rbac.yaml b/installers/helm/templates/rbac.yaml index dbef140471..19d6fc06e4 100644 --- a/installers/helm/templates/rbac.yaml +++ b/installers/helm/templates/rbac.yaml @@ -73,6 +73,7 @@ rules: - extensions resources: - deployments + - replicasets verbs: - get - list @@ -145,4 +146,4 @@ subjects: - kind: ServiceAccount name: {{ include "postgres-operator.serviceAccountName" . 
}} namespace: {{ .Release.Namespace }} -{{ end }} \ No newline at end of file +{{ end }} diff --git a/installers/helm/values.yaml b/installers/helm/values.yaml index 649436e0af..c80b1d2fef 100644 --- a/installers/helm/values.yaml +++ b/installers/helm/values.yaml @@ -37,7 +37,7 @@ badger: "false" ccp_image_prefix: "registry.developers.crunchydata.com/crunchydata" ccp_image_pull_secret: "" ccp_image_pull_secret_manifest: "" -ccp_image_tag: "centos7-12.4-4.5.0" +ccp_image_tag: "ubi8-13.10-4.6.10" create_rbac: "true" crunchy_debug: "false" db_name: "" @@ -53,7 +53,6 @@ default_exporter_memory: "24Mi" delete_operator_namespace: "false" delete_watched_namespaces: "false" disable_auto_failover: "false" -disable_fsgroup: "false" reconcile_rbac: "true" exporterport: "9187" metrics: "false" @@ -70,14 +69,14 @@ pgo_apiserver_url: "https://postgres-operator" pgo_client_cert_secret: "pgo.tls" pgo_client_container_install: "false" pgo_client_install: "true" -pgo_client_version: "4.5.0" +pgo_client_version: "4.6.10" pgo_cluster_admin: "false" pgo_disable_eventing: "false" pgo_disable_tls: "false" pgo_image_prefix: "registry.developers.crunchydata.com/crunchydata" pgo_image_pull_secret: "" pgo_image_pull_secret_manifest: "" -pgo_image_tag: "centos7-4.5.0" +pgo_image_tag: "ubi8-4.6.10" pgo_installation_name: "devtest" pgo_noauth_routes: "" pgo_operator_namespace: "pgo" diff --git a/installers/image/bin/pgo-deploy.sh b/installers/image/bin/pgo-deploy.sh index 9a965d58be..11c81eeae2 100755 --- a/installers/image/bin/pgo-deploy.sh +++ b/installers/image/bin/pgo-deploy.sh @@ -1,6 +1,6 @@ #!/bin/bash -# Copyright 2020 Crunchy Data Solutions, Inc. +# Copyright 2020 - 2023 Crunchy Data Solutions, Inc. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at diff --git a/installers/image/conf/kubernetes.repo b/installers/image/conf/kubernetes.repo index 0a8b4cf2bf..8830e53746 100644 --- a/installers/image/conf/kubernetes.repo +++ b/installers/image/conf/kubernetes.repo @@ -3,5 +3,5 @@ name=Kubernetes baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64 enabled=1 gpgcheck=1 -repo_gpgcheck=1 +repo_gpgcheck=0 gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg \ No newline at end of file diff --git a/installers/kubectl/client-setup.sh b/installers/kubectl/client-setup.sh index 6956d63f6b..e1431abd20 100755 --- a/installers/kubectl/client-setup.sh +++ b/installers/kubectl/client-setup.sh @@ -1,6 +1,6 @@ #!/bin/bash -# Copyright 2020 Crunchy Data Solutions, Inc. +# Copyright 2020 - 2023 Crunchy Data Solutions, Inc. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at @@ -14,7 +14,7 @@ # This script should be run after the operator has been deployed PGO_OPERATOR_NAMESPACE="${PGO_OPERATOR_NAMESPACE:-pgo}" PGO_USER_ADMIN="${PGO_USER_ADMIN:-pgouser-admin}" -PGO_CLIENT_VERSION="${PGO_CLIENT_VERSION:-v4.5.0}" +PGO_CLIENT_VERSION="${PGO_CLIENT_VERSION:-v4.6.10}" PGO_CLIENT_URL="https://github.com/CrunchyData/postgres-operator/releases/download/${PGO_CLIENT_VERSION}" PGO_CMD="${PGO_CMD-kubectl}" diff --git a/installers/kubectl/postgres-operator-ocp311.yml b/installers/kubectl/postgres-operator-ocp311.yml index 9978d052d3..d44ce571fe 100644 --- a/installers/kubectl/postgres-operator-ocp311.yml +++ b/installers/kubectl/postgres-operator-ocp311.yml @@ -3,12 +3,16 @@ kind: ServiceAccount metadata: name: pgo-deployer-sa namespace: pgo + labels: + vendor: crunchydata --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: name: pgo-deployer-crb namespace: pgo + labels: + vendor: crunchydata roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole @@ -23,6 +27,8 @@ kind: ConfigMap metadata: name: pgo-deployer-cm namespace: pgo + labels: + vendor: crunchydata data: values.yaml: |- # ===================== @@ -44,7 +50,7 @@ data: ccp_image_prefix: "registry.developers.crunchydata.com/crunchydata" ccp_image_pull_secret: "" ccp_image_pull_secret_manifest: "" - ccp_image_tag: "centos7-12.4-4.5.0" + ccp_image_tag: "ubi8-13.10-4.6.10" create_rbac: "true" crunchy_debug: "false" db_name: "" @@ -60,7 +66,7 @@ data: delete_operator_namespace: "false" delete_watched_namespaces: "false" disable_auto_failover: "false" - disable_fsgroup: "false" + disable_fsgroup: "true" reconcile_rbac: "true" exporterport: "9187" metrics: "false" @@ -77,14 +83,14 @@ data: pgo_client_cert_secret: "pgo.tls" pgo_client_container_install: "false" pgo_client_install: "true" - pgo_client_version: "4.5.0" + pgo_client_version: "4.6.10" pgo_cluster_admin: "false" pgo_disable_eventing: "false" pgo_disable_tls: "false" pgo_image_prefix: "registry.developers.crunchydata.com/crunchydata" pgo_image_pull_secret: "" pgo_image_pull_secret_manifest: "" - pgo_image_tag: "centos7-4.5.0" + pgo_image_tag: "ubi8-4.6.10" pgo_installation_name: "devtest" pgo_noauth_routes: "" pgo_operator_namespace: "pgo" @@ -151,17 +157,21 @@ kind: Job metadata: name: pgo-deploy namespace: pgo + labels: + vendor: crunchydata spec: backoffLimit: 0 template: metadata: name: pgo-deploy + labels: + vendor: crunchydata spec: serviceAccountName: pgo-deployer-sa restartPolicy: Never containers: - name: pgo-deploy - image: registry.developers.crunchydata.com/crunchydata/pgo-deployer:centos7-4.5.0 + image: registry.developers.crunchydata.com/crunchydata/pgo-deployer:ubi8-4.6.10 imagePullPolicy: IfNotPresent env: - name: DEPLOY_ACTION diff --git a/installers/kubectl/postgres-operator.yml b/installers/kubectl/postgres-operator.yml index 2b516ef2ca..85fd290f9a 100644 --- a/installers/kubectl/postgres-operator.yml +++ b/installers/kubectl/postgres-operator.yml @@ -3,11 +3,15 @@ kind: ServiceAccount metadata: name: pgo-deployer-sa namespace: pgo + labels: + vendor: crunchydata --- kind: ClusterRole apiVersion: rbac.authorization.k8s.io/v1 metadata: name: pgo-deployer-cr + labels: + vendor: crunchydata rules: - apiGroups: - '' @@ -60,6 +64,7 @@ rules: - extensions resources: - deployments + - replicasets verbs: - get - list @@ -117,6 +122,8 @@ kind: ConfigMap metadata: name: pgo-deployer-cm namespace: pgo + labels: + vendor: crunchydata data: values.yaml: |- # ===================== 
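Both installer manifests above drive the same one-shot deployer Job, `pgo-deploy`, now labeled `vendor: crunchydata` and pinned to the `ubi8-4.6.10` deployer image. A hedged sketch of applying the Kubernetes variant end to end; the raw manifest URL is an assumption based on the file paths in this diff and the `v4.6.10` tag, and a local checkout works just as well:

```
# Create the target namespace, run the deployer Job, and wait for it to finish.
# The raw.githubusercontent.com URL is an assumption based on this diff's
# file paths; substitute a path into a local checkout if you have one.
kubectl create namespace pgo
kubectl apply -f https://raw.githubusercontent.com/CrunchyData/postgres-operator/v4.6.10/installers/kubectl/postgres-operator.yml
kubectl -n pgo wait --for=condition=complete --timeout=300s job/pgo-deploy
kubectl -n pgo get deployment postgres-operator
```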
@@ -138,7 +145,7 @@ data: ccp_image_prefix: "registry.developers.crunchydata.com/crunchydata" ccp_image_pull_secret: "" ccp_image_pull_secret_manifest: "" - ccp_image_tag: "centos7-12.4-4.5.0" + ccp_image_tag: "ubi8-13.10-4.6.10" create_rbac: "true" crunchy_debug: "false" db_name: "" @@ -171,14 +178,14 @@ data: pgo_client_cert_secret: "pgo.tls" pgo_client_container_install: "false" pgo_client_install: "true" - pgo_client_version: "4.5.0" + pgo_client_version: "4.6.10" pgo_cluster_admin: "false" pgo_disable_eventing: "false" pgo_disable_tls: "false" pgo_image_prefix: "registry.developers.crunchydata.com/crunchydata" pgo_image_pull_secret: "" pgo_image_pull_secret_manifest: "" - pgo_image_tag: "centos7-4.5.0" + pgo_image_tag: "ubi8-4.6.10" pgo_installation_name: "devtest" pgo_noauth_routes: "" pgo_operator_namespace: "pgo" @@ -244,6 +251,8 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: name: pgo-deployer-crb + labels: + vendor: crunchydata roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole @@ -258,17 +267,21 @@ kind: Job metadata: name: pgo-deploy namespace: pgo + labels: + vendor: crunchydata spec: backoffLimit: 0 template: metadata: name: pgo-deploy + labels: + vendor: crunchydata spec: serviceAccountName: pgo-deployer-sa restartPolicy: Never containers: - name: pgo-deploy - image: registry.developers.crunchydata.com/crunchydata/pgo-deployer:centos7-4.5.0 + image: registry.developers.crunchydata.com/crunchydata/pgo-deployer:ubi8-4.6.10 imagePullPolicy: IfNotPresent env: - name: DEPLOY_ACTION diff --git a/installers/metrics/ansible/README.md b/installers/metrics/ansible/README.md index 1c047d1a85..c14ed41eb7 100644 --- a/installers/metrics/ansible/README.md +++ b/installers/metrics/ansible/README.md @@ -1,15 +1,15 @@ -# Crunchy Data PostgreSQL Operator Monitoring Playbook +# PGO: Postgres Operator Monitoring Playbook

- Crunchy Data + Crunchy Data

-Latest Release: 4.5.0 +Latest Release: 4.6.10 ## General -This repository contains Ansible Roles for deploying the metrics stack for the -Crunchy PostgreSQL Operator. +This repository contains Ansible Roles for deploying the metrics stack for PGO, +the Postgres Operator from [Crunchy Data](https://www.crunchydata.com). -See the [official Crunchy PostgreSQL Operator documentation](https://access.crunchydata.com/documentation/postgres-operator/) +See the [PGO documentation](https://access.crunchydata.com/documentation/postgres-operator/) for more information. diff --git a/installers/metrics/ansible/roles/pgo-metrics/defaults/main.yml b/installers/metrics/ansible/roles/pgo-metrics/defaults/main.yml index 775d6691f5..b2e217360b 100644 --- a/installers/metrics/ansible/roles/pgo-metrics/defaults/main.yml +++ b/installers/metrics/ansible/roles/pgo-metrics/defaults/main.yml @@ -9,7 +9,7 @@ delete_metrics_namespace: "false" metrics_namespace: "pgo" metrics_image_pull_secret: "" metrics_image_pull_secret_manifest: "" -pgmonitor_version: "v4.4-RC7" +pgmonitor_version: "v4.4" alertmanager_configmap: "alertmanager-config" alertmanager_rules_configmap: "alertmanager-rules-config" @@ -29,7 +29,7 @@ grafana_admin_password: "" grafana_install: "true" grafana_image_prefix: "grafana" grafana_image_name: "grafana" -grafana_image_tag: "6.7.4" +grafana_image_tag: "6.7.5" grafana_port: "3000" grafana_service_name: "crunchy-grafana" grafana_service_type: "ClusterIP" @@ -45,7 +45,7 @@ prometheus_custom_config: "" prometheus_install: "true" prometheus_image_prefix: "prom" prometheus_image_name: "prometheus" -prometheus_image_tag: "v2.20.0" +prometheus_image_tag: "v2.24.0" prometheus_port: "9090" prometheus_service_name: "crunchy-prometheus" prometheus_service_type: "ClusterIP" diff --git a/installers/metrics/ansible/roles/pgo-metrics/tasks/alertmanager.yml b/installers/metrics/ansible/roles/pgo-metrics/tasks/alertmanager.yml index dc82e92d1a..9c27302579 100644 --- a/installers/metrics/ansible/roles/pgo-metrics/tasks/alertmanager.yml +++ b/installers/metrics/ansible/roles/pgo-metrics/tasks/alertmanager.yml @@ -16,7 +16,7 @@ - name: Set pgmonitor Prometheus Directory Fact set_fact: - pgmonitor_prometheus_dir: "{{ metrics_dir }}/pgmonitor-{{ pgmonitor_version | replace('v','') }}/prometheus" + pgmonitor_prometheus_dir: "{{ pgmonitor_dir }}/prometheus" - name: Copy Alertmanger Config to Output Directory command: "cp {{ pgmonitor_prometheus_dir }}/{{ item.src }} {{ alertmanager_output_dir }}/{{ item.dst }}" diff --git a/installers/metrics/ansible/roles/pgo-metrics/tasks/grafana.yml b/installers/metrics/ansible/roles/pgo-metrics/tasks/grafana.yml index 1d528429b5..020e8cfa6d 100644 --- a/installers/metrics/ansible/roles/pgo-metrics/tasks/grafana.yml +++ b/installers/metrics/ansible/roles/pgo-metrics/tasks/grafana.yml @@ -9,7 +9,7 @@ grafana_output_dir: "{{ metrics_dir }}/output/grafana" - name: Ensure Output Directory Exists - file: + file: path: "{{ grafana_output_dir }}" state: "directory" mode: "0700" @@ -48,7 +48,7 @@ - name: Set pgmonitor Grafana Directory Fact set_fact: - pgmonitor_grafana_dir: "{{ metrics_dir }}/pgmonitor-{{ pgmonitor_version | replace('v','') }}/grafana" + pgmonitor_grafana_dir: "{{ pgmonitor_dir }}/grafana" - name: Copy Grafana Config to Output Directory command: "cp {{ pgmonitor_grafana_dir }}/{{ item }} {{ grafana_output_dir }}" @@ -111,7 +111,7 @@ src: "{{ item }}" dest: "{{ grafana_output_dir }}/{{ item | replace('.j2', '') }}" mode: "0600" - loop: + loop: - grafana-pvc.json.j2 - 
grafana-service.json.j2
     - grafana-deployment.json.j2
diff --git a/installers/metrics/ansible/roles/pgo-metrics/tasks/main.yml b/installers/metrics/ansible/roles/pgo-metrics/tasks/main.yml
index 425d3f8e1b..8c781a0b2b 100644
--- a/installers/metrics/ansible/roles/pgo-metrics/tasks/main.yml
+++ b/installers/metrics/ansible/roles/pgo-metrics/tasks/main.yml
@@ -1,7 +1,7 @@
 ---
 - name: Set Metrics Directory Fact
   set_fact:
-    metrics_dir: "{{ ansible_env.HOME }}/.pgo/metrics/{{ metrics_namespace }}"
+    metrics_dir: "{{ ansible_env.HOME }}/.pgo/metrics"
   tags: always
 
 - name: Ensure Output Directory Exists
@@ -54,16 +54,46 @@
     - install-metrics
     - update-metrics
   block:
+    - name: Check for pgmonitor
+      stat:
+        path: "/opt/crunchy/pgmonitor"
+      register: pgmonitor_dir_embed
+
+    - name: Set pgMonitor Directory Fact
+      block:
+        - name: Embedded Path
+          set_fact:
+            pgmonitor_dir: "/opt/crunchy/pgmonitor"
+          when: pgmonitor_dir_embed.stat.exists
+
+        - name: Downloaded Path
+          set_fact:
+            pgmonitor_dir: "{{ metrics_dir }}/pgmonitor"
+          when: not pgmonitor_dir_embed.stat.exists
+
+    - name: Ensure pgMonitor Output Directory Exists
+      file:
+        path: "{{ pgmonitor_dir }}"
+        state: directory
+        mode: 0700
+      when: not pgmonitor_dir_embed.stat.exists
+
     - name: Download pgmonitor {{ pgmonitor_version }}
       get_url:
         url: https://github.com/CrunchyData/pgmonitor/archive/{{ pgmonitor_version }}.tar.gz
         dest: "{{ metrics_dir }}"
         mode: "0600"
+      when: not pgmonitor_dir_embed.stat.exists
 
     - name: Extract pgmonitor
       unarchive:
         src: "{{ metrics_dir }}/pgmonitor-{{ pgmonitor_version | replace('v','') }}.tar.gz"
         dest: "{{ metrics_dir }}"
+      when: not pgmonitor_dir_embed.stat.exists
+
+    - name: Copy pgmonitor to the correct directory
+      command: "cp -R {{ metrics_dir }}/pgmonitor-{{ pgmonitor_version | replace('v','') }}/.
{{ pgmonitor_dir }}" + when: not pgmonitor_dir_embed.stat.exists - name: Create Metrics Image Pull Secret shell: > @@ -107,6 +137,8 @@ poll: 0 loop: "{{ deployments }}" register: deployment_results + vars: + ansible_async_dir: /tmp/.ansible_async - name: Check Metrics Deployment Status async_status: diff --git a/installers/metrics/ansible/roles/pgo-metrics/tasks/prometheus.yml b/installers/metrics/ansible/roles/pgo-metrics/tasks/prometheus.yml index ffcfa7c625..729fd4762e 100644 --- a/installers/metrics/ansible/roles/pgo-metrics/tasks/prometheus.yml +++ b/installers/metrics/ansible/roles/pgo-metrics/tasks/prometheus.yml @@ -9,7 +9,7 @@ prom_output_dir: "{{ metrics_dir }}/output/prom" - name: Ensure Output Directory Exists - file: + file: path: "{{ prom_output_dir }}" state: "directory" mode: "0700" @@ -22,7 +22,7 @@ loop: - prometheus-rbac.json.j2 when: create_rbac | bool - + - name: Create Prometheus RBAC command: "{{ kubectl_or_oc }} create -f {{ prom_output_dir }}/{{ item }} -n {{ metrics_namespace }}" loop: @@ -35,7 +35,7 @@ - name: Set pgmonitor Prometheus Directory Fact set_fact: - pgmonitor_prometheus_dir: "{{ metrics_dir }}/pgmonitor-{{ pgmonitor_version | replace('v','') }}/prometheus" + pgmonitor_prometheus_dir: "{{ pgmonitor_dir }}/prometheus" - name: Copy Prometheus Config to Output Directory command: "cp {{ pgmonitor_prometheus_dir }}/{{ item.src }} {{ prom_output_dir }}/{{ item.dst }}" @@ -88,7 +88,7 @@ src: "{{ item }}" dest: "{{ prom_output_dir }}/{{ item | replace('.j2', '') }}" mode: "0600" - loop: + loop: - prometheus-pvc.json.j2 - prometheus-service.json.j2 - prometheus-deployment.json.j2 diff --git a/installers/metrics/ansible/roles/pgo-metrics/templates/alertmanager-deployment.json.j2 b/installers/metrics/ansible/roles/pgo-metrics/templates/alertmanager-deployment.json.j2 index bcae32cc41..6f4ef836d3 100644 --- a/installers/metrics/ansible/roles/pgo-metrics/templates/alertmanager-deployment.json.j2 +++ b/installers/metrics/ansible/roles/pgo-metrics/templates/alertmanager-deployment.json.j2 @@ -30,7 +30,8 @@ {% if not (disable_fsgroup | default(false) | bool) %} {% if (alertmanager_supplemental_groups | default('')) != '' %},{% endif -%} "fsGroup": 26, - "runAsUser": 2 + "runAsUser": 2, + "runAsNonRoot": true {% endif %} }, "serviceAccountName": "alertmanager", diff --git a/installers/metrics/ansible/roles/pgo-metrics/templates/grafana-deployment.json.j2 b/installers/metrics/ansible/roles/pgo-metrics/templates/grafana-deployment.json.j2 index f3815541d6..7759430969 100644 --- a/installers/metrics/ansible/roles/pgo-metrics/templates/grafana-deployment.json.j2 +++ b/installers/metrics/ansible/roles/pgo-metrics/templates/grafana-deployment.json.j2 @@ -30,7 +30,8 @@ {% if not (disable_fsgroup | default(false) | bool) %} {% if (grafana_supplemental_groups | default('')) != '' %},{% endif -%} "fsGroup": 26, - "runAsUser": 2 + "runAsUser": 2, + "runAsNonRoot": true {% endif %} }, "serviceAccountName": "grafana", diff --git a/installers/metrics/ansible/roles/pgo-metrics/templates/prometheus-deployment.json.j2 b/installers/metrics/ansible/roles/pgo-metrics/templates/prometheus-deployment.json.j2 index 64980cb2b5..2618f07bed 100644 --- a/installers/metrics/ansible/roles/pgo-metrics/templates/prometheus-deployment.json.j2 +++ b/installers/metrics/ansible/roles/pgo-metrics/templates/prometheus-deployment.json.j2 @@ -30,7 +30,8 @@ {% if not (disable_fsgroup | default(false) | bool) %} {% if (prometheus_supplemental_groups | default('')) != '' %},{% endif -%} "fsGroup": 26, - 
"runAsUser": 2 + "runAsUser": 2, + "runAsNonRoot": true {% endif %} }, "serviceAccountName": "prometheus-sa", diff --git a/installers/metrics/helm/Chart.yaml b/installers/metrics/helm/Chart.yaml index 603cab3982..b0133bdc6c 100644 --- a/installers/metrics/helm/Chart.yaml +++ b/installers/metrics/helm/Chart.yaml @@ -2,7 +2,7 @@ apiVersion: v2 name: postgres-operator-monitoring description: Install for Crunchy PostgreSQL Operator Monitoring type: application -version: 0.1.0 -appVersion: 4.5.0 +version: 0.2.0 +appVersion: 4.6.10 home: https://github.com/CrunchyData/postgres-operator -icon: https://github.com/CrunchyData/postgres-operator/raw/master/crunchy_logo.png \ No newline at end of file +icon: https://github.com/CrunchyData/postgres-operator/raw/master/docs/static/logos/pgo.svg diff --git a/installers/metrics/helm/helm_template.yaml b/installers/metrics/helm/helm_template.yaml index d5e346dbc7..63647f1f90 100644 --- a/installers/metrics/helm/helm_template.yaml +++ b/installers/metrics/helm/helm_template.yaml @@ -20,5 +20,5 @@ serviceAccount: # the image prefix and tag to use for the 'pgo-deployer' container pgo_image_prefix: "registry.developers.crunchydata.com/crunchydata" -pgo_image_tag: "centos7-4.5.0" +pgo_image_tag: "ubi8-4.6.10" diff --git a/installers/metrics/helm/values.yaml b/installers/metrics/helm/values.yaml index 9f2ecefb63..990f7343ee 100644 --- a/installers/metrics/helm/values.yaml +++ b/installers/metrics/helm/values.yaml @@ -20,7 +20,7 @@ serviceAccount: # the image prefix and tag to use for the 'pgo-deployer' container pgo_image_prefix: "registry.developers.crunchydata.com/crunchydata" -pgo_image_tag: "centos7-4.5.0" +pgo_image_tag: "ubi8-4.6.10" # ===================== # Configuration Options diff --git a/installers/metrics/kubectl/postgres-operator-metrics-ocp311.yml b/installers/metrics/kubectl/postgres-operator-metrics-ocp311.yml index ca4daafd16..afdd9dc133 100644 --- a/installers/metrics/kubectl/postgres-operator-metrics-ocp311.yml +++ b/installers/metrics/kubectl/postgres-operator-metrics-ocp311.yml @@ -5,6 +5,7 @@ metadata: namespace: pgo labels: app.kubernetes.io/name: postgres-operator-monitoring + vendor: crunchydata --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding @@ -13,6 +14,7 @@ metadata: namespace: pgo labels: app.kubernetes.io/name: postgres-operator-monitoring + vendor: crunchydata roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole @@ -29,6 +31,7 @@ metadata: namespace: pgo labels: app.kubernetes.io/name: postgres-operator-monitoring + vendor: crunchydata data: values.yaml: |- # ===================== @@ -84,6 +87,7 @@ metadata: namespace: pgo labels: app.kubernetes.io/name: postgres-operator-monitoring + vendor: crunchydata spec: backoffLimit: 0 template: @@ -91,12 +95,13 @@ spec: name: pgo-metrics-deploy labels: app.kubernetes.io/name: postgres-operator-monitoring + vendor: crunchydata spec: serviceAccountName: pgo-metrics-deployer-sa restartPolicy: Never containers: - name: pgo-metrics-deploy - image: registry.developers.crunchydata.com/crunchydata/pgo-deployer:centos7-4.5.0 + image: registry.developers.crunchydata.com/crunchydata/pgo-deployer:ubi8-4.6.10 imagePullPolicy: IfNotPresent env: - name: DEPLOY_ACTION diff --git a/installers/metrics/kubectl/postgres-operator-metrics.yml b/installers/metrics/kubectl/postgres-operator-metrics.yml index e1cc94fd5a..dabb857554 100644 --- a/installers/metrics/kubectl/postgres-operator-metrics.yml +++ b/installers/metrics/kubectl/postgres-operator-metrics.yml @@ -5,6 +5,7 @@ 
metadata: namespace: pgo labels: app.kubernetes.io/name: postgres-operator-monitoring + vendor: crunchydata --- kind: ClusterRole apiVersion: rbac.authorization.k8s.io/v1 @@ -12,6 +13,7 @@ metadata: name: pgo-metrics-deployer-cr labels: app.kubernetes.io/name: postgres-operator-monitoring + vendor: crunchydata rules: - apiGroups: - '' @@ -83,6 +85,7 @@ metadata: namespace: pgo labels: app.kubernetes.io/name: postgres-operator-monitoring + vendor: crunchydata data: values.yaml: |- # ===================== @@ -137,6 +140,7 @@ metadata: name: pgo-metrics-deployer-crb labels: app.kubernetes.io/name: postgres-operator-monitoring + vendor: crunchydata roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole @@ -153,6 +157,7 @@ metadata: namespace: pgo labels: app.kubernetes.io/name: postgres-operator-monitoring + vendor: crunchydata spec: backoffLimit: 0 template: @@ -160,12 +165,13 @@ spec: name: pgo-metrics-deploy labels: app.kubernetes.io/name: postgres-operator-monitoring + vendor: crunchydata spec: serviceAccountName: pgo-metrics-deployer-sa restartPolicy: Never containers: - name: pgo-metrics-deploy - image: registry.developers.crunchydata.com/crunchydata/pgo-deployer:centos7-4.5.0 + image: registry.developers.crunchydata.com/crunchydata/pgo-deployer:ubi8-4.6.10 imagePullPolicy: IfNotPresent env: - name: DEPLOY_ACTION diff --git a/installers/olm/Makefile b/installers/olm/Makefile index 9fefafd441..783acbc046 100644 --- a/installers/olm/Makefile +++ b/installers/olm/Makefile @@ -2,16 +2,16 @@ .SUFFIXES: CCP_IMAGE_PREFIX ?= registry.developers.crunchydata.com/crunchydata -CCP_PG_FULLVERSION ?= 12.4 +CCP_PG_FULLVERSION ?= 13.10 CCP_POSTGIS_VERSION ?= 3.0 CONTAINER ?= docker KUBECONFIG ?= $(HOME)/.kube/config OLM_SDK_VERSION ?= 0.15.1 OLM_TOOLS ?= registry.localhost:5000/postgres-operator-olm-tools:$(OLM_SDK_VERSION) OLM_VERSION ?= 0.15.1 -PGO_BASEOS ?= centos7 +PGO_BASEOS ?= ubi8 PGO_IMAGE_PREFIX ?= registry.developers.crunchydata.com/crunchydata -PGO_VERSION ?= 4.5.0 +PGO_VERSION ?= 4.6.10 PGO_IMAGE_TAG ?= $(PGO_BASEOS)-$(PGO_VERSION) CCP_IMAGE_TAG ?= $(PGO_BASEOS)-$(CCP_PG_FULLVERSION)-$(PGO_VERSION) CCP_POSTGIS_IMAGE_TAG ?= $(PGO_BASEOS)-$(CCP_PG_FULLVERSION)-$(CCP_POSTGIS_VERSION)-$(PGO_VERSION) diff --git a/installers/olm/README.md b/installers/olm/README.md index 207f85daa8..51a04fedef 100644 --- a/installers/olm/README.md +++ b/installers/olm/README.md @@ -1,13 +1,33 @@ - This directory contains the files that are used to install [Crunchy PostgreSQL for Kubernetes][hub-listing], -which uses the PostgreSQL Operator, using [Operator Lifecycle Manager][OLM]. +which includes PGO, the Postgres Operator from [Crunchy Data][crunchy-data], using [Operator Lifecycle Manager][OLM]. The integration centers around a [ClusterServiceVersion][olm-csv] [manifest](./postgresoperator.csv.yaml) that gets packaged for OperatorHub. Changes there are accepted only if they pass all the [scorecard][] tests. Consult the [technical requirements][hub-contrib] when making changes. 
+[crunchy-data]: https://www.crunchydata.com
 [hub-contrib]: https://github.com/operator-framework/community-operators/blob/master/docs/contributing.md
 [hub-listing]: https://operatorhub.io/operator/postgresql
 [olm-csv]: https://github.com/operator-framework/operator-lifecycle-manager/blob/master/doc/design/building-your-csv.md
 [OLM]: https://github.com/operator-framework/operator-lifecycle-manager
 [scorecard]: https://sdk.operatorframework.io/docs/scorecard/
+
+## Testing
+
+### Setup
+
+Build the tooling image and install `yq`, which the packaging steps below require:
+
+```
+make docker-package docker-verify
+```
+
+```
+pip3 install yq
+```
+
+### Run the tests
+
+```
+make install-olm # install OLM framework
+make package     # build OLM package
+make verify      # verify OLM package
+```
diff --git a/installers/olm/description.openshift.md b/installers/olm/description.openshift.md
index ad31cbe1e5..dcb3116fff 100644
--- a/installers/olm/description.openshift.md
+++ b/installers/olm/description.openshift.md
@@ -15,6 +15,9 @@ providing the essential features you need to keep your PostgreSQL clusters up an
  Set how long you want your backups retained for. Works great with very large databases!
 - **Monitoring**: Track the health of your PostgreSQL clusters using the open source [pgMonitor][] library.
 - **Clone**: Create new clusters from your existing clusters or backups with a single [`pgo create cluster --restore-from`][pgo-create-cluster] command.
+- **TLS**: Secure communication between your applications and data servers by [enabling TLS for your PostgreSQL servers][pgo-task-tls], including the ability to enforce that all of your connections use TLS.
+- **Connection Pooling**: Use [pgBouncer][] for connection pooling.
+- **Affinity and Tolerations**: Have your PostgreSQL clusters deployed to [Kubernetes Nodes][k8s-nodes] of your preference with [node affinity][high-availability-node-affinity], or designate which nodes Kubernetes can schedule PostgreSQL instances to with Kubernetes [tolerations][high-availability-tolerations].
 - **Full Customizability**: Crunchy PostgreSQL for OpenShift makes it easy to get your own PostgreSQL-as-a-Service up and running on and lets make further enhancements to customize your deployments, including:
 
     - Selecting different storage classes for your primary, replica, and backup storage
@@ -27,16 +30,20 @@
 and much more!
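As a concrete companion to the connection-pooling bullet above, attaching pgBouncer to an existing cluster is a one-line operation with the 4.x `pgo` client. This is a hedged example only; the cluster name `hippo` and the `pgo` namespace are assumptions, not part of this change:

```
# Hedged example: attach, and later remove, a pgBouncer connection pooler
# on an existing cluster named "hippo" (names assumed for illustration).
pgo create pgbouncer hippo -n pgo
pgo delete pgbouncer hippo -n pgo
```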
[disaster-recovery]: https://access.crunchydata.com/documentation/postgres-operator/latest/architecture/disaster-recovery/ [high-availability]: https://access.crunchydata.com/documentation/postgres-operator/latest/architecture/high-availability/ +[high-availability-node-affinity]: https://access.crunchydata.com/documentation/postgres-operator/latest/architecture/high-availability/#node-affinity +[high-availability-tolerations]: https://access.crunchydata.com/documentation/postgres-operator/latest/architecture/high-availability/#tolerations [pgo-create-cluster]: https://access.crunchydata.com/documentation/postgres-operator/latest/pgo-client/reference/pgo_create_cluster/ +[pgo-task-tls]: https://access.crunchydata.com/documentation/postgres-operator/latest/tutorial/tls/ [provisioning]: https://access.crunchydata.com/documentation/postgres-operator/latest/architecture/provisioning/ [k8s-anti-affinity]: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#inter-pod-affinity-and-anti-affinity +[k8s-nodes]: https://kubernetes.io/docs/concepts/architecture/nodes/ [pgBackRest]: https://www.pgbackrest.org +[pgBouncer]: https://access.crunchydata.com/documentation/postgres-operator/latest/tutorial/pgbouncer/ [pgMonitor]: https://github.com/CrunchyData/pgmonitor - -## Before You Begin +## Pre-Installation There are a few manual steps that the cluster administrator must perform prior to installing the PostgreSQL Operator. At the very least, it must be provided with an initial configuration. @@ -49,13 +56,6 @@ export PGO_OPERATOR_NAMESPACE=pgo oc create namespace "$PGO_OPERATOR_NAMESPACE" ``` -Next, clone the PostgreSQL Operator repository locally. - -``` -git clone -b v${PGO_VERSION} https://github.com/CrunchyData/postgres-operator.git -cd postgres-operator -``` - ### Security For the PostgreSQL Operator and PostgreSQL clusters to run in the recommended `restricted` [Security Context Constraint][], @@ -63,39 +63,22 @@ edit `conf/postgres-operator/pgo.yaml` and set `DisableFSGroup` to `true`. [Security Context Constraint]: https://docs.openshift.com/container-platform/latest/authentication/managing-security-context-constraints.html -### PostgreSQL Operator Configuration +### Secrets (optional) -Edit `conf/postgres-operator/pgo.yaml` to configure the deployment. Look over all of the options and make any -changes necessary for your environment. A [full description of each option][pgo-yaml-reference] is available in the documentation. - -[pgo-yaml-reference]: https://access.crunchydata.com/documentation/postgres-operator/${PGO_VERSION}/configuration/pgo-yaml-configuration/ - -When the file is ready, upload the entire directory to the `pgo-config` ConfigMap. - -``` -oc -n "$PGO_OPERATOR_NAMESPACE" create configmap pgo-config \ - --from-file=./conf/postgres-operator -``` - -### Secrets - -Configure pgBackRest for your environment. If you do not plan to use AWS S3 to store backups, you can omit -the `aws-s3` keys below. 
+If you plan to use AWS S3 to store backups, you can configure your environment to automatically provide your AWS S3 credentials to all newly created PostgreSQL clusters:
 
 ```
 oc -n "$PGO_OPERATOR_NAMESPACE" create secret generic pgo-backrest-repo-config \
-  --from-file=./installers/ansible/roles/pgo-operator/files/pgo-backrest-repo/config \
-  --from-file=./installers/ansible/roles/pgo-operator/files/pgo-backrest-repo/sshd_config \
-  --from-file=./installers/ansible/roles/pgo-operator/files/pgo-backrest-repo/aws-s3-ca.crt \
   --from-literal=aws-s3-key="" \
   --from-literal=aws-s3-key-secret=""
+oc -n "$PGO_OPERATOR_NAMESPACE" label secret pgo-backrest-repo-config vendor=crunchydata
 ```
 
 ### Certificates (optional)
 
-The PostgreSQL Operator has an API that uses TLS to communicate securely with clients. If you have
-a certificate bundle validated by your organization, you can install it now. If not, the API will
-automatically generate and use a self-signed certificate.
+The PostgreSQL Operator has an API that uses TLS to communicate securely with clients. If a certificate is not provided, the API will automatically generate one for you.
+
+If you have a certificate bundle validated by your organization, you can install it now.
 
 ```
 oc -n "$PGO_OPERATOR_NAMESPACE" create secret tls pgo.tls \
@@ -105,8 +88,156 @@ oc -n "$PGO_OPERATOR_NAMESPACE" create secret tls pgo.tls \
 
 Once these resources are in place, the PostgreSQL Operator can be installed into the cluster.
 
+## Installation
+
+You can now go ahead and install the PostgreSQL Operator from OperatorHub.
+
+### Security
+
+For the PostgreSQL Operator and PostgreSQL clusters to run in the recommended `restricted` [Security Context Constraint][],
+edit the ConfigMap `pgo-config`, find the `pgo.yaml` entry, and set `DisableFSGroup` to `true`.
+
+[Security Context Constraint]: https://docs.openshift.com/container-platform/latest/authentication/managing-security-context-constraints.html
+
+You will have to scale the `postgres-operator` Deployment down and up for the above change to take effect:
+
+```
+oc -n pgo scale --replicas 0 deployment/postgres-operator
+oc -n pgo scale --replicas 1 deployment/postgres-operator
+```
+
+## Post-Installation
+
+### Tutorial
+
+For a guide on how to perform many of the daily functions of the PostgreSQL Operator, we recommend that you read the [Postgres Operator tutorial][pgo-tutorial].
+
+[pgo-tutorial]: https://access.crunchydata.com/documentation/postgres-operator/latest/tutorial/create-cluster/
+
+However, the guide below will show you how to create a Postgres cluster from a custom resource or by using the `pgo` client.
+
+### Create a PostgreSQL Cluster from a Custom Resource
+
+The fundamental workflow for interfacing with a PostgreSQL Operator Custom
+Resource Definition is for creating a PostgreSQL cluster. There are several
+objects that a PostgreSQL cluster requires to be deployed, including:
+
+- Secrets
+  - Information for setting up a pgBackRest repository
+  - PostgreSQL superuser bootstrap credentials
+  - PostgreSQL replication user bootstrap credentials
+  - PostgreSQL standard user bootstrap credentials
+
+Additionally, if you want to add some of the other sidecars, you may need to
+create additional secrets.
 
-## After You Install
+The good news is that if you do not provide these objects, the PostgreSQL
+Operator will create them for you to get your Postgres cluster up and running!
+
+The following goes through how to create a PostgreSQL cluster called
+`hippo` by creating a new custom resource.
+
+```
+# this variable is the name of the cluster being created
+export pgo_cluster_name=hippo
+# this variable is the namespace the cluster is being deployed into
+export cluster_namespace=pgo
+# this variable is set to the location of your image repository
+export cluster_image_prefix=registry.developers.crunchydata.com/crunchydata
+
+cat <<-EOF > "${pgo_cluster_name}-pgcluster.yaml"
+apiVersion: crunchydata.com/v1
+kind: Pgcluster
+metadata:
+  annotations:
+    current-primary: ${pgo_cluster_name}
+  labels:
+    crunchy-pgha-scope: ${pgo_cluster_name}
+    deployment-name: ${pgo_cluster_name}
+    name: ${pgo_cluster_name}
+    pg-cluster: ${pgo_cluster_name}
+    pgo-version: ${PGO_VERSION}
+    pgouser: admin
+  name: ${pgo_cluster_name}
+  namespace: ${cluster_namespace}
+spec:
+  BackrestStorage:
+    accessmode: ReadWriteMany
+    matchLabels: ""
+    name: ""
+    size: 1G
+    storageclass: ""
+    storagetype: create
+    supplementalgroups: ""
+  PrimaryStorage:
+    accessmode: ReadWriteMany
+    matchLabels: ""
+    name: ${pgo_cluster_name}
+    size: 1G
+    storageclass: ""
+    storagetype: create
+    supplementalgroups: ""
+  ReplicaStorage:
+    accessmode: ReadWriteMany
+    matchLabels: ""
+    name: ""
+    size: 1G
+    storageclass: ""
+    storagetype: create
+    supplementalgroups: ""
+  annotations: {}
+  ccpimage: crunchy-postgres-ha
+  ccpimageprefix: ${cluster_image_prefix}
+  ccpimagetag: ubi8-13.10-${PGO_VERSION}
+  clustername: ${pgo_cluster_name}
+  database: ${pgo_cluster_name}
+  exporterport: "9187"
+  limits: {}
+  name: ${pgo_cluster_name}
+  namespace: ${cluster_namespace}
+  pgDataSource:
+    restoreFrom: ""
+    restoreOpts: ""
+  pgbadgerport: "10000"
+  pgoimageprefix: ${cluster_image_prefix}
+  podAntiAffinity:
+    default: preferred
+    pgBackRest: preferred
+    pgBouncer: preferred
+  port: "5432"
+  tolerations: []
+  user: hippo
+  userlabels:
+    pgo-version: ${PGO_VERSION}
+EOF
+
+oc apply -f "${pgo_cluster_name}-pgcluster.yaml"
+```
+
+And that's all! The PostgreSQL Operator will go ahead and create the cluster.
+
+If you have the PostgreSQL client `psql` installed on your host machine, you can
+test the connection to the PostgreSQL cluster using the following command:
+
+```
+# namespace that the cluster is running in
+export PGO_OPERATOR_NAMESPACE=pgo
+# name of the cluster
+export pgo_cluster_name=hippo
+# name of the user whose password we want to get
+export pgo_cluster_username=hippo
+
+# get the password of the user and set it to a recognized psql environment variable
+export PGPASSWORD=$(oc -n "${PGO_OPERATOR_NAMESPACE}" get secrets \
+  "${pgo_cluster_name}-${pgo_cluster_username}-secret" -o "jsonpath={.data['password']}" | base64 -d)
+
+# set up a port-forward either in a new terminal, or in the same terminal in the background:
+oc -n pgo port-forward svc/hippo 5432:5432 &
+
+psql -h localhost -U "${pgo_cluster_username}" "${pgo_cluster_name}"
+```
+
+### Create a PostgreSQL Cluster with the `pgo` Client
 
 Once the PostgreSQL Operator is installed in your OpenShift cluster, you will need to do a few things
 to use the [PostgreSQL Operator Client][pgo-client].
@@ -116,8 +247,13 @@ to use the [PostgreSQL Operator Client][pgo-client].
 
 Install the first set of client credentials and download the `pgo` binary and client certificates.
 ```
-PGO_CMD=oc ./deploy/install-bootstrap-creds.sh
-PGO_CMD=oc ./installers/kubectl/client-setup.sh
+curl https://raw.githubusercontent.com/CrunchyData/postgres-operator/v${PGO_VERSION}/deploy/install-bootstrap-creds.sh > install-bootstrap-creds.sh
+curl https://raw.githubusercontent.com/CrunchyData/postgres-operator/v${PGO_VERSION}/installers/kubectl/client-setup.sh > client-setup.sh
+
+chmod +x install-bootstrap-creds.sh client-setup.sh
+
+PGO_CMD=oc ./install-bootstrap-creds.sh
+PGO_CMD=oc ./client-setup.sh
 ```
 
 The client needs to be able to reach the PostgreSQL Operator API from outside the OpenShift cluster.
@@ -143,3 +279,37 @@ pgo version
 # pgo client version ${PGO_VERSION}
 # pgo-apiserver version ${PGO_VERSION}
 ```
+
+
+You can then create a cluster with the `pgo` client as simply as this:
+
+```
+pgo create cluster -n pgo hippo
+```
+
+The cluster may take a few moments to provision. You can verify that the cluster is up and running by using the `pgo test` command:
+
+```
+pgo test cluster -n pgo hippo
+```
+
+If you have the PostgreSQL client `psql` installed on your host machine, you can
+test the connection to the PostgreSQL cluster using the following command:
+
+```
+# namespace that the cluster is running in
+export PGO_OPERATOR_NAMESPACE=pgo
+# name of the cluster
+export pgo_cluster_name=hippo
+# name of the user whose password we want to get
+export pgo_cluster_username=hippo
+
+# get the password of the user and set it to a recognized psql environment variable
+export PGPASSWORD=$(oc -n "${PGO_OPERATOR_NAMESPACE}" get secrets \
+  "${pgo_cluster_name}-${pgo_cluster_username}-secret" -o "jsonpath={.data['password']}" | base64 -d)
+
+# set up a port-forward either in a new terminal, or in the same terminal in the background:
+oc -n pgo port-forward svc/hippo 5432:5432 &
+
+psql -h localhost -U "${pgo_cluster_username}" "${pgo_cluster_name}"
+```
diff --git a/installers/olm/description.upstream.md b/installers/olm/description.upstream.md
index 8838098032..df2e148fd4 100644
--- a/installers/olm/description.upstream.md
+++ b/installers/olm/description.upstream.md
@@ -15,6 +15,9 @@ providing the essential features you need to keep your PostgreSQL clusters up an
  Set how long you want your backups retained for. Works great with very large databases!
 - **Monitoring**: Track the health of your PostgreSQL clusters using the open source [pgMonitor][] library.
 - **Clone**: Create new clusters from your existing clusters or backups with a single [`pgo create cluster --restore-from`][pgo-create-cluster] command.
+- **TLS**: Secure communication between your applications and data servers by [enabling TLS for your PostgreSQL servers][pgo-task-tls], including the ability to enforce that all of your connections use TLS.
+- **Connection Pooling**: Use [pgBouncer][] for connection pooling.
+- **Affinity and Tolerations**: Have your PostgreSQL clusters deployed to [Kubernetes Nodes][k8s-nodes] of your preference with [node affinity][high-availability-node-affinity], or designate which nodes Kubernetes can schedule PostgreSQL instances to with Kubernetes [tolerations][high-availability-tolerations].
 - **Full Customizability**: Crunchy PostgreSQL for Kubernetes makes it easy to get your own PostgreSQL-as-a-Service up and running on and lets make further enhancements to customize your deployments, including:
 
     - Selecting different storage classes for your primary, replica, and backup storage
@@ -27,16 +30,21 @@
 and much more!
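The high-availability and anti-affinity bullets above also map to single `pgo` invocations. The following is a hedged sketch only: the flag names are assumptions based on the 4.x client, and the cluster name and namespace are illustrative:

```
# Hedged sketch: create an HA cluster with two replicas and required
# pod anti-affinity; flag names are assumptions from the 4.x pgo client.
pgo create cluster hippo -n pgo \
  --replica-count=2 \
  --pod-anti-affinity=required
```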
[disaster-recovery]: https://access.crunchydata.com/documentation/postgres-operator/latest/architecture/disaster-recovery/
 [high-availability]: https://access.crunchydata.com/documentation/postgres-operator/latest/architecture/high-availability/
+[high-availability-node-affinity]: https://access.crunchydata.com/documentation/postgres-operator/latest/architecture/high-availability/#node-affinity
+[high-availability-tolerations]: https://access.crunchydata.com/documentation/postgres-operator/latest/architecture/high-availability/#tolerations
 [pgo-create-cluster]: https://access.crunchydata.com/documentation/postgres-operator/latest/pgo-client/reference/pgo_create_cluster/
+[pgo-task-tls]: https://access.crunchydata.com/documentation/postgres-operator/latest/tutorial/tls/
 [provisioning]: https://access.crunchydata.com/documentation/postgres-operator/latest/architecture/provisioning/
 [k8s-anti-affinity]: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#inter-pod-affinity-and-anti-affinity
+[k8s-nodes]: https://kubernetes.io/docs/concepts/architecture/nodes/
 [pgBackRest]: https://www.pgbackrest.org
+[pgBouncer]: https://access.crunchydata.com/documentation/postgres-operator/latest/tutorial/pgbouncer/
 [pgMonitor]: https://github.com/CrunchyData/pgmonitor
 
-## Before You Begin
+## Pre-Installation
 
 There are a few manual steps that the cluster administrator must perform prior to installing the PostgreSQL Operator.
 At the very least, it must be provided with an initial configuration.
 
@@ -49,57 +57,167 @@
 export PGO_OPERATOR_NAMESPACE=pgo
 kubectl create namespace "$PGO_OPERATOR_NAMESPACE"
 ```
 
-Next, clone the PostgreSQL Operator repository locally.
+### Secrets (optional)
+
+If you plan to use AWS S3 to store backups, you can configure your environment to automatically provide your AWS S3 credentials to all newly created PostgreSQL clusters:
 
 ```
-git clone -b v${PGO_VERSION} https://github.com/CrunchyData/postgres-operator.git
-cd postgres-operator
+kubectl -n "$PGO_OPERATOR_NAMESPACE" create secret generic pgo-backrest-repo-config \
+  --from-literal=aws-s3-key="" \
+  --from-literal=aws-s3-key-secret=""
+kubectl -n "$PGO_OPERATOR_NAMESPACE" label secret pgo-backrest-repo-config vendor=crunchydata
 ```
 
-### PostgreSQL Operator Configuration
-
-Edit `conf/postgres-operator/pgo.yaml` to configure the deployment. Look over all of the options and make any
-changes necessary for your environment. A [full description of each option][pgo-yaml-reference] is available in the documentation.
+### Certificates (optional)
 
-[pgo-yaml-reference]: https://access.crunchydata.com/documentation/postgres-operator/${PGO_VERSION}/configuration/pgo-yaml-configuration/
+The PostgreSQL Operator has an API that uses TLS to communicate securely with clients. If a certificate is not provided, the API will automatically generate one for you.
 
-When the file is ready, upload the entire directory to the `pgo-config` ConfigMap.
+If you have a certificate bundle validated by your organization, you can install it now.
 
 ```
-kubectl -n "$PGO_OPERATOR_NAMESPACE" create configmap pgo-config \
-  --from-file=./conf/postgres-operator
+kubectl -n "$PGO_OPERATOR_NAMESPACE" create secret tls pgo.tls \
+  --cert=/path/to/server.crt \
+  --key=/path/to/server.key
 ```
 
-### Secrets
+Once these resources are in place, the PostgreSQL Operator can be installed into the cluster.
+
+## Installation
+
+You can now go ahead and install the PostgreSQL Operator from OperatorHub.
+
+## Post-Installation
 
-Configure pgBackRest for your environment.
If you do not plan to use AWS S3 to store backups, you can omit
-the `aws-s3` keys below.
+### Tutorial
+
+For a guide on how to perform many of the daily functions of the PostgreSQL Operator, we recommend that you read the [Postgres Operator tutorial][pgo-tutorial].
+
+[pgo-tutorial]: https://access.crunchydata.com/documentation/postgres-operator/latest/tutorial/create-cluster/
+
+However, the guide below shows how to create a Postgres cluster either from a custom resource or by using the `pgo` client.
+
+### Create a PostgreSQL Cluster from a Custom Resource
+
+The fundamental workflow for interfacing with the PostgreSQL Operator Custom
+Resource Definitions is creating a PostgreSQL cluster. There are several
+objects that a PostgreSQL cluster requires in order to be deployed, including:
+
+- Secrets
+  - Information for setting up a pgBackRest repository
+  - PostgreSQL superuser bootstrap credentials
+  - PostgreSQL replication user bootstrap credentials
+  - PostgreSQL standard user bootstrap credentials
+
+Additionally, if you want to add some of the other sidecars, you may need to
+create additional secrets.
+
+The good news is that if you do not provide these objects, the PostgreSQL
+Operator will create them for you to get your Postgres cluster up and running!
+
+The following goes through how to create a PostgreSQL cluster called
+`hippo` by creating a new custom resource.

 ```
-kubectl -n "$PGO_OPERATOR_NAMESPACE" create secret generic pgo-backrest-repo-config \
-  --from-file=./installers/ansible/roles/pgo-operator/files/pgo-backrest-repo/config \
-  --from-file=./installers/ansible/roles/pgo-operator/files/pgo-backrest-repo/sshd_config \
-  --from-file=./installers/ansible/roles/pgo-operator/files/pgo-backrest-repo/aws-s3-ca.crt \
-  --from-literal=aws-s3-key="" \
-  --from-literal=aws-s3-key-secret=""
+# this variable is the name of the cluster being created
+export pgo_cluster_name=hippo
+# this variable is the namespace the cluster is being deployed into
+export cluster_namespace=pgo
+# this variable is set to the location of your image repository
+export cluster_image_prefix=registry.developers.crunchydata.com/crunchydata
+
+cat <<-EOF > "${pgo_cluster_name}-pgcluster.yaml"
+apiVersion: crunchydata.com/v1
+kind: Pgcluster
+metadata:
+  annotations:
+    current-primary: ${pgo_cluster_name}
+  labels:
+    crunchy-pgha-scope: ${pgo_cluster_name}
+    deployment-name: ${pgo_cluster_name}
+    name: ${pgo_cluster_name}
+    pg-cluster: ${pgo_cluster_name}
+    pgo-version: ${PGO_VERSION}
+    pgouser: admin
+  name: ${pgo_cluster_name}
+  namespace: ${cluster_namespace}
+spec:
+  BackrestStorage:
+    accessmode: ReadWriteMany
+    matchLabels: ""
+    name: ""
+    size: 1G
+    storageclass: ""
+    storagetype: create
+    supplementalgroups: ""
+  PrimaryStorage:
+    accessmode: ReadWriteMany
+    matchLabels: ""
+    name: ${pgo_cluster_name}
+    size: 1G
+    storageclass: ""
+    storagetype: create
+    supplementalgroups: ""
+  ReplicaStorage:
+    accessmode: ReadWriteMany
+    matchLabels: ""
+    name: ""
+    size: 1G
+    storageclass: ""
+    storagetype: create
+    supplementalgroups: ""
+  annotations: {}
+  ccpimage: crunchy-postgres-ha
+  ccpimageprefix: ${cluster_image_prefix}
+  ccpimagetag: ubi8-13.10-${PGO_VERSION}
+  clustername: ${pgo_cluster_name}
+  database: ${pgo_cluster_name}
+  exporterport: "9187"
+  limits: {}
+  name: ${pgo_cluster_name}
+  namespace: ${cluster_namespace}
+  pgDataSource:
+    restoreFrom: ""
+    restoreOpts: ""
+  pgbadgerport: "10000"
+  pgoimageprefix: ${cluster_image_prefix}
+  podAntiAffinity:
+    default: preferred
+    pgBackRest: preferred
pgBouncer: preferred
+  port: "5432"
+  tolerations: []
+  user: hippo
+  userlabels:
+    pgo-version: ${PGO_VERSION}
+EOF
+
+kubectl apply -f "${pgo_cluster_name}-pgcluster.yaml"
 ```

-### Certificates (optional)
+And that's all! The PostgreSQL Operator will go ahead and create the cluster.

-The PostgreSQL Operator has an API that uses TLS to communicate securely with clients. If you have
-a certificate bundle validated by your organization, you can install it now. If not, the API will
-automatically generate and use a self-signed certificate.
+If you have the PostgreSQL client `psql` installed on your host machine, you can
+test the connection to the PostgreSQL cluster using the following command:

 ```
-kubectl -n "$PGO_OPERATOR_NAMESPACE" create secret tls pgo.tls \
-  --cert=/path/to/server.crt \
-  --key=/path/to/server.key
-```
+# namespace that the cluster is running in
+export PGO_OPERATOR_NAMESPACE=pgo
+# name of the cluster
+export pgo_cluster_name=hippo
+# name of the user whose password we want to get
+export pgo_cluster_username=hippo

-Once these resources are in place, the PostgreSQL Operator can be installed into the cluster.
+# get the password of the user and set it to a recognized psql environment variable
+export PGPASSWORD=$(kubectl -n "${PGO_OPERATOR_NAMESPACE}" get secrets \
+  "${pgo_cluster_name}-${pgo_cluster_username}-secret" -o "jsonpath={.data['password']}" | base64 -d)

+# set up a port-forward either in a new terminal, or in the same terminal in the background:
+kubectl -n pgo port-forward svc/hippo 5432:5432 &

-## After You Install
+psql -h localhost -U "${pgo_cluster_username}" "${pgo_cluster_name}"
+```
+
+### Create a PostgreSQL Cluster with the `pgo` Client

 Once the PostgreSQL Operator is installed in your Kubernetes cluster, you will need to do a few things
 to use the [PostgreSQL Operator Client][pgo-client].
@@ -109,8 +227,13 @@ to use the [PostgreSQL Operator Client][pgo-client].
 Install the first set of client credentials and download the `pgo` binary and client certificates.

 ```
-PGO_CMD=kubectl ./deploy/install-bootstrap-creds.sh
-PGO_CMD=kubectl ./installers/kubectl/client-setup.sh
+curl https://raw.githubusercontent.com/CrunchyData/postgres-operator/v${PGO_VERSION}/deploy/install-bootstrap-creds.sh > install-bootstrap-creds.sh
+curl https://raw.githubusercontent.com/CrunchyData/postgres-operator/v${PGO_VERSION}/installers/kubectl/client-setup.sh > client-setup.sh
+
+chmod +x install-bootstrap-creds.sh client-setup.sh
+
+PGO_CMD=kubectl ./install-bootstrap-creds.sh
+PGO_CMD=kubectl ./client-setup.sh
 ```

 The client needs to be able to reach the PostgreSQL Operator API from outside the Kubernetes cluster.
@@ -138,3 +261,36 @@ pgo version
 # pgo client version ${PGO_VERSION}
 # pgo-apiserver version ${PGO_VERSION}
 ```
+
+You can then create a cluster with the `pgo` client as simply as this:
+
+```
+pgo create cluster -n pgo hippo
+```
+
+The cluster may take a few moments to provision.
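+While you wait, you can watch the cluster's Pods come up (a sketch that assumes
+the standard `pg-cluster` label the Operator applies to cluster Pods):
+
+```
+kubectl -n pgo get pods --selector=pg-cluster=hippo --watch
+```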
You can verify that the cluster is up and running by using the `pgo test` command:
+
+```
+pgo test cluster -n pgo hippo
+```
+
+If you have the PostgreSQL client `psql` installed on your host machine, you can
+test the connection to the PostgreSQL cluster using the following command:
+
+```
+# namespace that the cluster is running in
+export PGO_OPERATOR_NAMESPACE=pgo
+# name of the cluster
+export pgo_cluster_name=hippo
+# name of the user whose password we want to get
+export pgo_cluster_username=hippo
+
+# get the password of the user and set it to a recognized psql environment variable
+export PGPASSWORD=$(kubectl -n "${PGO_OPERATOR_NAMESPACE}" get secrets \
+  "${pgo_cluster_name}-${pgo_cluster_username}-secret" -o "jsonpath={.data['password']}" | base64 -d)
+
+# set up a port-forward either in a new terminal, or in the same terminal in the background:
+kubectl -n pgo port-forward svc/hippo 5432:5432 &
+
+psql -h localhost -U "${pgo_cluster_username}" "${pgo_cluster_name}"
+```
diff --git a/installers/olm/postgresoperator.crd.descriptions.yaml b/installers/olm/postgresoperator.crd.descriptions.yaml
index 5d76dd4e0c..15c5b274ba 100644
--- a/installers/olm/postgresoperator.crd.descriptions.yaml
+++ b/installers/olm/postgresoperator.crd.descriptions.yaml
@@ -56,22 +56,6 @@
       x-descriptors:
         - 'urn:alm:descriptor:com.tectonic.ui:number'
-
-    - path: rootsecretname
-      displayName: PostgreSQL superuser credentials
-      description: The name of the Secret that contains the PostgreSQL superuser credentials
-      x-descriptors:
-        - 'urn:alm:descriptor:io.kubernetes:Secret'
-
-    - path: primarysecretname
-      displayName: PostgreSQL support service credentials
-      description: The name of the Secret that contains the credentials used for managing cluster instance authentication, e.g. connections for replicas
-      x-descriptors:
-        - 'urn:alm:descriptor:io.kubernetes:Secret'
-
-    - path: usersecretname
-      displayName: PostgreSQL user credentials
-      description: The name of the Secret that contains the PostgreSQL user credentials for logging into the PostgreSQL cluster
-      x-descriptors:
-        - 'urn:alm:descriptor:io.kubernetes:Secret'
-
     # `operator-sdk scorecard` expects this field to have a descriptor.
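+    # (The urn:alm:descriptor values are hints for OLM-aware UIs such as the
+    # OpenShift console, which use them to choose the widget rendered for a
+    # field, e.g. a Secret picker for io.kubernetes:Secret.)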
- path: PrimaryStorage displayName: PostgreSQL Primary Storage diff --git a/installers/olm/postgresoperator.crd.examples.yaml b/installers/olm/postgresoperator.crd.examples.yaml index d7783c2707..058e4b56f3 100644 --- a/installers/olm/postgresoperator.crd.examples.yaml +++ b/installers/olm/postgresoperator.crd.examples.yaml @@ -2,26 +2,54 @@ apiVersion: crunchydata.com/v1 kind: Pgcluster metadata: - name: example - labels: { archive: 'false' } + annotations: { current-primary: 'hippo' } + name: hippo + labels: + crunchy-pgha-scope: hippo + deployment-name: hippo + name: hippo + namespace: pgo + pg-cluster: hippo + pgo-version: '${PGO_VERSION}' spec: - name: example - clustername: example + name: hippo + namespace: pgo + clustername: hippo ccpimage: crunchy-postgres-ha ccpimagetag: '${CCP_IMAGE_TAG}' + BackrestStorage: + accessmode: ReadWriteMany + matchLabels: "" + name: "" + size: 5Gi + storageclass: "" + storagetype: dynamic + supplementalgroups: "" PrimaryStorage: - accessmode: ReadWriteOnce - size: 1G - storageclass: standard + accessmode: ReadWriteMany + matchLabels: "" + name: hippo + size: 5Gi + storageclass: "" + storagetype: dynamic + supplementalgroups: "" + ReplicaStorage: + accessmode: ReadWriteMany + matchLabels: "" + name: "" + size: 5Gi + storageclass: "" storagetype: dynamic - database: example + supplementalgroups: "" + database: hippo exporterport: '9187' pgbadgerport: '10000' + podAntiAffinity: + default: preferred port: '5432' - primarysecretname: example-primaryuser - rootsecretname: example-postgresuser - usersecretname: example-primaryuser - userlabels: { archive: 'false' } + user: hippo + userlabels: + pgo-version: '${PGO_VERSION}' --- apiVersion: crunchydata.com/v1 diff --git a/installers/olm/postgresoperator.crd.yaml b/installers/olm/postgresoperator.crd.yaml index f39ac244fc..c8516550e1 100644 --- a/installers/olm/postgresoperator.crd.yaml +++ b/installers/olm/postgresoperator.crd.yaml @@ -3,6 +3,8 @@ apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition metadata: name: pgclusters.crunchydata.com + labels: + vendor: crunchydata spec: group: crunchydata.com names: @@ -24,13 +26,10 @@ spec: exporterport: { type: string } name: { type: string } pgbadgerport: { type: string } - primarysecretname: { type: string } PrimaryStorage: { type: object } port: { type: string } - rootsecretname: { type: string } status: { type: string } userlabels: { type: object } - usersecretname: { type: string } status: properties: state: { type: string } @@ -40,6 +39,8 @@ apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition metadata: name: pgpolicies.crunchydata.com + labels: + vendor: crunchydata spec: group: crunchydata.com names: @@ -61,6 +62,8 @@ apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition metadata: name: pgreplicas.crunchydata.com + labels: + vendor: crunchydata spec: group: crunchydata.com names: @@ -82,6 +85,8 @@ apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition metadata: name: pgtasks.crunchydata.com + labels: + vendor: crunchydata spec: group: crunchydata.com names: diff --git a/installers/olm/postgresoperator.csv.images.yaml b/installers/olm/postgresoperator.csv.images.yaml index 301d117a67..c8f3a386d6 100644 --- a/installers/olm/postgresoperator.csv.images.yaml +++ b/installers/olm/postgresoperator.csv.images.yaml @@ -4,19 +4,14 @@ - { name: PGO_IMAGE_PREFIX, value: '${PGO_IMAGE_PREFIX}' } - { name: PGO_IMAGE_TAG, value: '${PGO_IMAGE_TAG}' } -- { name: RELATED_IMAGE_PGO_BACKREST, value: 
'${PGO_IMAGE_PREFIX}/pgo-backrest:${PGO_IMAGE_TAG}' } -- { name: RELATED_IMAGE_PGO_BACKREST_REPO, value: '${PGO_IMAGE_PREFIX}/pgo-backrest-repo:${PGO_IMAGE_TAG}' } +- { name: RELATED_IMAGE_PGO_BACKREST, value: '${CCP_IMAGE_PREFIX}/crunchy-pgbackrest:${CCP_IMAGE_TAG}' } +- { name: RELATED_IMAGE_PGO_BACKREST_REPO, value: '${CCP_IMAGE_PREFIX}/crunchy-pgbackrest-repo:${CCP_IMAGE_TAG}' } - { name: RELATED_IMAGE_PGO_CLIENT, value: '${PGO_IMAGE_PREFIX}/pgo-client:${PGO_IMAGE_TAG}' } - { name: RELATED_IMAGE_PGO_RMDATA, value: '${PGO_IMAGE_PREFIX}/pgo-rmdata:${PGO_IMAGE_TAG}' } -- { name: RELATED_IMAGE_PGO_SQL_RUNNER, value: '${PGO_IMAGE_PREFIX}/pgo-sqlrunner:${PGO_IMAGE_TAG}' } - { name: RELATED_IMAGE_CRUNCHY_POSTGRES_EXPORTER, value: '${PGO_IMAGE_PREFIX}/crunchy-postgres-exporter:${PGO_IMAGE_TAG}' } -- { name: RELATED_IMAGE_CRUNCHY_ADMIN, value: '${CCP_IMAGE_PREFIX}/crunchy-admin:${CCP_IMAGE_TAG}' } -- { name: RELATED_IMAGE_CRUNCHY_BACKREST_RESTORE, value: '${CCP_IMAGE_PREFIX}/crunchy-backrest-restore:${CCP_IMAGE_TAG}' } - { name: RELATED_IMAGE_CRUNCHY_PGADMIN, value: '${CCP_IMAGE_PREFIX}/crunchy-pgadmin4:${CCP_IMAGE_TAG}' } - { name: RELATED_IMAGE_CRUNCHY_PGBADGER, value: '${CCP_IMAGE_PREFIX}/crunchy-pgbadger:${CCP_IMAGE_TAG}' } - { name: RELATED_IMAGE_CRUNCHY_PGBOUNCER, value: '${CCP_IMAGE_PREFIX}/crunchy-pgbouncer:${CCP_IMAGE_TAG}' } -- { name: RELATED_IMAGE_CRUNCHY_PGDUMP, value: '${CCP_IMAGE_PREFIX}/crunchy-pgdump:${CCP_IMAGE_TAG}' } -- { name: RELATED_IMAGE_CRUNCHY_PGRESTORE, value: '${CCP_IMAGE_PREFIX}/crunchy-pgrestore:${CCP_IMAGE_TAG}' } - { name: RELATED_IMAGE_CRUNCHY_POSTGRES_HA, value: '${CCP_IMAGE_PREFIX}/crunchy-postgres-ha:${CCP_IMAGE_TAG}' } - { name: RELATED_IMAGE_CRUNCHY_POSTGRES_GIS_HA, value: '${CCP_IMAGE_PREFIX}/crunchy-postgres-gis-ha:${CCP_POSTGIS_IMAGE_TAG}' } diff --git a/installers/olm/postgresoperator.csv.yaml b/installers/olm/postgresoperator.csv.yaml index a6ae30d051..fd7c5a6371 100644 --- a/installers/olm/postgresoperator.csv.yaml +++ b/installers/olm/postgresoperator.csv.yaml @@ -107,8 +107,6 @@ spec: - endpoints - pods - pods/exec - - pods/log - - replicasets - secrets - services - persistentvolumeclaims @@ -121,10 +119,19 @@ spec: - update - delete - deletecollection + - apiGroups: + - '' + resources: + - pods/log + verbs: + - get + - list + - watch - apiGroups: - apps resources: - deployments + - replicasets verbs: - get - list @@ -200,10 +207,15 @@ spec: vendor: crunchydata spec: serviceAccountName: postgres-operator + securityContext: + runAsNonRoot: true containers: - name: apiserver image: '${PGO_IMAGE_PREFIX}/pgo-apiserver:${PGO_IMAGE_TAG}' imagePullPolicy: IfNotPresent + securityContext: + allowPrivilegeEscalation: false + privileged: false ports: - containerPort: 8443 readinessProbe: @@ -232,6 +244,9 @@ spec: - name: operator image: '${PGO_IMAGE_PREFIX}/postgres-operator:${PGO_IMAGE_TAG}' imagePullPolicy: IfNotPresent + securityContext: + allowPrivilegeEscalation: false + privileged: false env: - { name: NAMESPACE, valueFrom: { fieldRef: { fieldPath: "metadata.annotations['olm.targetNamespaces']" } } } - { name: PGO_INSTALLATION_NAME, valueFrom: { fieldRef: { fieldPath: "metadata.namespace" } } } @@ -243,6 +258,9 @@ spec: - name: scheduler image: '${PGO_IMAGE_PREFIX}/pgo-scheduler:${PGO_IMAGE_TAG}' imagePullPolicy: IfNotPresent + securityContext: + allowPrivilegeEscalation: false + privileged: false livenessProbe: exec: command: [ @@ -265,6 +283,9 @@ spec: - name: event image: '${PGO_IMAGE_PREFIX}/pgo-event:${PGO_IMAGE_TAG}' imagePullPolicy: IfNotPresent + 
securityContext: + allowPrivilegeEscalation: false + privileged: false livenessProbe: httpGet: path: /ping diff --git a/installers/olm/verify.sh b/installers/olm/verify.sh index f241a4e267..400df960fe 100755 --- a/installers/olm/verify.sh +++ b/installers/olm/verify.sh @@ -20,7 +20,7 @@ if command -v oc >/dev/null; then kubectl() { oc "$@"; } elif ! command -v kubectl >/dev/null; then # Use a version of `kubectl` that matches the Kubernetes server. - eval "kubectl() { kubectl-$( kubectl-1.16 version --output=json | + eval "kubectl() { kubectl-$( kubectl-1.19 version --output=json | jq --raw-output '.serverVersion | .major + "." + .minor')"' "$@"; }' fi diff --git a/internal/apiserver/backrestservice/backrestimpl.go b/internal/apiserver/backrestservice/backrestimpl.go index 8ffd051857..0d1072e7ef 100644 --- a/internal/apiserver/backrestservice/backrestimpl.go +++ b/internal/apiserver/backrestservice/backrestimpl.go @@ -1,7 +1,7 @@ package backrestservice /* -Copyright 2018 - 2020 Crunchy Data Solutions, Inc. +Copyright 2018 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -25,15 +25,15 @@ import ( "strings" "time" - "github.com/crunchydata/postgres-operator/internal/apiserver/backupoptions" - "github.com/crunchydata/postgres-operator/internal/operator" - "github.com/crunchydata/postgres-operator/internal/util" - "github.com/crunchydata/postgres-operator/internal/apiserver" + "github.com/crunchydata/postgres-operator/internal/apiserver/backupoptions" "github.com/crunchydata/postgres-operator/internal/config" "github.com/crunchydata/postgres-operator/internal/kubeapi" + "github.com/crunchydata/postgres-operator/internal/operator" + "github.com/crunchydata/postgres-operator/internal/util" crv1 "github.com/crunchydata/postgres-operator/pkg/apis/crunchydata.com/v1" msgs "github.com/crunchydata/postgres-operator/pkg/apiservermsgs" + log "github.com/sirupsen/logrus" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -42,9 +42,15 @@ import ( const containername = "database" -// pgBackRestInfoCommand is the baseline command used for getting the -// pgBackRest info -var pgBackRestInfoCommand = []string{"pgbackrest", "info", "--output", "json"} +var ( + // pgBackRestExpireCommand is the baseline command used for deleting a + // pgBackRest backup + pgBackRestExpireCommand = []string{"pgbackrest", "expire", "--set"} + + // pgBackRestInfoCommand is the baseline command used for getting the + // pgBackRest info + pgBackRestInfoCommand = []string{"pgbackrest", "info", "--output", "json"} +) // repoTypeFlagS3 is used for getting the pgBackRest info for a repository that // is stored in S3 @@ -76,7 +82,7 @@ func CreateBackup(request *msgs.CreateBackrestBackupRequest, ns, pgouser string) clusterList := crv1.PgclusterList{} var err error if request.Selector != "" { - //use the selector instead of an argument list to filter on + // use the selector instead of an argument list to filter on cl, err := apiserver.Clientset. CrunchydataV1().Pgclusters(ns). 
List(ctx, metav1.ListOptions{LabelSelector: request.Selector}) @@ -144,18 +150,14 @@ func CreateBackup(request *msgs.CreateBackrestBackupRequest, ns, pgouser string) return resp } - if cluster.Labels[config.LABEL_BACKREST] != "true" { - resp.Status.Code = msgs.Error - resp.Status.Msg = clusterName + " does not have pgbackrest enabled" - return resp - } - - err = util.ValidateBackrestStorageTypeOnBackupRestore(request.BackrestStorageType, - cluster.Spec.UserLabels[config.LABEL_BACKREST_STORAGE_TYPE], false) - if err != nil { - resp.Status.Code = msgs.Error - resp.Status.Msg = err.Error() - return resp + // if a specific pgBackRest storage type was passed in to perform the + // backup, validate that this cluster can support it + if request.BackrestStorageType != "" { + if err := apiserver.ValidateBackrestStorageTypeForCommand(cluster, request.BackrestStorageType); err != nil { + resp.Status.Code = msgs.Error + resp.Status.Msg = err.Error() + return resp + } } err = apiserver.Clientset.CrunchydataV1().Pgtasks(ns).Delete(ctx, taskName, metav1.DeleteOptions{}) @@ -165,9 +167,7 @@ func CreateBackup(request *msgs.CreateBackrestBackupRequest, ns, pgouser string) return resp } else { - //remove any previous backup job - - //selector := config.LABEL_PG_CLUSTER + "=" + clusterName + "," + config.LABEL_BACKREST + "=true" + // remove any previous backup job selector := config.LABEL_BACKREST_COMMAND + "=" + crv1.PgtaskBackrestBackup + "," + config.LABEL_PG_CLUSTER + "=" + clusterName + "," + config.LABEL_BACKREST + "=true" deletePropagation := metav1.DeletePropagationForeground err = apiserver.Clientset. @@ -179,7 +179,7 @@ func CreateBackup(request *msgs.CreateBackrestBackupRequest, ns, pgouser string) log.Error(err) } - //a hack sort of due to slow propagation + // a hack sort of due to slow propagation for i := 0; i < 3; i++ { jobList, err := apiserver.Clientset.BatchV1().Jobs(ns).List(ctx, metav1.ListOptions{LabelSelector: selector}) if err != nil { @@ -195,7 +195,7 @@ func CreateBackup(request *msgs.CreateBackrestBackupRequest, ns, pgouser string) // get pod name from cluster var podname string - podname, err = getBackrestRepoPodName(cluster, ns) + podname, err = getBackrestRepoPodName(cluster) if err != nil { log.Error(err) @@ -205,10 +205,9 @@ func CreateBackup(request *msgs.CreateBackrestBackupRequest, ns, pgouser string) } // check if primary is ready - if err := isPrimaryReady(cluster, ns); err != nil { - log.Error(err) + if !isPrimaryReady(cluster) { resp.Status.Code = msgs.Error - resp.Status.Msg = err.Error() + resp.Status.Msg = "primary pod is not in Ready state" return resp } @@ -217,9 +216,8 @@ func CreateBackup(request *msgs.CreateBackrestBackupRequest, ns, pgouser string) _, err = apiserver.Clientset.CrunchydataV1().Pgtasks(ns).Create(ctx, getBackupParams( - cluster.ObjectMeta.Labels[config.LABEL_PG_CLUSTER_IDENTIFIER], clusterName, taskName, crv1.PgtaskBackrestBackup, podname, "database", - util.GetValueOrDefault(cluster.Spec.PGOImagePrefix, apiserver.Pgo.Pgo.PGOImagePrefix), + util.GetValueOrDefault(cluster.Spec.CCPImagePrefix, apiserver.Pgo.Cluster.CCPImagePrefix), request.BackupOpts, request.BackrestStorageType, operator.GetS3VerifyTLSSetting(cluster), jobName, ns, pgouser), metav1.CreateOptions{}, ) @@ -235,7 +233,94 @@ func CreateBackup(request *msgs.CreateBackrestBackupRequest, ns, pgouser string) return resp } -func getBackupParams(identifier, clusterName, taskName, action, podName, containerName, imagePrefix, backupOpts, backrestStorageType, s3VerifyTLS, jobName, ns, pgouser 
string) *crv1.Pgtask {
+// DeleteBackup deletes a specific backup from a pgBackRest repository
+func DeleteBackup(request msgs.DeleteBackrestBackupRequest) msgs.DeleteBackrestBackupResponse {
+	ctx := context.TODO()
+	response := msgs.DeleteBackrestBackupResponse{
+		Status: msgs.Status{
+			Code: msgs.Ok,
+		},
+	}
+
+	// first, make an attempt to get the cluster. if it does not exist, return
+	// an error
+	cluster, err := apiserver.Clientset.CrunchydataV1().Pgclusters(request.Namespace).
+		Get(ctx, request.ClusterName, metav1.GetOptions{})
+	if err != nil {
+		log.Error(err)
+		response.Code = msgs.Error
+		response.Msg = err.Error()
+		return response
+	}
+
+	// either we can delete the backup or we can't, and the only way to find
+	// out is by trying
+	log.Debugf("attempting to delete backup %q cluster %q", request.Target, cluster.Name)
+
+	// get the pgbackrest Pod name
+	podName, err := getBackrestRepoPodName(cluster)
+	if err != nil {
+		log.Error(err)
+		response.Code = msgs.Error
+		response.Msg = err.Error()
+		return response
+	}
+
+	// determine if TLS verification is enabled or not
+	verifyTLS, _ := strconv.ParseBool(operator.GetS3VerifyTLSSetting(cluster))
+
+	// set up the command
+	cmd := pgBackRestExpireCommand
+	cmd = append(cmd, request.Target)
+
+	// if the list of storage types is empty, assume the posix storage type
+	storageTypes := cluster.Spec.BackrestStorageTypes
+	if len(storageTypes) == 0 {
+		storageTypes = append(storageTypes, crv1.BackrestStorageTypePosix)
+	}
+
+	// otherwise, iterate through the different repository types that are
+	// available. if it's a non-local repository, we need to set an explicit
+	// "--repo-type"
+	ok := false
+
+	for _, storageType := range storageTypes {
+		c := cmd
+
+		switch storageType {
+		default: // do nothing
+		case crv1.BackrestStorageTypeS3:
+			c = append(c, repoTypeFlagS3...)
+
+			if !verifyTLS {
+				c = append(c, noRepoS3VerifyTLS)
+			}
+		}
+
+		// we don't necessarily care about the error here, because we're
+		// looking for which of the repos contains the target backup. We log
+		// the error, and report a failure only if no repository succeeds
+		if _, stderr, err := kubeapi.ExecToPodThroughAPI(apiserver.RESTConfig,
+			apiserver.Clientset, c, containername, podName, cluster.Namespace, nil); err != nil {
+			log.Infof("repo type %s does not contain backup %s, or another error occurred.", storageType, request.Target)
+			log.Info(stderr)
+		} else {
+			ok = true
+		}
+	}
+
+	// if we never deleted the backup, provide a message as to why
+	if !ok {
+		msg := fmt.Sprintf("could not find backup %s in any repository; check the logs for other errors", request.Target)
+		log.Error(msg)
+		response.Code = msgs.Error
+		response.Msg = msg
+	}
+
+	return response
+}
+
+func getBackupParams(clusterName, taskName, action, podName, containerName, imagePrefix, backupOpts, backrestStorageType, s3VerifyTLS, jobName, ns, pgouser string) *crv1.Pgtask {
 	var newInstance *crv1.Pgtask
 	spec := crv1.PgtaskSpec{}
 	spec.Name = taskName
@@ -263,17 +348,16 @@ func getBackupParams(identifier, clusterName, taskName, action, podName, contain
 	}
 	newInstance.ObjectMeta.Labels = make(map[string]string)
 	newInstance.ObjectMeta.Labels[config.LABEL_PG_CLUSTER] = clusterName
-	newInstance.ObjectMeta.Labels[config.LABEL_PG_CLUSTER_IDENTIFIER] = identifier
 	newInstance.ObjectMeta.Labels[config.LABEL_PGOUSER] = pgouser
 	return newInstance
 }

 // getBackrestRepoPodName goes through the pod list to identify the
 // pgBackRest repo pod and then returns the pod name.
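+// The repo Pod is located with the label selector
+// "pg-cluster=<cluster>,pgo-backrest-repo=true"; an error is returned unless
+// exactly one matching Pod is found.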
-func getBackrestRepoPodName(cluster *crv1.Pgcluster, ns string) (string, error) {
+func getBackrestRepoPodName(cluster *crv1.Pgcluster) (string, error) {
 	ctx := context.TODO()

-	//look up the backrest-repo pod name
+	// look up the backrest-repo pod name
 	selector := "pg-cluster=" + cluster.Spec.Name + ",pgo-backrest-repo=true"

 	options := metav1.ListOptions{
@@ -281,7 +365,7 @@ func getBackrestRepoPodName(cluster *crv1.Pgcluster, ns string) (string, error)
 		LabelSelector: selector,
 	}

-	repopods, err := apiserver.Clientset.CoreV1().Pods(ns).List(ctx, options)
+	repopods, err := apiserver.Clientset.CoreV1().Pods(cluster.Namespace).List(ctx, options)
 	if len(repopods.Items) != 1 {
 		log.Errorf("pods len != 1 for cluster %s", cluster.Spec.Name)
 		return "", errors.New("backrestrepo pod not found for cluster " + cluster.Spec.Name)
@@ -296,55 +380,29 @@ func getBackrestRepoPodName(cluster *crv1.Pgcluster, ns string) (string, error)
 	return repopodName, err
 }

-func isPrimary(pod *v1.Pod, clusterName string) bool {
-	if pod.ObjectMeta.Labels[config.LABEL_SERVICE_NAME] == clusterName {
-		return true
-	}
-	return false
-
-}
-
-func isReady(pod *v1.Pod) bool {
-	readyCount := 0
-	containerCount := 0
-	for _, stat := range pod.Status.ContainerStatuses {
-		containerCount++
-		if stat.Ready {
-			readyCount++
-		}
-	}
-	if readyCount != containerCount {
-		return false
-	}
-	return true
-
-}
-
 // isPrimaryReady goes through the pod list to first identify the
 // Primary pod and, once identified, determine if it is in a
-// ready state. If not, it returns an error, otherwise it returns
-// a nil value
-func isPrimaryReady(cluster *crv1.Pgcluster, ns string) error {
+// ready state, returning true only if a running primary pod is
+// found.
+func isPrimaryReady(cluster *crv1.Pgcluster) bool {
 	ctx := context.TODO()

-	primaryReady := false
-	selector := fmt.Sprintf("%s=%s,%s=%s", config.LABEL_PG_CLUSTER, cluster.Name,
-		config.LABEL_PGHA_ROLE, config.LABEL_PGHA_ROLE_PRIMARY)
+	options := metav1.ListOptions{
+		FieldSelector: fields.OneTermEqualSelector("status.phase", string(v1.PodRunning)).String(),
+		LabelSelector: fields.AndSelectors(
+			fields.OneTermEqualSelector(config.LABEL_PG_CLUSTER, cluster.Name),
+			fields.OneTermEqualSelector(config.LABEL_PGHA_ROLE, config.LABEL_PGHA_ROLE_PRIMARY),
+		).String(),
+	}
+
+	pods, err := apiserver.Clientset.CoreV1().Pods(cluster.Namespace).List(ctx, options)

-	pods, err := apiserver.Clientset.CoreV1().Pods(ns).List(ctx, metav1.ListOptions{LabelSelector: selector})
 	if err != nil {
-		return err
-	}
-	for _, p := range pods.Items {
-		if isPrimary(&p, cluster.Spec.Name) && isReady(&p) {
-			primaryReady = true
-		}
+		log.Error(err)
+		return false
 	}

-	if primaryReady == false {
-		return errors.New("primary pod is not in Ready state")
-	}
-	return nil
+	return len(pods.Items) > 0
 }

 // ShowBackrest ...
@@ -363,7 +421,7 @@ func ShowBackrest(name, selector, ns string) msgs.ShowBackrestResponse {
 		}
 	}

-	//get a list of all clusters
+	// get a list of all clusters
 	clusterList, err := apiserver.Clientset.
 		CrunchydataV1().Pgclusters(ns).
List(ctx, metav1.ListOptions{LabelSelector: selector}) @@ -375,9 +433,10 @@ func ShowBackrest(name, selector, ns string) msgs.ShowBackrestResponse { log.Debugf("clusters found len is %d\n", len(clusterList.Items)) - for _, c := range clusterList.Items { - podname, err := getBackrestRepoPodName(&c, ns) + for i := range clusterList.Items { + c := &clusterList.Items[i] + podname, err := getBackrestRepoPodName(c) if err != nil { log.Error(err) response.Status.Code = msgs.Error @@ -387,33 +446,23 @@ func ShowBackrest(name, selector, ns string) msgs.ShowBackrestResponse { // so we potentially add two "pieces of detail" based on whether or not we // have a local repository, a s3 repository, or both - storageTypes := c.Spec.UserLabels[config.LABEL_BACKREST_STORAGE_TYPE] - - for _, storageType := range apiserver.GetBackrestStorageTypes() { - - // so the way we currently store the different repos is not ideal, and - // this is not being fixed right now, so we'll follow this logic: - // - // 1. If storage type is "local" and the string either contains "local" or - // is empty, we can add the pgBackRest info - // 2. if the storage type is "s3" and the string contains "s3", we can - // add the pgBackRest info - // 3. Otherwise, continue - if (storageTypes == "" && storageType != "local") || (storageTypes != "" && !strings.Contains(storageTypes, storageType)) { - continue - } + storageTypes := c.Spec.BackrestStorageTypes + // if this happens to be empty, then the storage type is "posix" + if len(storageTypes) == 0 { + storageTypes = append(storageTypes, crv1.BackrestStorageTypePosix) + } + for _, storageType := range storageTypes { // begin preparing the detailed response detail := msgs.ShowBackrestDetail{ Name: c.Name, - StorageType: storageType, + StorageType: string(storageType), } - verifyTLS, _ := strconv.ParseBool(operator.GetS3VerifyTLSSetting(&c)) + verifyTLS, _ := strconv.ParseBool(operator.GetS3VerifyTLSSetting(c)) // get the pgBackRest info using this legacy function - info, err := getInfo(c.Name, storageType, podname, ns, verifyTLS) - + info, err := getInfo(storageType, podname, ns, verifyTLS) // see if the function returned successfully, and if so, unmarshal the JSON if err != nil { log.Error(err) @@ -440,12 +489,12 @@ func ShowBackrest(name, selector, ns string) msgs.ShowBackrestResponse { return response } -func getInfo(clusterName, storageType, podname, ns string, verifyTLS bool) (string, error) { +func getInfo(storageType crv1.BackrestStorageType, podname, ns string, verifyTLS bool) (string, error) { log.Debug("backrest info command requested") cmd := pgBackRestInfoCommand - if storageType == "s3" { + if storageType == crv1.BackrestStorageTypeS3 { cmd = append(cmd, repoTypeFlagS3...) if !verifyTLS { @@ -454,7 +503,6 @@ func getInfo(clusterName, storageType, podname, ns string, verifyTLS bool) (stri } output, stderr, err := kubeapi.ExecToPodThroughAPI(apiserver.RESTConfig, apiserver.Clientset, cmd, containername, podname, ns, nil) - if err != nil { log.Error(err, stderr) return "", err @@ -506,13 +554,6 @@ func Restore(request *msgs.RestoreRequest, ns, pgouser string) msgs.RestoreRespo return resp } - // verify that the cluster we are restoring from has backrest enabled - if cluster.Labels[config.LABEL_BACKREST] != "true" { - resp.Status.Code = msgs.Error - resp.Status.Msg = "can't restore, cluster restoring from does not have backrest enabled" - return resp - } - // Return an error if any clusters identified for the restore are in standby mode. 
Restoring // from a standby cluster is not allowed since the cluster is following a remote primary, // which itself is responsible for performing any restores as required for the cluster. @@ -523,31 +564,27 @@ func Restore(request *msgs.RestoreRequest, ns, pgouser string) msgs.RestoreRespo return resp } - // ensure the backrest storage type specified for the backup is valid and enabled in the - // cluster - err = util.ValidateBackrestStorageTypeOnBackupRestore(request.BackrestStorageType, - cluster.Spec.UserLabels[config.LABEL_BACKREST_STORAGE_TYPE], true) - if err != nil { + // ensure the backrest storage type specified for the backup is valid and + // enabled in the cluster + if err := apiserver.ValidateBackrestStorageTypeForCommand(cluster, request.BackrestStorageType); err != nil { resp.Status.Code = msgs.Error resp.Status.Msg = err.Error() return resp } - var id string - id, err = createRestoreWorkflowTask(cluster.Name, ns) + id, err := createRestoreWorkflowTask(cluster) if err != nil { resp.Results = append(resp.Results, err.Error()) return resp } - pgtask, err := getRestoreParams(request, ns, *cluster) + pgtask, err := getRestoreParams(cluster, request) if err != nil { resp.Status.Code = msgs.Error resp.Status.Msg = err.Error() return resp } - pgtask.ObjectMeta.Labels[config.LABEL_PG_CLUSTER_IDENTIFIER] = cluster.ObjectMeta.Labels[config.LABEL_PG_CLUSTER_IDENTIFIER] pgtask.ObjectMeta.Labels[config.LABEL_PGOUSER] = pgouser pgtask.Spec.Parameters[crv1.PgtaskWorkflowID] = id @@ -559,7 +596,7 @@ func Restore(request *msgs.RestoreRequest, ns, pgouser string) msgs.RestoreRespo return resp } - //create a pgtask for the restore workflow + // create a pgtask for the restore workflow if _, err := apiserver.Clientset.CrunchydataV1().Pgtasks(ns). Create(ctx, pgtask, metav1.CreateOptions{}); err != nil { resp.Status.Code = msgs.Error @@ -575,18 +612,24 @@ func Restore(request *msgs.RestoreRequest, ns, pgouser string) msgs.RestoreRespo return resp } -func getRestoreParams(request *msgs.RestoreRequest, ns string, cluster crv1.Pgcluster) (*crv1.Pgtask, error) { +func getRestoreParams(cluster *crv1.Pgcluster, request *msgs.RestoreRequest) (*crv1.Pgtask, error) { var newInstance *crv1.Pgtask spec := crv1.PgtaskSpec{} - spec.Namespace = ns - spec.Name = "backrest-restore-" + request.FromCluster + spec.Namespace = cluster.Namespace + spec.Name = "backrest-restore-" + cluster.Name spec.TaskType = crv1.PgtaskBackrestRestore spec.Parameters = make(map[string]string) - spec.Parameters[config.LABEL_BACKREST_RESTORE_FROM_CLUSTER] = request.FromCluster + spec.Parameters[config.LABEL_BACKREST_RESTORE_FROM_CLUSTER] = cluster.Name spec.Parameters[config.LABEL_BACKREST_RESTORE_OPTS] = request.RestoreOpts spec.Parameters[config.LABEL_BACKREST_PITR_TARGET] = request.PITRTarget - spec.Parameters[config.LABEL_BACKREST_STORAGE_TYPE] = request.BackrestStorageType + + // get the repository to restore from. 
if not explicitly provided, the default + // for the cluster is used + spec.Parameters[config.LABEL_BACKREST_STORAGE_TYPE] = string(operator.GetRepoType(cluster)) + if request.BackrestStorageType != "" { + spec.Parameters[config.LABEL_BACKREST_STORAGE_TYPE] = request.BackrestStorageType + } // validate & parse nodeLabel if exists if request.NodeLabel != "" { @@ -598,12 +641,18 @@ func getRestoreParams(request *msgs.RestoreRequest, ns string, cluster crv1.Pgcl spec.Parameters[config.LABEL_NODE_LABEL_KEY] = parts[0] spec.Parameters[config.LABEL_NODE_LABEL_VALUE] = parts[1] + // determine if any special node affinity type must be set + spec.Parameters[config.LABEL_NODE_AFFINITY_TYPE] = "preferred" + if request.NodeAffinityType == crv1.NodeAffinityTypeRequired { + spec.Parameters[config.LABEL_NODE_AFFINITY_TYPE] = "required" + } + log.Debug("Restore node labels used from user entered flag") } newInstance = &crv1.Pgtask{ ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{config.LABEL_PG_CLUSTER: request.FromCluster}, + Labels: map[string]string{config.LABEL_PG_CLUSTER: cluster.Name}, Name: spec.Name, }, Spec: spec, @@ -611,26 +660,26 @@ func getRestoreParams(request *msgs.RestoreRequest, ns string, cluster crv1.Pgcl return newInstance, nil } -func createRestoreWorkflowTask(clusterName, ns string) (string, error) { +func createRestoreWorkflowTask(cluster *crv1.Pgcluster) (string, error) { ctx := context.TODO() - taskName := clusterName + "-" + crv1.PgtaskWorkflowBackrestRestoreType + taskName := cluster.Name + "-" + crv1.PgtaskWorkflowBackrestRestoreType - //delete any existing pgtask with the same name - if err := apiserver.Clientset.CrunchydataV1().Pgtasks(ns). + // delete any existing pgtask with the same name + if err := apiserver.Clientset.CrunchydataV1().Pgtasks(cluster.Namespace). Delete(ctx, taskName, metav1.DeleteOptions{}); err != nil && !kubeapi.IsNotFound(err) { return "", err } - //create pgtask CRD + // create pgtask CRD spec := crv1.PgtaskSpec{} - spec.Namespace = ns - spec.Name = clusterName + "-" + crv1.PgtaskWorkflowBackrestRestoreType + spec.Namespace = cluster.Namespace + spec.Name = cluster.Name + "-" + crv1.PgtaskWorkflowBackrestRestoreType spec.TaskType = crv1.PgtaskWorkflow spec.Parameters = make(map[string]string) spec.Parameters[crv1.PgtaskWorkflowSubmittedStatus] = time.Now().Format(time.RFC3339) - spec.Parameters[config.LABEL_PG_CLUSTER] = clusterName + spec.Parameters[config.LABEL_PG_CLUSTER] = cluster.Name u, err := ioutil.ReadFile("/proc/sys/kernel/random/uuid") if err != nil { @@ -646,10 +695,10 @@ func createRestoreWorkflowTask(clusterName, ns string) (string, error) { Spec: spec, } newInstance.ObjectMeta.Labels = make(map[string]string) - newInstance.ObjectMeta.Labels[config.LABEL_PG_CLUSTER] = clusterName + newInstance.ObjectMeta.Labels[config.LABEL_PG_CLUSTER] = cluster.Name newInstance.ObjectMeta.Labels[crv1.PgtaskWorkflowID] = spec.Parameters[crv1.PgtaskWorkflowID] - if _, err := apiserver.Clientset.CrunchydataV1().Pgtasks(ns). + if _, err := apiserver.Clientset.CrunchydataV1().Pgtasks(cluster.Namespace). 
Create(ctx, newInstance, metav1.CreateOptions{}); err != nil {
 		log.Error(err)
 		return "", err
diff --git a/internal/apiserver/backrestservice/backrestservice.go b/internal/apiserver/backrestservice/backrestservice.go
index e436afb878..966146a772 100644
--- a/internal/apiserver/backrestservice/backrestservice.go
+++ b/internal/apiserver/backrestservice/backrestservice.go
@@ -1,7 +1,7 @@
 package backrestservice

 /*
-Copyright 2018 - 2020 Crunchy Data Solutions, Inc.
+Copyright 2018 - 2023 Crunchy Data Solutions, Inc.
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
 You may obtain a copy of the License at
@@ -17,12 +17,13 @@ limitations under the License.

 import (
 	"encoding/json"
+	"net/http"
+
 	"github.com/crunchydata/postgres-operator/internal/apiserver"
 	"github.com/crunchydata/postgres-operator/internal/config"
 	msgs "github.com/crunchydata/postgres-operator/pkg/apiservermsgs"
 	"github.com/gorilla/mux"
 	log "github.com/sirupsen/logrus"
-	"net/http"
 )

 // CreateBackupHandler ...
@@ -68,12 +69,93 @@ func CreateBackupHandler(w http.ResponseWriter, r *http.Request) {
 	ns, err = apiserver.GetNamespace(apiserver.Clientset, username, request.Namespace)
 	if err != nil {
 		resp.Status = msgs.Status{Code: msgs.Error, Msg: err.Error()}
-		json.NewEncoder(w).Encode(resp)
+		_ = json.NewEncoder(w).Encode(resp)
 		return
 	}

 	resp = CreateBackup(&request, ns, username)
-	json.NewEncoder(w).Encode(resp)
+	_ = json.NewEncoder(w).Encode(resp)
+}
+
+// DeleteBackrestHandler deletes a targeted backup from a pgBackRest repository
+// pgo delete backup hippo --target=pgbackrest-backup-id
+func DeleteBackrestHandler(w http.ResponseWriter, r *http.Request) {
+	// swagger:operation DELETE /backrest backrestservice
+	/*```
+	  Delete a pgBackRest backup
+	*/
+	// ---
+	//  produces:
+	//  - application/json
+	//  parameters:
+	//  - name: "Delete pgBackRest Backup"
+	//    in: "body"
+	//    schema:
+	//      "$ref": "#/definitions/DeleteBackrestBackupRequest"
+	//  responses:
+	//    '200':
+	//      description: Output
+	//      schema:
+	//        "$ref": "#/definitions/DeleteBackrestBackupResponse"
+	log.Debug("backrestservice.DeleteBackrestHandler called")
+
+	// first, check that the requesting user is authorized to make this request
+	username, err := apiserver.Authn(apiserver.DELETE_BACKUP_PERM, w, r)
+	if err != nil {
+		return
+	}
+
+	// decode the request parameters
+	var request msgs.DeleteBackrestBackupRequest
+
+	if err := json.NewDecoder(r.Body).Decode(&request); err != nil {
+		response := msgs.DeleteBackrestBackupResponse{
+			Status: msgs.Status{
+				Code: msgs.Error,
+				Msg:  err.Error(),
+			},
+		}
+		_ = json.NewEncoder(w).Encode(response)
+		return
+	}
+
+	log.Debugf("DeleteBackrestHandler parameters [%+v]", request)
+
+	// set some of the headers. note: ideally we would not be setting the HTTP
+	// status upfront
+	w.Header().Set("WWW-Authenticate", `Basic realm="Restricted"`)
+	w.Header().Set("Content-Type", "application/json")
+	w.WriteHeader(http.StatusOK)
+
+	// check that the client versions match. If they don't, error out
+	if request.ClientVersion != msgs.PGO_VERSION {
+		response := msgs.DeleteBackrestBackupResponse{
+			Status: msgs.Status{
+				Code: msgs.Error,
+				Msg:  apiserver.VERSION_MISMATCH_ERROR,
+			},
+		}
+		_ = json.NewEncoder(w).Encode(response)
+		return
+	}
+
+	// ensure that the user has access to this namespace.
if not, error out + if _, err := apiserver.GetNamespace(apiserver.Clientset, username, request.Namespace); err != nil { + response := msgs.DeleteBackrestBackupResponse{ + Status: msgs.Status{ + Code: msgs.Error, + Msg: err.Error(), + }, + } + _ = json.NewEncoder(w).Encode(response) + return + } + + // process the request + response := DeleteBackup(request) + + // turn the response into JSON + _ = json.NewEncoder(w).Encode(response) } // ShowBackrestHandler ... @@ -137,20 +219,19 @@ func ShowBackrestHandler(w http.ResponseWriter, r *http.Request) { if clientVersion != msgs.PGO_VERSION { resp.Status = msgs.Status{Code: msgs.Error, Msg: apiserver.VERSION_MISMATCH_ERROR} - json.NewEncoder(w).Encode(resp) + _ = json.NewEncoder(w).Encode(resp) return } ns, err = apiserver.GetNamespace(apiserver.Clientset, username, namespace) if err != nil { resp.Status = msgs.Status{Code: msgs.Error, Msg: err.Error()} - json.NewEncoder(w).Encode(resp) + _ = json.NewEncoder(w).Encode(resp) return } resp = ShowBackrest(backupname, selector, ns) - json.NewEncoder(w).Encode(resp) - + _ = json.NewEncoder(w).Encode(resp) } // RestoreHandler ... @@ -195,10 +276,10 @@ func RestoreHandler(w http.ResponseWriter, r *http.Request) { ns, err = apiserver.GetNamespace(apiserver.Clientset, username, request.Namespace) if err != nil { resp.Status = msgs.Status{Code: msgs.Error, Msg: err.Error()} - json.NewEncoder(w).Encode(resp) + _ = json.NewEncoder(w).Encode(resp) return } resp = Restore(&request, ns, username) - json.NewEncoder(w).Encode(resp) + _ = json.NewEncoder(w).Encode(resp) } diff --git a/internal/apiserver/backupoptions/backupoptionsutil.go b/internal/apiserver/backupoptions/backupoptionsutil.go index b5b7ba314a..304110a3a0 100644 --- a/internal/apiserver/backupoptions/backupoptionsutil.go +++ b/internal/apiserver/backupoptions/backupoptionsutil.go @@ -1,7 +1,7 @@ package backupoptions /* -Copyright 2019 - 2020 Crunchy Data Solutions, Inc. +Copyright 2019 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -35,9 +35,9 @@ type backupOptions interface { // ValidateBackupOpts validates the backup/restore options that can be provided to the various backup // and restore utilities supported by pgo (e.g. pg_dump, pg_restore, pgBackRest, etc.) 
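+// The options string is trimmed first; an empty string passes validation,
+// while a non-empty string must begin with "-" or "--" before it is parsed
+// into the utility-specific options struct and each set flag is validated.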
func ValidateBackupOpts(backupOpts string, request interface{}) error { - // some quick checks to make sure backup opts string is valid and should be processed and validated - if strings.TrimSpace(backupOpts) == "" { + backupOpts = strings.TrimSpace(backupOpts) + if backupOpts == "" { return nil } else if !strings.HasPrefix(strings.TrimSpace(backupOpts), "-") && !strings.HasPrefix(strings.TrimSpace(backupOpts), "--") { @@ -52,7 +52,6 @@ func ValidateBackupOpts(backupOpts string, request interface{}) error { return err } else { err := backupOptions.validate(setFlagFieldNames) - if err != nil { return err } @@ -61,7 +60,6 @@ func ValidateBackupOpts(backupOpts string, request interface{}) error { } func convertBackupOptsToStruct(backupOpts string, request interface{}) (backupOptions, []string, error) { - parsedBackupOpts := parseBackupOpts(backupOpts) optsStruct, utilityName, err := createBackupOptionsStruct(backupOpts, request) @@ -92,6 +90,8 @@ func convertBackupOptsToStruct(backupOpts string, request interface{}) (backupOp commandLine.BoolVarP(fieldVal.Addr().Interface().(*bool), flag, flagShort, false, "") case reflect.Slice: commandLine.StringArrayVarP(fieldVal.Addr().Interface().(*[]string), flag, flagShort, nil, "") + default: + return nil, nil, fmt.Errorf("invalid value for (%q/%q): %v", flag, flagShort, fieldVal) } } } @@ -109,12 +109,11 @@ func convertBackupOptsToStruct(backupOpts string, request interface{}) (backupOp } func parseBackupOpts(backupOpts string) []string { - newFields := []string{} var newField string for i, c := range backupOpts { // if another option is found, add current option to newFields array - if !(c == ' ' && backupOpts[i+1] == '-') { + if !(c == ' ' && i+1 < len(backupOpts) && backupOpts[i+1] == '-') { newField = newField + string(c) } @@ -137,7 +136,6 @@ func parseBackupOpts(backupOpts string) []string { } func createBackupOptionsStruct(backupOpts string, request interface{}) (backupOptions, string, error) { - switch request := request.(type) { case *msgs.CreateBackrestBackupRequest: return &pgBackRestBackupOptions{}, "pgBackRest", nil @@ -167,6 +165,13 @@ func isValidCompressLevel(compressLevel int) bool { } } +// isValidCompressType checks that the compression type passed in matches one +// of the ones supported by pgBackRest. However, it presently does not support +// `zst` +func isValidCompressType(compressType string) bool { + return compressType == "gz" || compressType == "bz2" || compressType == "lz4" || compressType == "none" +} + // isValidRetentionRange validates that pgBackrest Full, Diff or Archive // retention option value is set within the allowable range. // allowed: 1-9999999 @@ -215,7 +220,7 @@ func handleCustomParseErrors(err error, usage *bytes.Buffer, optsStruct backupOp func obtainSetFlagFieldNames(commandLine *pflag.FlagSet, structType reflect.Type) []string { var setFlagFieldNames []string - var visitBackupOptFlags = func(flag *pflag.Flag) { + visitBackupOptFlags := func(flag *pflag.Flag) { for i := 0; i < structType.NumField(); i++ { field := structType.Field(i) flagName, _ := field.Tag.Lookup("flag") diff --git a/internal/apiserver/backupoptions/backupoptionsutil_test.go b/internal/apiserver/backupoptions/backupoptionsutil_test.go new file mode 100644 index 0000000000..df95262d50 --- /dev/null +++ b/internal/apiserver/backupoptions/backupoptionsutil_test.go @@ -0,0 +1,40 @@ +package backupoptions + +/* +Copyright 2021 - 2023 Crunchy Data Solutions, Inc. 
+Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +import "testing" + +func TestIsValidCompressType(t *testing.T) { + tests := []struct { + compressType string + expected bool + }{ + {compressType: "bz2", expected: true}, + {compressType: "gz", expected: true}, + {compressType: "none", expected: true}, + {compressType: "lz4", expected: true}, + {compressType: "zst", expected: false}, + {compressType: "bogus", expected: false}, + } + + for _, test := range tests { + t.Run(test.compressType, func(t *testing.T) { + if isValidCompressType(test.compressType) != test.expected { + t.Fatalf("expected %q to be %t", test.compressType, test.expected) + } + }) + } +} diff --git a/internal/apiserver/backupoptions/pgbackrestoptions.go b/internal/apiserver/backupoptions/pgbackrestoptions.go index 2c7a1e356e..fe88291cc2 100644 --- a/internal/apiserver/backupoptions/pgbackrestoptions.go +++ b/internal/apiserver/backupoptions/pgbackrestoptions.go @@ -1,7 +1,7 @@ package backupoptions /* -Copyright 2019 - 2020 Crunchy Data Solutions, Inc. +Copyright 2019 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -25,8 +25,6 @@ var pgBackRestOptsDenyList = []string{ "--config", "--config-include-path", "--config-path", - "--link-all", - "--link-map", "--lock-path", "--log-timestamp", "--neutral-umask", @@ -84,6 +82,7 @@ type pgBackRestBackupOptions struct { NoCompress bool `flag:"no-compress"` CompressLevel int `flag:"compress-level"` CompressLevelNetwork int `flag:"compress-level-network"` + CompressType string `flag:"compress-type"` DBTimeout int `flag:"db-timeout"` Delta bool `flag:"no-delta"` ProcessMax int `flag:"process-max"` @@ -131,7 +130,6 @@ func (backRestBackupOpts pgBackRestBackupOptions) validate(setFlagFieldNames []s var errstrings []string for _, setFlag := range setFlagFieldNames { - switch setFlag { case "BackupType": if !isValidValue([]string{"full", "diff", "incr"}, backRestBackupOpts.BackupType) { @@ -148,6 +146,11 @@ func (backRestBackupOpts pgBackRestBackupOptions) validate(setFlagFieldNames []s err := errors.New("Invalid network compress level for pgBackRest backup") errstrings = append(errstrings, err.Error()) } + case "CompressType": + if !isValidCompressType(backRestBackupOpts.CompressType) { + err := errors.New("Invalid compress type for pgBackRest backup") + errstrings = append(errstrings, err.Error()) + } case "LogLevelConsole": if !isValidBackrestLogLevel(backRestBackupOpts.LogLevelConsole) { err := errors.New("Invalid log level for pgBackRest backup") @@ -194,11 +197,9 @@ func (backRestBackupOpts pgBackRestBackupOptions) validate(setFlagFieldNames []s } func (backRestRestoreOpts pgBackRestRestoreOptions) validate(setFlagFieldNames []string) error { - var errstrings []string for _, setFlag := range setFlagFieldNames { - switch setFlag { case "TargetAction": if !isValidValue([]string{"pause", "promote", "shutdown"}, backRestRestoreOpts.TargetAction) { diff 
--git a/internal/apiserver/backupoptions/pgdumpoptions.go b/internal/apiserver/backupoptions/pgdumpoptions.go
index 268aa42412..a1a7e155a4 100644
--- a/internal/apiserver/backupoptions/pgdumpoptions.go
+++ b/internal/apiserver/backupoptions/pgdumpoptions.go
@@ -1,7 +1,7 @@
 package backupoptions

 /*
-Copyright 2019 - 2020 Crunchy Data Solutions, Inc.
+Copyright 2019 - 2023 Crunchy Data Solutions, Inc.
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
 You may obtain a copy of the License at
@@ -165,11 +165,9 @@ type pgRestoreOptions struct {
 }

 func (dumpOpts pgDumpOptions) validate(setFlagFieldNames []string) error {
-
 	var errstrings []string

 	for _, setFlag := range setFlagFieldNames {
-
 		switch setFlag {
 		case "Format":
 			if !isValidValue([]string{"p", "plain", "c", "custom", "t", "tar"}, dumpOpts.Format) {
@@ -214,11 +212,9 @@ func (dumpOpts pgDumpOptions) validate(setFlagFieldNames []string) error {
 }

 func (dumpAllOpts pgDumpAllOptions) validate(setFlagFieldNames []string) error {
-
 	var errstrings []string

 	for _, setFlag := range setFlagFieldNames {
-
 		switch setFlag {
 		case "SuperUser":
 			if !dumpAllOpts.DisableTriggers {
@@ -243,11 +239,9 @@ func (dumpAllOpts pgDumpAllOptions) validate(setFlagFieldNames []string) error {
 }

 func (restoreOpts pgRestoreOptions) validate(setFlagFieldNames []string) error {
-
 	var errstrings []string

 	for _, setFlag := range setFlagFieldNames {
-
 		switch setFlag {
 		case "Format":
 			if !isValidValue([]string{"p", "plain", "c", "custom", "t", "tar"}, restoreOpts.Format) {
diff --git a/internal/apiserver/catservice/catimpl.go b/internal/apiserver/catservice/catimpl.go
index af86a2a725..8f8bbc088c 100644
--- a/internal/apiserver/catservice/catimpl.go
+++ b/internal/apiserver/catservice/catimpl.go
@@ -1,7 +1,7 @@
 package catservice

 /*
-Copyright 2019 - 2020 Crunchy Data Solutions, Inc.
+Copyright 2019 - 2023 Crunchy Data Solutions, Inc.
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
 You may obtain a copy of the License at
@@ -101,7 +101,6 @@ func Cat(request *msgs.CatRequest, ns string) msgs.CatResponse {
 // run cat on the postgres pod, remember we are assuming
 // first container in the pod is always the postgres container.
 func cat(pod *v1.Pod, ns string, args []string) (string, error) {
-
 	command := make([]string, 0)
 	command = append(command, "cat")
 	for i := 1; i < len(args); i++ {
@@ -120,10 +119,10 @@ func cat(pod *v1.Pod, ns string, args []string) (string, error) {
 	return stdout, err
 }

-//make sure the parameters to the cat command dont' container mischief
+// make sure the parameters to the cat command don't contain mischief
 func validateArgs(args []string) error {
 	var err error
-	var bad = "&|;>"
+	bad := "&|;>"

 	for i := 1; i < len(args); i++ {
 		if strings.ContainsAny(args[i], bad) {
diff --git a/internal/apiserver/catservice/catservice.go b/internal/apiserver/catservice/catservice.go
index 439274271e..bdc2ce97d5 100644
--- a/internal/apiserver/catservice/catservice.go
+++ b/internal/apiserver/catservice/catservice.go
@@ -1,7 +1,7 @@
 package catservice

 /*
-Copyright 2019 - 2020 Crunchy Data Solutions, Inc.
+Copyright 2019 - 2023 Crunchy Data Solutions, Inc.
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
 You may obtain a copy of the License at
@@ -17,10 +17,11 @@ limitations under the License.
import ( "encoding/json" + "net/http" + "github.com/crunchydata/postgres-operator/internal/apiserver" msgs "github.com/crunchydata/postgres-operator/pkg/apiservermsgs" log "github.com/sirupsen/logrus" - "net/http" ) // CatHandler ... @@ -65,7 +66,7 @@ func CatHandler(w http.ResponseWriter, r *http.Request) { resp := msgs.CatResponse{} resp.Status.Code = msgs.Error resp.Status.Msg = err.Error() - json.NewEncoder(w).Encode(resp) + _ = json.NewEncoder(w).Encode(resp) return } @@ -74,9 +75,9 @@ func CatHandler(w http.ResponseWriter, r *http.Request) { resp := msgs.CatResponse{} resp.Status.Code = msgs.Error resp.Status.Msg = err.Error() - json.NewEncoder(w).Encode(resp) + _ = json.NewEncoder(w).Encode(resp) return } - json.NewEncoder(w).Encode(catResponse) + _ = json.NewEncoder(w).Encode(catResponse) } diff --git a/internal/apiserver/clusterservice/clusterimpl.go b/internal/apiserver/clusterservice/clusterimpl.go index 67eaea056c..374344054e 100644 --- a/internal/apiserver/clusterservice/clusterimpl.go +++ b/internal/apiserver/clusterservice/clusterimpl.go @@ -1,7 +1,7 @@ package clusterservice /* -Copyright 2017 - 2020 Crunchy Data Solutions, Inc. +Copyright 2017 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -20,6 +20,7 @@ import ( "errors" "fmt" "io/ioutil" + "reflect" "strconv" "strings" "time" @@ -29,6 +30,7 @@ import ( "github.com/crunchydata/postgres-operator/internal/config" "github.com/crunchydata/postgres-operator/internal/kubeapi" "github.com/crunchydata/postgres-operator/internal/operator/backrest" + clusteroperator "github.com/crunchydata/postgres-operator/internal/operator/cluster" "github.com/crunchydata/postgres-operator/internal/util" crv1 "github.com/crunchydata/postgres-operator/pkg/apis/crunchydata.com/v1" msgs "github.com/crunchydata/postgres-operator/pkg/apiservermsgs" @@ -38,6 +40,7 @@ import ( kerrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/util/validation" "k8s.io/client-go/kubernetes" ) @@ -66,7 +69,7 @@ func DeleteCluster(name, selector string, deleteData, deleteBackups bool, ns, pg log.Debugf("delete-data is [%t]", deleteData) log.Debugf("delete-backups is [%t]", deleteBackups) - //get the clusters list + // get the clusters list clusterList, err := apiserver.Clientset.CrunchydataV1().Pgclusters(ns).List(ctx, metav1.ListOptions{LabelSelector: selector}) if err != nil { response.Status.Code = msgs.Error @@ -80,7 +83,8 @@ func DeleteCluster(name, selector string, deleteData, deleteBackups bool, ns, pg return response } - for _, cluster := range clusterList.Items { + for i := range clusterList.Items { + cluster := &clusterList.Items[i] // check if the current cluster is not upgraded to the deployed // Operator version. 
If not, do not allow the command to complete @@ -91,23 +95,16 @@ func DeleteCluster(name, selector string, deleteData, deleteBackups bool, ns, pg } log.Debugf("deleting cluster %s", cluster.Spec.Name) - taskName := cluster.Spec.Name + "-rmdata" - log.Debugf("creating taskName %s", taskName) - isBackup := false - isReplica := false - replicaName := "" - clusterPGHAScope := cluster.ObjectMeta.Labels[config.LABEL_PGHA_SCOPE] // first delete any existing rmdata pgtask with the same name - err = apiserver.Clientset.CrunchydataV1().Pgtasks(ns).Delete(ctx, taskName, metav1.DeleteOptions{}) + err = apiserver.Clientset.CrunchydataV1().Pgtasks(ns).Delete(ctx, cluster.Name+"-rmdata", metav1.DeleteOptions{}) if err != nil && !kerrors.IsNotFound(err) { response.Status.Code = msgs.Error response.Status.Msg = err.Error() return response } - err := apiserver.CreateRMDataTask(cluster.Spec.Name, replicaName, taskName, deleteBackups, deleteData, isReplica, isBackup, ns, clusterPGHAScope) - if err != nil { + if err := util.CreateRMDataTask(apiserver.Clientset, cluster, "", deleteBackups, deleteData, false, false); err != nil { log.Debugf("error on creating rmdata task %s", err.Error()) response.Status.Code = msgs.Error response.Status.Msg = err.Error() @@ -119,7 +116,6 @@ func DeleteCluster(name, selector string, deleteData, deleteBackups bool, ns, pg } return response - } // ShowCluster ... @@ -141,7 +137,7 @@ func ShowCluster(name, selector, ccpimagetag, ns string, allflag bool) msgs.Show log.Debugf("selector on showCluster is %s", selector) - //get a list of all clusters + // get a list of all clusters clusterList, err := apiserver.Clientset.CrunchydataV1().Pgclusters(ns).List(ctx, metav1.ListOptions{LabelSelector: selector}) if err != nil { response.Status.Code = msgs.Error @@ -151,7 +147,8 @@ func ShowCluster(name, selector, ccpimagetag, ns string, allflag bool) msgs.Show log.Debugf("clusters found len is %d", len(clusterList.Items)) - for _, c := range clusterList.Items { + for i := range clusterList.Items { + c := clusterList.Items[i] detail := msgs.ShowClusterDetail{} detail.Cluster = c detail.Deployments, err = getDeployments(&c, ns) @@ -190,7 +187,6 @@ func ShowCluster(name, selector, ccpimagetag, ns string, allflag bool) msgs.Show } return response - } func getDeployments(cluster *crv1.Pgcluster, ns string) ([]msgs.ShowClusterDeployment, error) { @@ -226,7 +222,7 @@ func GetPods(clientset kubernetes.Interface, cluster *crv1.Pgcluster) ([]msgs.Sh ctx := context.TODO() output := []msgs.ShowClusterPod{} - //get pods, but exclude backup pods and backrest repo + // get pods, but exclude backup pods and backrest repo selector := fmt.Sprintf("%s=%s,%s", config.LABEL_PG_CLUSTER, cluster.GetName(), config.LABEL_PG_DATABASE) log.Debugf("selector for GetPods is %s", selector) @@ -235,14 +231,15 @@ func GetPods(clientset kubernetes.Interface, cluster *crv1.Pgcluster) ([]msgs.Sh return output, err } - for _, p := range pods.Items { + for i := range pods.Items { + p := &pods.Items[i] d := msgs.ShowClusterPod{ PVC: []msgs.ShowClusterPodPVC{}, } d.Name = p.Name d.Phase = string(p.Status.Phase) d.NodeName = p.Spec.NodeName - d.ReadyStatus, d.Ready = getReadyStatus(&p) + d.ReadyStatus, d.Ready = getReadyStatus(p) // get information about several of the PVCs. 
This borrows from a legacy // method to get this information @@ -260,7 +257,6 @@ func GetPods(clientset kubernetes.Interface, cluster *crv1.Pgcluster) ([]msgs.Sh pvcName := v.VolumeSource.PersistentVolumeClaim.ClaimName // query the PVC to get the storage capacity pvc, err := clientset.CoreV1().PersistentVolumeClaims(cluster.Namespace).Get(ctx, pvcName, metav1.GetOptions{}) - // if there is an error, ignore it, and move on to the next one if err != nil { log.Warn(err) @@ -278,7 +274,7 @@ func GetPods(clientset kubernetes.Interface, cluster *crv1.Pgcluster) ([]msgs.Sh } d.Primary = false - d.Type = getType(&p, cluster.Spec.Name) + d.Type = getType(p) if d.Type == msgs.PodTypePrimary { d.Primary = true } @@ -287,7 +283,6 @@ func GetPods(clientset kubernetes.Interface, cluster *crv1.Pgcluster) ([]msgs.Sh } return output, err - } func getServices(cluster *crv1.Pgcluster, ns string) ([]msgs.ShowClusterService, error) { @@ -304,12 +299,15 @@ func getServices(cluster *crv1.Pgcluster, ns string) ([]msgs.ShowClusterService, for _, p := range services.Items { d := msgs.ShowClusterService{} d.Name = p.Name - if strings.Contains(p.Name, "-backrest-repo") { + if strings.HasSuffix(p.Name, "-backrest-repo") { d.BackrestRepo = true d.ClusterName = cluster.Name - } else if strings.Contains(p.Name, "-pgbouncer") { + } else if strings.HasSuffix(p.Name, "-pgbouncer") { d.Pgbouncer = true d.ClusterName = cluster.Name + } else if strings.HasSuffix(p.Name, "-pgadmin") { + d.PGAdmin = true + d.ClusterName = cluster.Name } d.ClusterIP = p.Spec.ClusterIP for _, port := range p.Spec.Ports { @@ -366,7 +364,6 @@ func TestCluster(name, selector, ns, pgouser string, allFlag bool) msgs.ClusterT // Find a list of a clusters that match the given selector clusterList, err := apiserver.Clientset.CrunchydataV1().Pgclusters(ns).List(ctx, metav1.ListOptions{LabelSelector: selector}) - // If the response errors, return here, as we won't be able to return any // useful information in the test if err != nil { @@ -379,7 +376,8 @@ func TestCluster(name, selector, ns, pgouser string, allFlag bool) msgs.ClusterT log.Debugf("Total clusters found: %d", len(clusterList.Items)) // Iterate through each cluster and perform the various tests against them - for _, c := range clusterList.Items { + for i := range clusterList.Items { + c := clusterList.Items[i] // Set up the object that will be appended to the response that // indicates the availability of the endpoints / instances for this // cluster @@ -395,7 +393,6 @@ func TestCluster(name, selector, ns, pgouser string, allFlag bool) msgs.ClusterT // Get the PostgreSQL instances! 
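// NOTE: a minimal sketch of why the hunks above switch from
// `for _, p := range pods.Items` to index-based loops. Before Go 1.22 the
// range variable is one reused variable, so taking its address (as the old
// `getReadyStatus(&p)` calls did) aliases every iteration to the same
// storage. Illustrative only, not part of this patch; assumes the file's
// existing `v1 "k8s.io/api/core/v1"` import:
func podPointers(pods []v1.Pod) []*v1.Pod {
	refs := make([]*v1.Pod, 0, len(pods))
	for i := range pods {
		// &pods[i] points at the slice element itself; &p from a
		// `for _, p := range pods` loop would point at the single
		// reused loop variable instead
		refs = append(refs, &pods[i])
	}
	return refs
}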
log.Debugf("Looking up instance pods for cluster: %s", c.Name) pods, err := GetPrimaryAndReplicaPods(&c, ns) - // if there is an error with returning the primary/replica instances, // then error and continue if err != nil { @@ -489,8 +486,10 @@ func TestCluster(name, selector, ns, pgouser string, allFlag bool) msgs.ClusterT switch { default: endpoint.InstanceType = msgs.ClusterTestInstanceTypePrimary - case strings.Contains(service.Name, msgs.PodTypeReplica): + case (strings.HasSuffix(service.Name, "-"+msgs.PodTypeReplica) && strings.Count(service.Name, "-"+msgs.PodTypeReplica) == 1): endpoint.InstanceType = msgs.ClusterTestInstanceTypeReplica + case service.PGAdmin: + endpoint.InstanceType = msgs.ClusterTestInstanceTypePGAdmin case service.Pgbouncer: endpoint.InstanceType = msgs.ClusterTestInstanceTypePGBouncer case service.BackrestRepo: @@ -582,18 +581,10 @@ func CreateCluster(request *msgs.CreateClusterRequest, ns, pgouser string) msgs. return resp } - userLabelsMap := make(map[string]string) - if request.UserLabels != "" { - labels := strings.Split(request.UserLabels, ",") - for _, v := range labels { - p := strings.Split(v, "=") - if len(p) < 2 { - resp.Status.Code = msgs.Error - resp.Status.Msg = "invalid labels format" - return resp - } - userLabelsMap[p[0]] = p[1] - } + if err := util.ValidateLabels(request.UserLabels); err != nil { + resp.Status.Code = msgs.Error + resp.Status.Msg = err.Error() + return resp } // validate any parameters provided to bootstrap the cluster from an existing data source @@ -716,28 +707,16 @@ func CreateCluster(request *msgs.CreateClusterRequest, ns, pgouser string) msgs. resp.Status.Msg = err.Error() return resp } - //add a label for the custom config - userLabelsMap[config.LABEL_CUSTOM_CONFIG] = request.CustomConfig - } - - //set the metrics flag with the global setting first - userLabelsMap[config.LABEL_EXPORTER] = strconv.FormatBool(apiserver.MetricsFlag) - if err != nil { - log.Error(err) } - //if metrics is chosen on the pgo command, stick it into the user labels - if request.MetricsFlag { - userLabelsMap[config.LABEL_EXPORTER] = "true" - } - if request.ServiceType != "" { - if request.ServiceType != config.DEFAULT_SERVICE_TYPE && request.ServiceType != config.LOAD_BALANCER_SERVICE_TYPE && request.ServiceType != config.NODEPORT_SERVICE_TYPE { - resp.Status.Code = msgs.Error - resp.Status.Msg = "error ServiceType should be either ClusterIP or LoadBalancer " - - return resp - } - userLabelsMap[config.LABEL_SERVICE_TYPE] = request.ServiceType + // validate the optional ServiceType parameter + switch request.ServiceType { + default: + resp.Status.Code = msgs.Error + resp.Status.Msg = fmt.Sprintf("invalid service type %q", request.ServiceType) + return resp + case v1.ServiceTypeClusterIP, v1.ServiceTypeNodePort, + v1.ServiceTypeLoadBalancer, v1.ServiceTypeExternalName, "": // no-op } // if the request is for a standby cluster then validate it to ensure all parameters have @@ -763,18 +742,13 @@ func CreateCluster(request *msgs.CreateClusterRequest, ns, pgouser string) msgs. // ensure the backrest storage type specified for the cluster is valid, and that the // configuration required to use that storage type (e.g. 
a bucket, endpoint and region // when using aws s3 storage) has been provided - err = validateBackrestStorageTypeOnCreate(request) + backrestStorageTypes, err := validateBackrestStorageTypeOnCreate(request) if err != nil { resp.Status.Code = msgs.Error resp.Status.Msg = err.Error() return resp } - if request.BackrestStorageType != "" { - log.Debug("using backrest storage type provided by user") - userLabelsMap[config.LABEL_BACKREST_STORAGE_TYPE] = request.BackrestStorageType - } - // if a value for BackrestStorageConfig is provided, validate it here if request.BackrestStorageConfig != "" && !apiserver.IsValidStorageName(request.BackrestStorageConfig) { resp.Status.Code = msgs.Error @@ -782,13 +756,6 @@ func CreateCluster(request *msgs.CreateClusterRequest, ns, pgouser string) msgs. return resp } - log.Debug("userLabelsMap") - log.Debugf("%v", userLabelsMap) - - if existsGlobalConfig(ns) { - userLabelsMap[config.LABEL_CUSTOM_CONFIG] = config.GLOBAL_CUSTOM_CONFIGMAP - } - if request.StorageConfig != "" && !apiserver.IsValidStorageName(request.StorageConfig) { resp.Status.Code = msgs.Error resp.Status.Msg = fmt.Sprintf("%q storage config was not found", request.StorageConfig) @@ -814,16 +781,10 @@ func CreateCluster(request *msgs.CreateClusterRequest, ns, pgouser string) msgs. resp.Status.Msg = err.Error() return resp } - - parts := strings.Split(request.NodeLabel, "=") - userLabelsMap[config.LABEL_NODE_LABEL_KEY] = parts[0] - userLabelsMap[config.LABEL_NODE_LABEL_VALUE] = parts[1] - - log.Debug("primary node labels used from user entered flag") } if request.ReplicaStorageConfig != "" { - if apiserver.IsValidStorageName(request.ReplicaStorageConfig) == false { + if !apiserver.IsValidStorageName(request.ReplicaStorageConfig) { resp.Status.Code = msgs.Error resp.Status.Msg = request.ReplicaStorageConfig + " Storage config was not found " return resp @@ -831,11 +792,23 @@ func CreateCluster(request *msgs.CreateClusterRequest, ns, pgouser string) msgs. } // if the pgBouncer flag is set, validate that replicas is set to a - // nonnegative value - if request.PgbouncerFlag && request.PgBouncerReplicas < 0 { - resp.Status.Code = msgs.Error - resp.Status.Msg = fmt.Sprintf(apiserver.ErrMessageReplicas+" for pgBouncer", 1) - return resp + // nonnegative value and the service type. + if request.PgbouncerFlag { + if request.PgBouncerReplicas < 0 { + resp.Status.Code = msgs.Error + resp.Status.Msg = fmt.Sprintf(apiserver.ErrMessageReplicas+" for pgBouncer", 1) + return resp + } + + // validate the optional ServiceType parameter + switch request.PgBouncerServiceType { + default: + resp.Status.Code = msgs.Error + resp.Status.Msg = fmt.Sprintf("invalid pgBouncer service type %q", request.PgBouncerServiceType) + return resp + case v1.ServiceTypeClusterIP, v1.ServiceTypeNodePort, + v1.ServiceTypeLoadBalancer, v1.ServiceTypeExternalName, "": // no-op + } } // if a value is provided in the request for PodAntiAffinity, then ensure is valid. If @@ -849,9 +822,6 @@ func CreateCluster(request *msgs.CreateClusterRequest, ns, pgouser string) msgs. resp.Status.Msg = err.Error() return resp } - userLabelsMap[config.LABEL_POD_ANTI_AFFINITY] = request.PodAntiAffinity - } else { - userLabelsMap[config.LABEL_POD_ANTI_AFFINITY] = "" } // check to see if there are any pod anti-affinity overrides, specifically for @@ -876,12 +846,6 @@ func CreateCluster(request *msgs.CreateClusterRequest, ns, pgouser string) msgs. 
} } - // if synchronous replication has been enabled, then add to user labels - if request.SyncReplication != nil { - userLabelsMap[config.LABEL_SYNC_REPLICATION] = - string(strconv.FormatBool(*request.SyncReplication)) - } - // pgBackRest URI style must be set to either 'path' or 'host'. If it is neither, // log an error and stop the cluster from being created. if request.BackrestS3URIStyle != "" { @@ -893,31 +857,29 @@ func CreateCluster(request *msgs.CreateClusterRequest, ns, pgouser string) msgs. } // Create an instance of our CRD - newInstance := getClusterParams(request, clusterName, userLabelsMap, ns) + newInstance := getClusterParams(request, clusterName, ns) newInstance.ObjectMeta.Labels[config.LABEL_PGOUSER] = pgouser + newInstance.Spec.BackrestStorageTypes = backrestStorageTypes if request.SecretFrom != "" { - err = validateSecretFrom(request.SecretFrom, newInstance.Spec.User, ns) + err = validateSecretFrom(newInstance, request.SecretFrom) if err != nil { resp.Status.Code = msgs.Error - resp.Status.Msg = request.SecretFrom + " secret was not found " + resp.Status.Msg = err.Error() return resp } } - validateConfigPolicies(clusterName, request.Policies, ns) + _ = validateConfigPolicies(clusterName, request.Policies, ns) // create the user secrets // first, the superuser - if secretName, password, err := createUserSecret(request, newInstance, crv1.RootSecretSuffix, - crv1.PGUserSuperuser, request.PasswordSuperuser); err != nil { + if password, err := createUserSecret(request, newInstance, crv1.PGUserSuperuser, request.PasswordSuperuser); err != nil { log.Error(err) resp.Status.Code = msgs.Error resp.Status.Msg = err.Error() return resp } else { - newInstance.Spec.RootSecretName = secretName - // if the user requests to show system accounts, append it to the list if request.ShowSystemAccounts { user := msgs.CreateClusterDetailUser{ @@ -930,15 +892,12 @@ func CreateCluster(request *msgs.CreateClusterRequest, ns, pgouser string) msgs. } // next, the replication user - if secretName, password, err := createUserSecret(request, newInstance, crv1.PrimarySecretSuffix, - crv1.PGUserReplication, request.PasswordReplication); err != nil { + if password, err := createUserSecret(request, newInstance, crv1.PGUserReplication, request.PasswordReplication); err != nil { log.Error(err) resp.Status.Code = msgs.Error resp.Status.Msg = err.Error() return resp } else { - newInstance.Spec.PrimarySecretName = secretName - // if the user requests to show system accounts, append it to the list if request.ShowSystemAccounts { user := msgs.CreateClusterDetailUser{ @@ -951,16 +910,12 @@ func CreateCluster(request *msgs.CreateClusterRequest, ns, pgouser string) msgs. } // finally, the user from the request and/or default user - userSecretSuffix := fmt.Sprintf("-%s%s", newInstance.Spec.User, crv1.UserSecretSuffix) - if secretName, password, err := createUserSecret(request, newInstance, userSecretSuffix, newInstance.Spec.User, - request.Password); err != nil { + if password, err := createUserSecret(request, newInstance, newInstance.Spec.User, request.Password); err != nil { log.Error(err) resp.Status.Code = msgs.Error resp.Status.Msg = err.Error() return resp } else { - newInstance.Spec.UserSecretName = secretName - user := msgs.CreateClusterDetailUser{ Username: newInstance.Spec.User, Password: password, @@ -969,60 +924,83 @@ func CreateCluster(request *msgs.CreateClusterRequest, ns, pgouser string) msgs. 
resp.Result.Users = append(resp.Result.Users, user) } - // there's a secret for the monitoring user too - newInstance.Spec.CollectSecretName = clusterName + crv1.ExporterSecretSuffix - // Create Backrest secret for S3/SSH Keys: // We make this regardless if backrest is enabled or not because // the deployment template always tries to mount /sshd volume secretName := fmt.Sprintf("%s-%s", clusterName, config.LABEL_BACKREST_REPO_SECRET) - if _, err := apiserver.Clientset. - CoreV1().Secrets(request.Namespace). - Get(ctx, secretName, metav1.GetOptions{}); kubeapi.IsNotFound(err) { - // determine if a custom CA secret should be used - backrestS3CACert := []byte{} + // determine if a custom CA secret should be used + backrestS3CACert := []byte{} - if request.BackrestS3CASecretName != "" { - backrestSecret, err := apiserver.Clientset. - CoreV1().Secrets(request.Namespace). - Get(ctx, request.BackrestS3CASecretName, metav1.GetOptions{}) + if request.BackrestS3CASecretName != "" { + backrestSecret, err := apiserver.Clientset. + CoreV1().Secrets(request.Namespace). + Get(ctx, request.BackrestS3CASecretName, metav1.GetOptions{}) + if err != nil { + log.Error(err) + resp.Status.Code = msgs.Error + resp.Status.Msg = fmt.Sprintf("Error finding pgBackRest S3 CA secret \"%s\": %s", + request.BackrestS3CASecretName, err.Error()) + return resp + } - if err != nil { - log.Error(err) - resp.Status.Code = msgs.Error - resp.Status.Msg = fmt.Sprintf("Error finding pgBackRest S3 CA secret \"%s\": %s", - request.BackrestS3CASecretName, err.Error()) - return resp - } + // attempt to retrieve the custom CA, assuming it has the name + // "aws-s3-ca.crt" + backrestS3CACert = backrestSecret.Data[util.BackRestRepoSecretKeyAWSS3KeyAWSS3CACert] + } - // attempt to retrieves the custom CA, assuming it has the name - // "aws-s3-ca.crt" - backrestS3CACert = backrestSecret.Data[util.BackRestRepoSecretKeyAWSS3KeyAWSS3CACert] - } + // save the S3 credentials in a single map so it can be used to either create a new + // secret or update an existing one + s3Credentials := map[string][]byte{ + util.BackRestRepoSecretKeyAWSS3KeyAWSS3CACert: backrestS3CACert, + util.BackRestRepoSecretKeyAWSS3KeyAWSS3Key: []byte(request.BackrestS3Key), + util.BackRestRepoSecretKeyAWSS3KeyAWSS3KeySecret: []byte(request.BackrestS3KeySecret), + } - err := util.CreateBackrestRepoSecrets(apiserver.Clientset, - util.BackrestRepoConfig{ - BackrestS3CA: backrestS3CACert, - BackrestS3Key: request.BackrestS3Key, - BackrestS3KeySecret: request.BackrestS3KeySecret, - ClusterName: clusterName, - ClusterNamespace: request.Namespace, - OperatorNamespace: apiserver.PgoNamespace, - }) + _, err = apiserver.Clientset.CoreV1().Secrets(request.Namespace). - Get(ctx, secretName, metav1.GetOptions{}) - if err != nil { + + Get(ctx, secretName, metav1.GetOptions{}) + switch { + case kubeapi.IsNotFound(err): + // The pgBackRest repo config secret was not found, create it.
+ // Set up the secret for the cluster that contains the pgBackRest + // information + secret := &v1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: secretName, + Labels: map[string]string{ + config.LABEL_VENDOR: config.LABEL_CRUNCHY, + config.LABEL_PG_CLUSTER: clusterName, + config.LABEL_PGO_BACKREST_REPO: "true", + }, + }, + Data: s3Credentials, + } + + if _, err := apiserver.Clientset.CoreV1().Secrets(ns).Create(ctx, secret, metav1.CreateOptions{}); err != nil && !kubeapi.IsAlreadyExists(err) { resp.Status.Code = msgs.Error resp.Status.Msg = fmt.Sprintf("could not create backrest repo secret: %s", err) return resp } - } else if err != nil { + + case err != nil: + // An error occurred other than 'not found'. Log the error received when + // attempting to get the pgBackRest repo config secret, then return. resp.Status.Code = msgs.Error resp.Status.Msg = fmt.Sprintf("could not query if backrest repo secret exits: %s", err) return resp + default: + // the pgBackRest repo config secret already exists, update any provided + // S3 credential information + err = updateRepoSecret(apiserver.Clientset, secretName, request.Namespace, s3Credentials) + if err != nil { + resp.Status.Code = msgs.Error + resp.Status.Msg = fmt.Sprintf("could not update backrest repo secret: %s", err) + return resp + } } - //create a workflow for this new cluster + // create a workflow for this new cluster id, err = createWorkflowTask(clusterName, ns, pgouser) if err != nil { log.Error(err) @@ -1037,7 +1015,7 @@ func CreateCluster(request *msgs.CreateClusterRequest, ns, pgouser string) msgs. newInstance.Spec.UserLabels[config.LABEL_WORKFLOW_ID] = id resp.Result.Database = newInstance.Spec.Database - //create CRD for new cluster + // create CRD for new cluster _, err = apiserver.Clientset.CrunchydataV1().Pgclusters(ns).Create(ctx, newInstance, metav1.CreateOptions{}) if err != nil { resp.Status.Code = msgs.Error @@ -1052,6 +1030,30 @@ func CreateCluster(request *msgs.CreateClusterRequest, ns, pgouser string) msgs. return resp } +// updateRepoSecret updates the existing pgBackRest repo config secret with any +// provided S3/GCS connection information. +func updateRepoSecret(clientset kubernetes.Interface, secretName, + namespace string, connectionInfo map[string][]byte) error { + ctx := context.TODO() + + // Get the secret + secret, err := clientset.CoreV1().Secrets(namespace). + Get(ctx, secretName, metav1.GetOptions{}) + // The secret should already exist at this point. If there is any error, + // return. 
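// NOTE: this updateRepoSecret is a plain read-modify-write, so a concurrent
// writer can make the Update fail with a conflict. A hedged sketch of the
// same flow wrapped in client-go's conflict-retry helper; hypothetical, not
// part of this patch, and it assumes an extra
// "k8s.io/client-go/util/retry" import:
func updateRepoSecretWithRetry(clientset kubernetes.Interface, secretName,
	namespace string, connectionInfo map[string][]byte) error {
	ctx := context.TODO()
	return retry.RetryOnConflict(retry.DefaultRetry, func() error {
		// re-read the Secret on each attempt so a conflicting write is
		// picked up before retrying
		secret, err := clientset.CoreV1().Secrets(namespace).Get(ctx, secretName, metav1.GetOptions{})
		if err != nil {
			return err
		}
		for k, v := range connectionInfo {
			secret.Data[k] = v
		}
		_, err = clientset.CoreV1().Secrets(namespace).Update(ctx, secret, metav1.UpdateOptions{})
		return err
	})
}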
+ if err != nil { + return err + } + + // update the secret data + for k, v := range connectionInfo { + secret.Data[k] = v + } + _, err = clientset.CoreV1().Secrets(secret.Namespace).Update(ctx, secret, + metav1.UpdateOptions{}) + return err +} + func validateConfigPolicies(clusterName, PoliciesFlag, ns string) error { ctx := context.TODO() var err error @@ -1078,7 +1080,7 @@ func validateConfigPolicies(clusterName, PoliciesFlag, ns string) error { log.Error("error getting pgpolicy " + v + err.Error()) return err } - //create a pgtask to add the policy after the db is ready + // create a pgtask to add the policy after the db is ready } spec := crv1.PgtaskSpec{} @@ -1107,8 +1109,7 @@ func validateConfigPolicies(clusterName, PoliciesFlag, ns string) error { return err } -func getClusterParams(request *msgs.CreateClusterRequest, name string, userLabelsMap map[string]string, ns string) *crv1.Pgcluster { - +func getClusterParams(request *msgs.CreateClusterRequest, name string, ns string) *crv1.Pgcluster { spec := crv1.PgclusterSpec{ Annotations: crv1.ClusterAnnotations{ Backrest: map[string]string{}, @@ -1126,11 +1127,13 @@ func getClusterParams(request *msgs.CreateClusterRequest, name string, userLabel Limits: v1.ResourceList{}, Resources: v1.ResourceList{}, }, + UserLabels: map[string]string{}, } - if userLabelsMap[config.LABEL_CUSTOM_CONFIG] != "" { - spec.CustomConfig = userLabelsMap[config.LABEL_CUSTOM_CONFIG] - } + // enable the exporter sidecar based on what the user passed in or what + // the default value is. the user value takes precedence, unless it's false, + // as the legacy check only looked for enablement + spec.Exporter = request.MetricsFlag || apiserver.MetricsFlag // if the request has overriding CPU/Memory requests/limits parameters, // these will take precedence over the defaults @@ -1226,6 +1229,9 @@ func getClusterParams(request *msgs.CreateClusterRequest, name string, userLabel if request.PgBouncerReplicas > 0 { spec.PgBouncer.Replicas = request.PgBouncerReplicas } + + // additionally if a specific pgBouncer Service Type is set, set that here + spec.PgBouncer.ServiceType = request.PgBouncerServiceType } // similarly, if there are any overriding pgBouncer container resource request @@ -1256,6 +1262,11 @@ func getClusterParams(request *msgs.CreateClusterRequest, name string, userLabel spec.PgBouncer.Resources[v1.ResourceMemory] = apiserver.Pgo.Cluster.DefaultPgBouncerResourceMemory } + // if TLS is enabled for pgBouncer, ensure the secret is specified + if request.PgBouncerTLSSecret != "" { + spec.PgBouncer.TLSSecret = request.PgBouncerTLSSecret + } + spec.PrimaryStorage, _ = apiserver.Pgo.GetStorageSpec(apiserver.Pgo.PrimaryStorage) if request.StorageConfig != "" { spec.PrimaryStorage, _ = apiserver.Pgo.GetStorageSpec(request.StorageConfig) @@ -1273,6 +1284,16 @@ func getClusterParams(request *msgs.CreateClusterRequest, name string, userLabel spec.PodAntiAffinity = podAntiAffinity } + // if there is a node label, set the node affinity + if request.NodeLabel != "" { + nodeLabel := strings.Split(request.NodeLabel, "=") + spec.NodeAffinity = crv1.NodeAffinitySpec{ + Default: util.GenerateNodeAffinity(request.NodeAffinityType, nodeLabel[0], []string{nodeLabel[1]}), + } + + log.Debugf("using node label %s", request.NodeLabel) + } + // if the PVCSize is overwritten, update the primary storage spec with this // value if request.PVCSize != "" { @@ -1383,10 +1404,15 @@ spec.Replicas = 
strconv.Itoa(request.ReplicaCount) log.Debugf("replicas is %s", spec.Replicas) } - spec.UserLabels = userLabelsMap + + spec.ServiceType = request.ServiceType + + if request.UserLabels != nil { + spec.UserLabels = request.UserLabels + } spec.UserLabels[config.LABEL_PGO_VERSION] = msgs.PGO_VERSION - //override any values from config file + // override any values from config file str = apiserver.Pgo.Cluster.Port log.Debugf("%s", apiserver.Pgo.Cluster.Port) if str != "" { @@ -1488,28 +1514,21 @@ func getClusterParams(request *msgs.CreateClusterRequest, name string, userLabel setClusterAnnotationGroup(spec.Annotations.Backrest, request.Annotations.Backrest) setClusterAnnotationGroup(spec.Annotations.PgBouncer, request.Annotations.PgBouncer) + // set any tolerations + spec.Tolerations = request.Tolerations + labels := make(map[string]string) labels[config.LABEL_NAME] = name - if !request.AutofailFlag || apiserver.Pgo.Cluster.DisableAutofail { - labels[config.LABEL_AUTOFAIL] = "false" - } else { - labels[config.LABEL_AUTOFAIL] = "true" - } - + spec.DisableAutofail = !request.AutofailFlag || apiserver.Pgo.Cluster.DisableAutofail // set whether or not the cluster will be a standby cluster spec.Standby = request.Standby // set the pgBackRest repository path spec.BackrestRepoPath = request.BackrestRepoPath - // pgbadger - set with global flag first then check for a user flag - labels[config.LABEL_BADGER] = strconv.FormatBool(apiserver.BadgerFlag) - if request.BadgerFlag { - labels[config.LABEL_BADGER] = "true" - } - - // pgBackRest is always set to true. This is here due to a time where - // pgBackRest was not the only way - labels[config.LABEL_BACKREST] = "true" + // enable the pgBadger sidecar based on what the user passed in or what + // the default value is. the user value takes precedence, unless it's false, + // as the legacy check only looked for enablement + spec.PGBadger = request.BadgerFlag || apiserver.BadgerFlag newInstance := &crv1.Pgcluster{ ObjectMeta: metav1.ObjectMeta{ @@ -1526,43 +1545,57 @@ func getClusterParams(request *msgs.CreateClusterRequest, name string, userLabel return newInstance } -func validateSecretFrom(secretname, user, ns string) error { +// validateSecretFrom is a legacy method that looks for all of the Secrets from +// a cluster defined by "clusterName" and determines if there are bootstrap +// secrets available, i.e.: +// +// - the Postgres superuser +// - the replication user +// - a user as defined by the "user" parameter +func validateSecretFrom(cluster *crv1.Pgcluster, secretFromClusterName string) error { ctx := context.TODO() - var err error - selector := config.LABEL_PG_CLUSTER + "=" + secretname - secrets, err := apiserver.Clientset. - CoreV1().Secrets(ns). 
- List(ctx, metav1.ListOptions{LabelSelector: selector}) + // grab all of the Secrets from the referenced cluster so we can determine if + // the Secrets that we are looking for are present + options := metav1.ListOptions{ + LabelSelector: fields.OneTermEqualSelector(config.LABEL_PG_CLUSTER, secretFromClusterName).String(), + } + + secrets, err := apiserver.Clientset.CoreV1().Secrets(cluster.Namespace).List(ctx, options) if err != nil { return err } - log.Debugf("secrets for %s", secretname) - pgprimaryFound := false - pgrootFound := false - pguserFound := false - - for _, s := range secrets.Items { - if s.ObjectMeta.Name == secretname+crv1.PrimarySecretSuffix { - pgprimaryFound = true - } else if s.ObjectMeta.Name == secretname+crv1.RootSecretSuffix { - pgrootFound = true - } else if s.ObjectMeta.Name == secretname+"-"+user+crv1.UserSecretSuffix { - pguserFound = true - } + // if no secrets are found, take an early exit + if len(secrets.Items) == 0 { + return fmt.Errorf("no secrets found for %q", secretFromClusterName) } - if !pgprimaryFound { - return errors.New(secretname + crv1.PrimarySecretSuffix + " not found") + + // see if all three of the secrets exist. this borrows from the legacy method + // of checking + found := map[string]bool{ + crv1.PGUserSuperuser: false, + crv1.PGUserReplication: false, + cluster.Spec.User: false, } - if !pgrootFound { - return errors.New(secretname + crv1.RootSecretSuffix + " not found") + + for _, secret := range secrets.Items { + found[crv1.PGUserSuperuser] = found[crv1.PGUserSuperuser] || + (secret.Name == crv1.UserSecretNameFromClusterName(secretFromClusterName, crv1.PGUserSuperuser)) + found[crv1.PGUserReplication] = found[crv1.PGUserReplication] || + (secret.Name == crv1.UserSecretNameFromClusterName(secretFromClusterName, crv1.PGUserReplication)) + found[cluster.Spec.User] = found[cluster.Spec.User] || + (secret.Name == crv1.UserSecretNameFromClusterName(secretFromClusterName, cluster.Spec.User)) } - if !pguserFound { - return errors.New(secretname + "-" + user + crv1.UserSecretSuffix + " not found") + + // if not all of the Secrets were found, return an error + for secretName, ok := range found { + if !ok { + return fmt.Errorf("could not find secret %q in cluster %q", secretName, secretFromClusterName) + } } - return err + return nil } func getReadyStatus(pod *v1.Pod) (string, bool) { @@ -1579,13 +1612,12 @@ func getReadyStatus(pod *v1.Pod) (string, bool) { equal = true } return fmt.Sprintf("%d/%d", readyCount, containerCount), equal - } func createWorkflowTask(clusterName, ns, pgouser string) (string, error) { ctx := context.TODO() - //create pgtask CRD + // create pgtask CRD spec := crv1.PgtaskSpec{} spec.Namespace = ns spec.Name = clusterName + "-" + crv1.PgtaskWorkflowCreateClusterType @@ -1621,9 +1653,7 @@ func createWorkflowTask(clusterName, ns, pgouser string) (string, error) { return spec.Parameters[crv1.PgtaskWorkflowID], err } -func getType(pod *v1.Pod, clusterName string) string { - - //log.Debugf("%v\n", pod.ObjectMeta.Labels) +func getType(pod *v1.Pod) string { if pod.ObjectMeta.Labels[config.LABEL_PGO_BACKREST_REPO] != "" { return msgs.PodTypePgbackrest } else if pod.ObjectMeta.Labels[config.LABEL_PGBOUNCER] != "" { @@ -1634,7 +1664,6 @@ func getType(pod *v1.Pod, clusterName string) string { return msgs.PodTypeReplica } return msgs.PodTypeUnknown - } func validateCustomConfig(configmapname, ns string) (bool, error) { @@ -1643,12 +1672,6 @@ func validateCustomConfig(configmapname, ns string) (bool, error) { return err == nil, err } 
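// NOTE: the validateSecretFrom rewrite above matches Secrets by name via
// crv1.UserSecretNameFromClusterName. Judging from the
// `fmt.Sprintf("%s-%s-secret", ...)` used in createUserSecret further down,
// the convention appears to be "<clusterName>-<username>-secret"; a
// hypothetical stand-in consistent with that convention:
func userSecretName(clusterName, username string) string {
	// e.g. ("hippo", "postgres") -> "hippo-postgres-secret"
	return fmt.Sprintf("%s-%s-secret", clusterName, username)
}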
-func existsGlobalConfig(ns string) bool { - ctx := context.TODO() - _, err := apiserver.Clientset.CoreV1().ConfigMaps(ns).Get(ctx, config.GLOBAL_CUSTOM_CONFIGMAP, metav1.GetOptions{}) - return err == nil -} - func getReplicas(cluster *crv1.Pgcluster, ns string) ([]msgs.ShowClusterReplica, error) { ctx := context.TODO() @@ -1691,11 +1714,9 @@ func getReplicas(cluster *crv1.Pgcluster, ns string) ([]msgs.ShowClusterReplica, // password length // // returns the secertname, password as well as any errors -func createUserSecret(request *msgs.CreateClusterRequest, cluster *crv1.Pgcluster, secretNameSuffix, username, password string) (string, string, error) { +func createUserSecret(request *msgs.CreateClusterRequest, cluster *crv1.Pgcluster, username, password string) (string, error) { ctx := context.TODO() - - // the secretName is just the combination cluster name and the secretNameSuffix - secretName := fmt.Sprintf("%s%s", cluster.Spec.Name, secretNameSuffix) + secretName := crv1.UserSecretName(cluster, username) // if the secret already exists, we can perform an early exit // if there is an error, we'll ignore it @@ -1704,7 +1725,7 @@ func createUserSecret(request *msgs.CreateClusterRequest, cluster *crv1.Pgcluste Get(ctx, secretName, metav1.GetOptions{}); err == nil { log.Infof("secret exists: [%s] - skipping", secretName) - return secretName, string(secret.Data["password"][:]), nil + return string(secret.Data["password"][:]), nil } // alright, go through the hierarchy and determine if we need to set the @@ -1716,15 +1737,14 @@ func createUserSecret(request *msgs.CreateClusterRequest, cluster *crv1.Pgcluste // if the "SecretFrom" parameter is set, then load the password from a prexisting password case request.SecretFrom != "": // set up the name of the secret that we are loading the secret from - secretFromSecretName := fmt.Sprintf("%s%s", request.SecretFrom, secretNameSuffix) + secretFromSecretName := fmt.Sprintf("%s-%s-secret", request.SecretFrom, username) // now attempt to load said secret oldPassword, err := util.GetPasswordFromSecret(apiserver.Clientset, cluster.Spec.Namespace, secretFromSecretName) - // if there is an error, abandon here, otherwise set the oldPassword as the // current password if err != nil { - return "", "", err + return "", err } password = oldPassword @@ -1738,10 +1758,9 @@ func createUserSecret(request *msgs.CreateClusterRequest, cluster *crv1.Pgcluste } generatedPassword, err := util.GeneratePassword(passwordLength) - // if the password fails to generate, return the error if err != nil { - return "", "", err + return "", err } password = generatedPassword @@ -1750,11 +1769,11 @@ func createUserSecret(request *msgs.CreateClusterRequest, cluster *crv1.Pgcluste // great, now we can create the secret! if we can't, return an error if err := util.CreateSecret(apiserver.Clientset, cluster.Spec.Name, secretName, username, password, cluster.Spec.Namespace); err != nil { - return "", "", err + return "", err } // otherwise, return the secret name, password - return secretName, password, nil + return password, nil } // UpdateCluster ... 
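// NOTE: the same default-first ServiceType switch now appears in
// CreateCluster above and in UpdateCluster and ScaleCluster below. A sketch
// of how it could be hoisted into one helper; hypothetical, since the patch
// deliberately inlines the switch at each call site:
func validateServiceType(serviceType v1.ServiceType) error {
	switch serviceType {
	case v1.ServiceTypeClusterIP, v1.ServiceTypeNodePort,
		v1.ServiceTypeLoadBalancer, v1.ServiceTypeExternalName, "":
		// an empty value means "keep the default" and is allowed
		return nil
	default:
		return fmt.Errorf("invalid service type %q", serviceType)
	}
}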
@@ -1848,7 +1867,7 @@ func UpdateCluster(request *msgs.UpdateClusterRequest) msgs.UpdateClusterRespons clusterList := crv1.PgclusterList{} - //get the clusters list + // get the clusters list if request.AllFlag { cl, err := apiserver.Clientset.CrunchydataV1().Pgclusters(request.Namespace).List(ctx, metav1.ListOptions{}) if err != nil { @@ -1885,15 +1904,47 @@ func UpdateCluster(request *msgs.UpdateClusterRequest) msgs.UpdateClusterRespons return response } - for _, cluster := range clusterList.Items { + for i := range clusterList.Items { + cluster := clusterList.Items[i] - //set autofail=true or false on each pgcluster CRD + // set --enable-autofail / --disable-autofail on each pgcluster CRD // Make the change based on the value of Autofail vis-a-vis UpdateClusterAutofailStatus switch request.Autofail { case msgs.UpdateClusterAutofailEnable: - cluster.ObjectMeta.Labels[config.LABEL_AUTOFAIL] = "true" + cluster.Spec.DisableAutofail = false case msgs.UpdateClusterAutofailDisable: - cluster.ObjectMeta.Labels[config.LABEL_AUTOFAIL] = "false" + cluster.Spec.DisableAutofail = true + case msgs.UpdateClusterAutofailDoNothing: // no-op + } + + // enable or disable the metrics collection sidecar + switch request.Metrics { + case msgs.UpdateClusterMetricsEnable: + cluster.Spec.Exporter = true + case msgs.UpdateClusterMetricsDisable: + cluster.Spec.Exporter = false + case msgs.UpdateClusterMetricsDoNothing: // this is never reached -- no-op + } + + // enable or disable the pgBadger sidecar + switch request.PGBadger { + case msgs.UpdateClusterPGBadgerEnable: + cluster.Spec.PGBadger = true + case msgs.UpdateClusterPGBadgerDisable: + cluster.Spec.PGBadger = false + case msgs.UpdateClusterPGBadgerDoNothing: // this is never reached -- no-op + } + + // set the optional ServiceType parameter + switch request.ServiceType { + default: + response.Status.Code = msgs.Error + response.Status.Msg = fmt.Sprintf("invalid service type %q", request.ServiceType) + return response + case v1.ServiceTypeClusterIP, v1.ServiceTypeNodePort, + v1.ServiceTypeLoadBalancer, v1.ServiceTypeExternalName: + cluster.Spec.ServiceType = request.ServiceType + case "": // no-op, well, no change } // enable or disable standby mode based on UpdateClusterStandbyStatus provided in @@ -1909,15 +1960,22 @@ func UpdateCluster(request *msgs.UpdateClusterRequest) msgs.UpdateClusterRespons } case msgs.UpdateClusterStandbyDisable: cluster.Spec.Standby = false + case msgs.UpdateClusterStandbyDoNothing: // no-op } // return an error if attempting to enable standby for a cluster that does not have the // required S3 settings - if cluster.Spec.Standby && - !strings.Contains(cluster.Spec.UserLabels[config.LABEL_BACKREST_STORAGE_TYPE], "s3") { - response.Status.Code = msgs.Error - response.Status.Msg = "Backrest storage type 's3' must be enabled in order to enable " + - "standby mode" - return response + if cluster.Spec.Standby { + s3Enabled := false + for _, storageType := range cluster.Spec.BackrestStorageTypes { + s3Enabled = s3Enabled || (storageType == crv1.BackrestStorageTypeS3) + } + + if !s3Enabled { + response.Status.Code = msgs.Error + response.Status.Msg = "Backrest storage type 's3' must be enabled in order to enable " + + "standby mode" + return response + } } // if a startup or shutdown was requested then update the pgcluster spec accordingly @@ -2011,6 +2069,16 @@ func UpdateCluster(request *msgs.UpdateClusterRequest) msgs.UpdateClusterRespons cluster.Spec.ExporterResources[v1.ResourceMemory] = quantity } + // an odd one...if rotating the 
password is requested, we can perform this + as an operational action and handle it here. + // if it fails...just put an error in the logs. + if cluster.Spec.Exporter && request.ExporterRotatePassword { + if err := clusteroperator.RotateExporterPassword(apiserver.Clientset, apiserver.RESTConfig, + &cluster); err != nil { + log.Error(err) + } + } + // set any user-defined annotations // go through each annotation grouping and make the appropriate changes in the // equivalent cluster annotation group @@ -2039,6 +2107,33 @@ func UpdateCluster(request *msgs.UpdateClusterRequest) msgs.UpdateClusterRespons cluster.Spec.TablespaceMounts[tablespace.Name] = storageSpec } + // Handle any tolerations. This is fun. So we will have to go through both + // the toleration addition list as well as the toleration subtraction list. + // + // First, we will remove any tolerations that are slated for removal + if len(request.TolerationsDelete) > 0 { + tolerations := make([]v1.Toleration, 0) + + for _, toleration := range cluster.Spec.Tolerations { + delete := false + + for _, tolerationDelete := range request.TolerationsDelete { + delete = delete || (reflect.DeepEqual(toleration, tolerationDelete)) + } + + // if delete does not match, then we can include this toleration in any + // updates + if !delete { + tolerations = append(tolerations, toleration) + } + } + + cluster.Spec.Tolerations = tolerations + } + + // now, add any new tolerations to the spec + cluster.Spec.Tolerations = append(cluster.Spec.Tolerations, request.Tolerations...) + if _, err := apiserver.Clientset.CrunchydataV1().Pgclusters(request.Namespace).Update(ctx, &cluster, metav1.UpdateOptions{}); err != nil { response.Status.Code = msgs.Error response.Status.Msg = err.Error() @@ -2055,22 +2150,29 @@ func GetPrimaryAndReplicaPods(cluster *crv1.Pgcluster, ns string) ([]msgs.ShowCl ctx := context.TODO() output := make([]msgs.ShowClusterPod, 0) + // find all of the Pods that represent Postgres primary and replicas. 
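// NOTE: the ListOptions assembled just below push the "running only" filter
// to the API server through a field selector rather than filtering
// client-side. A standalone sketch of the same pattern; illustrative only,
// the label selector argument is a placeholder:
func runningPodOptions(labelSelector string) metav1.ListOptions {
	return metav1.ListOptions{
		// server-side filter: only Pods whose status.phase is Running
		FieldSelector: fields.OneTermEqualSelector("status.phase", string(v1.PodRunning)).String(),
		LabelSelector: labelSelector,
	}
}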
+ // only consider running Pods selector := config.LABEL_SERVICE_NAME + "=" + cluster.Spec.Name + "," + config.LABEL_DEPLOYMENT_NAME - log.Debugf("selector for GetPrimaryAndReplicaPods is %s", selector) - pods, err := apiserver.Clientset.CoreV1().Pods(ns).List(ctx, metav1.ListOptions{LabelSelector: selector}) + options := metav1.ListOptions{ + FieldSelector: fields.OneTermEqualSelector("status.phase", string(v1.PodRunning)).String(), + LabelSelector: selector, + } + + pods, err := apiserver.Clientset.CoreV1().Pods(ns).List(ctx, options) if err != nil { return output, err } - for _, p := range pods.Items { + for i := range pods.Items { + p := &pods.Items[i] d := msgs.ShowClusterPod{} d.Name = p.Name d.Phase = string(p.Status.Phase) d.NodeName = p.Spec.NodeName - d.ReadyStatus, d.Ready = getReadyStatus(&p) + d.ReadyStatus, d.Ready = getReadyStatus(p) d.Primary = false - d.Type = getType(&p, cluster.Spec.Name) + d.Type = getType(p) if d.Type == msgs.PodTypePrimary { d.Primary = true } @@ -2078,21 +2180,25 @@ func GetPrimaryAndReplicaPods(cluster *crv1.Pgcluster, ns string) ([]msgs.ShowCl } selector = config.LABEL_SERVICE_NAME + "=" + cluster.Spec.Name + "-replica" + "," + config.LABEL_DEPLOYMENT_NAME - log.Debugf("selector for GetPrimaryAndReplicaPods is %s", selector) + options = metav1.ListOptions{ + FieldSelector: fields.OneTermEqualSelector("status.phase", string(v1.PodRunning)).String(), + LabelSelector: selector, + } - pods, err = apiserver.Clientset.CoreV1().Pods(ns).List(ctx, metav1.ListOptions{LabelSelector: selector}) + pods, err = apiserver.Clientset.CoreV1().Pods(ns).List(ctx, options) if err != nil { return output, err } - for _, p := range pods.Items { + for i := range pods.Items { + p := &pods.Items[i] d := msgs.ShowClusterPod{} d.Name = p.Name d.Phase = string(p.Status.Phase) d.NodeName = p.Spec.NodeName - d.ReadyStatus, d.Ready = getReadyStatus(&p) + d.ReadyStatus, d.Ready = getReadyStatus(p) d.Primary = false - d.Type = getType(&p, cluster.Spec.Name) + d.Type = getType(p) if d.Type == msgs.PodTypePrimary { d.Primary = true } @@ -2101,7 +2207,6 @@ func GetPrimaryAndReplicaPods(cluster *crv1.Pgcluster, ns string) ([]msgs.ShowCl } return output, err - } // setClusterAnnotationGroup helps with setting the specific annotation group @@ -2119,20 +2224,33 @@ func setClusterAnnotationGroup(annotationGroup, annotations map[string]string) { // validateBackrestStorageTypeOnCreate validates the pgbackrest storage type specified when // a new cluster. This includes ensuring the type provided is valid, and that the required // configuration settings (s3 bucket, region, etc.) are also present -func validateBackrestStorageTypeOnCreate(request *msgs.CreateClusterRequest) error { +func validateBackrestStorageTypeOnCreate(request *msgs.CreateClusterRequest) ([]crv1.BackrestStorageType, error) { + storageTypes, err := crv1.ParseBackrestStorageTypes(request.BackrestStorageType) - requestBackRestStorageType := request.BackrestStorageType + if err != nil { + // if the error is due to no storage types elected, return an empty storage + // type slice. otherwise return an error + if errors.Is(err, crv1.ErrStorageTypesEmpty) { + return []crv1.BackrestStorageType{}, nil + } - if requestBackRestStorageType != "" && !util.IsValidBackrestStorageType(requestBackRestStorageType) { - return fmt.Errorf("Invalid value provided for pgBackRest storage type. 
The following values are allowed: %s", - "\""+strings.Join(apiserver.GetBackrestStorageTypes(), "\", \"")+"\"") - } else if strings.Contains(requestBackRestStorageType, "s3") && isMissingS3Config(request) { - return errors.New("A configuration setting for AWS S3 storage is missing. Values must be " + - "provided for the S3 bucket, S3 endpoint and S3 region in order to use the 's3' " + - "storage type with pgBackRest.") + return nil, err } - return nil + // a special check -- if S3 storage is included, check to see if all of the + // appropriate settings are in place + for _, storageType := range storageTypes { + if storageType == crv1.BackrestStorageTypeS3 { + if isMissingS3Config(request) { + return nil, fmt.Errorf("A configuration setting for AWS S3 storage is missing. Values must be " + + "provided for the S3 bucket, S3 endpoint and S3 region in order to use the 's3' " + + "storage type with pgBackRest.") + } + break + } + } + + return storageTypes, nil } // validateClusterTLS validates the parameters that allow a user to enable TLS @@ -2140,12 +2258,25 @@ func validateBackrestStorageTypeOnCreate(request *msgs.CreateClusterRequest) err func validateClusterTLS(request *msgs.CreateClusterRequest) error { ctx := context.TODO() - // if ReplicationTLSSecret is set, but neither TLSSecret nor CASecret is not - // set, then return + // if ReplicationTLSSecret is set, but neither TLSSecret nor CASecret is set + // then return if request.ReplicationTLSSecret != "" && (request.TLSSecret == "" || request.CASecret == "") { return fmt.Errorf("Both TLS secret and CA secret must be set in order to enable certificate-based authentication for replication") } + // if PgBouncerTLSSecret is set, return if: + // a) pgBouncer is not enabled OR + // b) neither TLSSecret nor CASecret is set + if request.PgBouncerTLSSecret != "" { + if !request.PgbouncerFlag { + return fmt.Errorf("pgBouncer must be enabled in order to enable TLS for pgBouncer") + } + + if request.TLSSecret == "" || request.CASecret == "" { + return fmt.Errorf("Both TLS secret and CA secret must be set in order to enable TLS for pgBouncer") + } + } + // if TLSOnly is not set and neither TLSSecret no CASecret are set, just return if !request.TLSOnly && request.TLSSecret == "" && request.CASecret == "" { return nil @@ -2184,6 +2315,15 @@ func validateClusterTLS(request *msgs.CreateClusterRequest) error { } } + // then, if set, the pgBouncer TLS secret + if request.PgBouncerTLSSecret != "" { + if _, err := apiserver.Clientset. + CoreV1().Secrets(request.Namespace). + Get(ctx, request.PgBouncerTLSSecret, metav1.GetOptions{}); err != nil { + return err + } + } + // after this, we are validated! return nil } diff --git a/internal/apiserver/clusterservice/clusterservice.go b/internal/apiserver/clusterservice/clusterservice.go index d0f31df636..9ab3c1c9eb 100644 --- a/internal/apiserver/clusterservice/clusterservice.go +++ b/internal/apiserver/clusterservice/clusterservice.go @@ -1,7 +1,7 @@ package clusterservice /* -Copyright 2017 - 2020 Crunchy Data Solutions, Inc. +Copyright 2017 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at @@ -77,19 +77,18 @@ func CreateClusterHandler(w http.ResponseWriter, r *http.Request) { if request.ClientVersion != msgs.PGO_VERSION { resp.Status.Code = msgs.Error resp.Status.Msg = apiserver.VERSION_MISMATCH_ERROR - json.NewEncoder(w).Encode(resp) + _ = json.NewEncoder(w).Encode(resp) return } ns, err = apiserver.GetNamespace(apiserver.Clientset, username, request.Namespace) if err != nil { resp.Status.Code = msgs.Error resp.Status.Msg = err.Error() - json.NewEncoder(w).Encode(resp) + _ = json.NewEncoder(w).Encode(resp) return } resp = CreateCluster(&request, ns, username) - json.NewEncoder(w).Encode(resp) - + _ = json.NewEncoder(w).Encode(resp) } // ShowClusterHandler ... @@ -150,7 +149,7 @@ func ShowClusterHandler(w http.ResponseWriter, r *http.Request) { if clientVersion != msgs.PGO_VERSION { resp.Status = msgs.Status{Code: msgs.Error, Msg: apiserver.VERSION_MISMATCH_ERROR} resp.Results = make([]msgs.ShowClusterDetail, 0) - json.NewEncoder(w).Encode(resp) + _ = json.NewEncoder(w).Encode(resp) return } @@ -158,13 +157,12 @@ func ShowClusterHandler(w http.ResponseWriter, r *http.Request) { if err != nil { resp.Status = msgs.Status{Code: msgs.Error, Msg: err.Error()} resp.Results = make([]msgs.ShowClusterDetail, 0) - json.NewEncoder(w).Encode(resp) + _ = json.NewEncoder(w).Encode(resp) return } resp = ShowCluster(clustername, selector, ccpimagetag, ns, allflag) - json.NewEncoder(w).Encode(resp) - + _ = json.NewEncoder(w).Encode(resp) } // DeleteClusterHandler ... @@ -225,19 +223,18 @@ func DeleteClusterHandler(w http.ResponseWriter, r *http.Request) { if clientVersion != msgs.PGO_VERSION { resp.Status = msgs.Status{Code: msgs.Error, Msg: apiserver.VERSION_MISMATCH_ERROR} resp.Results = make([]string, 0) - json.NewEncoder(w).Encode(resp) + _ = json.NewEncoder(w).Encode(resp) return } ns, err = apiserver.GetNamespace(apiserver.Clientset, username, namespace) if err != nil { resp.Status = msgs.Status{Code: msgs.Error, Msg: err.Error()} resp.Results = make([]string, 0) - json.NewEncoder(w).Encode(resp) + _ = json.NewEncoder(w).Encode(resp) return } resp = DeleteCluster(clustername, selector, deleteData, deleteBackups, ns, username) - json.NewEncoder(w).Encode(resp) - + _ = json.NewEncoder(w).Encode(resp) } // TestClusterHandler ... @@ -290,24 +287,24 @@ func TestClusterHandler(w http.ResponseWriter, r *http.Request) { if clientVersion != msgs.PGO_VERSION { resp.Status = msgs.Status{Code: msgs.Error, Msg: apiserver.VERSION_MISMATCH_ERROR} - json.NewEncoder(w).Encode(resp) + _ = json.NewEncoder(w).Encode(resp) return } ns, err = apiserver.GetNamespace(apiserver.Clientset, username, namespace) if err != nil { resp.Status = msgs.Status{Code: msgs.Error, Msg: err.Error()} - json.NewEncoder(w).Encode(resp) + _ = json.NewEncoder(w).Encode(resp) return } resp = TestCluster(clustername, selector, ns, username, request.AllFlag) - json.NewEncoder(w).Encode(resp) + _ = json.NewEncoder(w).Encode(resp) } // UpdateClusterHandler ... 
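// NOTE: the `_ = json.NewEncoder(w).Encode(...)` edits in these handlers
// acknowledge an error that cannot usefully be returned: the status line is
// already written by the time Encode runs. A hedged sketch of a helper that
// would centralize that decision; hypothetical, not part of this patch:
func writeJSON(w http.ResponseWriter, v interface{}) {
	if err := json.NewEncoder(w).Encode(v); err != nil {
		// the response is already in flight; logging is the only recourse
		log.Errorf("could not encode response: %v", err)
	}
}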
-// pgo update cluster mycluster --autofail=true -// pgo update cluster --selector=env=research --autofail=false +// pgo update cluster mycluster --enable-autofail +// pgo update cluster --selector=env=research --disable-autofail // returns a UpdateClusterResponse func UpdateClusterHandler(w http.ResponseWriter, r *http.Request) { // swagger:operation POST /clustersupdate clusterservice clustersupdate @@ -352,7 +349,7 @@ func UpdateClusterHandler(w http.ResponseWriter, r *http.Request) { if clientVersion != msgs.PGO_VERSION { resp.Status = msgs.Status{Code: msgs.Error, Msg: apiserver.VERSION_MISMATCH_ERROR} resp.Results = make([]string, 0) - json.NewEncoder(w).Encode(resp) + _ = json.NewEncoder(w).Encode(resp) return } @@ -360,11 +357,10 @@ func UpdateClusterHandler(w http.ResponseWriter, r *http.Request) { if err != nil { resp.Status = msgs.Status{Code: msgs.Error, Msg: err.Error()} resp.Results = make([]string, 0) - json.NewEncoder(w).Encode(resp) + _ = json.NewEncoder(w).Encode(resp) return } resp = UpdateCluster(&request) - json.NewEncoder(w).Encode(resp) - + _ = json.NewEncoder(w).Encode(resp) } diff --git a/internal/apiserver/clusterservice/scaleimpl.go b/internal/apiserver/clusterservice/scaleimpl.go index 4148283bfb..2c98c02fef 100644 --- a/internal/apiserver/clusterservice/scaleimpl.go +++ b/internal/apiserver/clusterservice/scaleimpl.go @@ -1,7 +1,7 @@ package clusterservice /* -Copyright 2017 - 2020 Crunchy Data Solutions, Inc. +Copyright 2017 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -18,7 +18,6 @@ limitations under the License. import ( "context" "fmt" - "strconv" "strings" "github.com/crunchydata/postgres-operator/internal/apiserver" @@ -28,26 +27,27 @@ import ( msgs "github.com/crunchydata/postgres-operator/pkg/apiservermsgs" log "github.com/sirupsen/logrus" + v1 "k8s.io/api/core/v1" kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) // ScaleCluster ... 
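// NOTE: ScaleCluster now takes a single ClusterScaleRequest rather than
// eight positional strings, and the replica count is an int instead of a
// string to parse. A sketch of how a caller might build the request, using
// only fields that appear in this patch; the cluster name and namespace are
// hypothetical:
func scaleExample() msgs.ClusterScaleResponse {
	req := msgs.ClusterScaleRequest{
		Name:          "hippo", // hypothetical cluster
		Namespace:     "pgo",   // hypothetical namespace
		ClientVersion: msgs.PGO_VERSION,
		ReplicaCount:  2,
	}
	return ScaleCluster(req, "someuser")
}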
-func ScaleCluster(name, replicaCount, storageConfig, nodeLabel, - ccpImageTag, serviceType, ns, pgouser string) msgs.ClusterScaleResponse { +func ScaleCluster(request msgs.ClusterScaleRequest, pgouser string) msgs.ClusterScaleResponse { ctx := context.TODO() var err error response := msgs.ClusterScaleResponse{} response.Status = msgs.Status{Code: msgs.Ok, Msg: ""} - if name == "all" { + if request.ReplicaCount < 1 { + log.Error("replica count less than 1, no replicas added") response.Status.Code = msgs.Error - response.Status.Msg = "all is not allowed for the scale command" + response.Status.Msg = "replica count must be at least 1" return response } - cluster, err := apiserver.Clientset.CrunchydataV1().Pgclusters(ns).Get(ctx, name, metav1.GetOptions{}) + cluster, err := apiserver.Clientset.CrunchydataV1().Pgclusters(request.Namespace).Get(ctx, request.Name, metav1.GetOptions{}) if kerrors.IsNotFound(err) { log.Error("no clusters found") @@ -73,71 +73,57 @@ func ScaleCluster(name, replicaCount, storageConfig, nodeLabel, spec := crv1.PgreplicaSpec{} - //refer to the cluster's replica storage setting by default + // refer to the cluster's replica storage setting by default spec.ReplicaStorage = cluster.Spec.ReplicaStorage - //allow for user override - if storageConfig != "" { - spec.ReplicaStorage, _ = apiserver.Pgo.GetStorageSpec(storageConfig) + // allow for user override + if request.StorageConfig != "" { + spec.ReplicaStorage, _ = apiserver.Pgo.GetStorageSpec(request.StorageConfig) } spec.UserLabels = cluster.Spec.UserLabels - if ccpImageTag != "" { - spec.UserLabels[config.LABEL_CCP_IMAGE_TAG_KEY] = ccpImageTag - } - if serviceType != "" { - if serviceType != config.DEFAULT_SERVICE_TYPE && - serviceType != config.NODEPORT_SERVICE_TYPE && - serviceType != config.LOAD_BALANCER_SERVICE_TYPE { - response.Status.Code = msgs.Error - response.Status.Msg = "error --service-type should be either ClusterIP, NodePort, or LoadBalancer " - return response - } - spec.UserLabels[config.LABEL_SERVICE_TYPE] = serviceType + if request.CCPImageTag != "" { + spec.UserLabels[config.LABEL_CCP_IMAGE_TAG_KEY] = request.CCPImageTag } - //set replica node lables to blank to start with, then check for overrides - spec.UserLabels[config.LABEL_NODE_LABEL_KEY] = "" - spec.UserLabels[config.LABEL_NODE_LABEL_VALUE] = "" + // check the optional ServiceType parameter + switch request.ServiceType { + default: + response.Status.Code = msgs.Error + response.Status.Msg = fmt.Sprintf("invalid service type %q", request.ServiceType) + return response + case v1.ServiceTypeClusterIP, v1.ServiceTypeNodePort, + v1.ServiceTypeLoadBalancer, v1.ServiceTypeExternalName, "": + spec.ServiceType = request.ServiceType + } // validate & parse nodeLabel if exists - if nodeLabel != "" { - if err = apiserver.ValidateNodeLabel(nodeLabel); err != nil { + if request.NodeLabel != "" { + if err = apiserver.ValidateNodeLabel(request.NodeLabel); err != nil { response.Status.Code = msgs.Error response.Status.Msg = err.Error() return response } - parts := strings.Split(nodeLabel, "=") - spec.UserLabels[config.LABEL_NODE_LABEL_KEY] = parts[0] - spec.UserLabels[config.LABEL_NODE_LABEL_VALUE] = parts[1] + nodeLabel := strings.Split(request.NodeLabel, "=") + spec.NodeAffinity = util.GenerateNodeAffinity(request.NodeAffinityType, nodeLabel[0], []string{nodeLabel[1]}) - log.Debug("using user entered node label for replica creation") + log.Debugf("using node label %s", request.NodeLabel) } labels := make(map[string]string) labels[config.LABEL_PG_CLUSTER] = 
cluster.Spec.Name spec.ClusterName = cluster.Spec.Name - - var rc int - rc, err = strconv.Atoi(replicaCount) - if err != nil { - log.Error(err.Error()) - response.Status.Code = msgs.Error - response.Status.Msg = err.Error() - return response - } + spec.Tolerations = request.Tolerations labels[config.LABEL_PGOUSER] = pgouser - labels[config.LABEL_PG_CLUSTER_IDENTIFIER] = cluster.ObjectMeta.Labels[config.LABEL_PG_CLUSTER_IDENTIFIER] - - for i := 0; i < rc; i++ { + for i := 0; i < request.ReplicaCount; i++ { uniqueName := util.RandStringBytesRmndr(4) labels[config.LABEL_NAME] = cluster.Spec.Name + "-" + uniqueName - spec.Namespace = ns + spec.Namespace = cluster.Namespace spec.Name = labels[config.LABEL_NAME] newInstance := &crv1.Pgreplica{ @@ -152,8 +138,8 @@ func ScaleCluster(name, replicaCount, storageConfig, nodeLabel, }, } - _, err = apiserver.Clientset.CrunchydataV1().Pgreplicas(ns).Create(ctx, newInstance, metav1.CreateOptions{}) - if err != nil { + if _, err := apiserver.Clientset.CrunchydataV1().Pgreplicas(cluster.Namespace).Create(ctx, + newInstance, metav1.CreateOptions{}); err != nil { log.Error(" in creating Pgreplica instance" + err.Error()) } @@ -204,7 +190,6 @@ func ScaleQuery(name, ns string) msgs.ScaleQueryResponse { } replicationStatusResponse, err := util.ReplicationStatus(replicationStatusRequest, false, true) - // if an error is return, log the message, and return the response if err != nil { log.Error(err.Error()) @@ -301,15 +286,8 @@ func ScaleDown(deleteData bool, clusterName, replicaName, ns string) msgs.ScaleD return response } - //create the rmdata task which does the cleanup - - clusterPGHAScope := cluster.ObjectMeta.Labels[config.LABEL_PGHA_SCOPE] - deleteBackups := false - isReplica := true - isBackup := false - taskName := replicaName + "-rmdata" - err = apiserver.CreateRMDataTask(clusterName, replicaName, taskName, deleteBackups, deleteData, isReplica, isBackup, ns, clusterPGHAScope) - if err != nil { + // create the rmdata task which does the cleanup + if err := util.CreateRMDataTask(apiserver.Clientset, cluster, replicaName, false, deleteData, true, false); err != nil { response.Status.Code = msgs.Error response.Status.Msg = err.Error() return response diff --git a/internal/apiserver/clusterservice/scaleservice.go b/internal/apiserver/clusterservice/scaleservice.go index 92db853216..414e455623 100644 --- a/internal/apiserver/clusterservice/scaleservice.go +++ b/internal/apiserver/clusterservice/scaleservice.go @@ -1,7 +1,7 @@ package clusterservice /* -Copyright 2017 - 2020 Crunchy Data Solutions, Inc. +Copyright 2017 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -40,99 +40,84 @@ func ScaleClusterHandler(w http.ResponseWriter, r *http.Request) { // produces: // - application/json // parameters: - // - name: "name" - // description: "Cluster Name" - // in: "path" - // type: "string" - // required: true - // - name: "version" - // description: "Client Version" - // in: "path" - // type: "string" - // required: true - // - name: "namespace" - // description: "Namespace" - // in: "path" - // type: "string" - // required: true - // - name: "replica-count" - // description: "The replica count to apply to the clusters." - // in: "path" - // type: "int" - // required: true - // - name: "storage-config" - // description: "The service type to use in the replica Service. 
If not set, the default in pgo.yaml will be used."
-	//   in: "path"
-	//   type: "string"
-	//   required: false
-	// - name: "node-label"
-	//   description: "The node label (key) to use in placing the replica database. If not set, any node is used."
-	//   in: "path"
-	//   type: "string"
-	//   required: false
-	// - name: "service-type"
-	//   description: "The service type to use in the replica Service. If not set, the default in pgo.yaml will be used."
-	//   in: "path"
-	//   type: "string"
-	//   required: false
-	// - name: "ccp-image-tag"
-	//   description: "The CCPImageTag to use for cluster creation. If specified, overrides the .pgo.yaml setting."
-	//   in: "path"
-	//   type: "string"
-	//   required: false
+	// - name: "PostgreSQL Scale Cluster"
+	//   in: "body"
+	//   schema:
+	//     "$ref": "#/definitions/ClusterScaleRequest"
	// responses:
	//   '200':
	//     description: Output
	//     schema:
	//       "$ref": "#/definitions/ClusterScaleResponse"
-	//SCALE_CLUSTER_PERM
-	// This is a pain to document because it doesn't use a struct...
-	var ns string
-	vars := mux.Vars(r)
-
-	clusterName := vars[config.LABEL_NAME]
-	namespace := r.URL.Query().Get(config.LABEL_NAMESPACE)
-	replicaCount := r.URL.Query().Get(config.LABEL_REPLICA_COUNT)
-	storageConfig := r.URL.Query().Get(config.LABEL_STORAGE_CONFIG)
-	nodeLabel := r.URL.Query().Get(config.LABEL_NODE_LABEL)
-	serviceType := r.URL.Query().Get(config.LABEL_SERVICE_TYPE)
-	clientVersion := r.URL.Query().Get(config.LABEL_VERSION)
-	ccpImageTag := r.URL.Query().Get(config.LABEL_CCP_IMAGE_TAG_KEY)
-
-	log.Debugf("ScaleClusterHandler parameters name [%s] namespace [%s] replica-count [%s] "+
-		"storage-config [%s] node-label [%s] service-type [%s] version [%s]"+
-		"ccp-image-tag [%s]", clusterName, namespace, replicaCount,
-		storageConfig, nodeLabel, serviceType, clientVersion, ccpImageTag)
+	log.Debug("clusterservice.ScaleClusterHandler called")

+	// first, check that the requesting user is authorized to make this request
	username, err := apiserver.Authn(apiserver.SCALE_CLUSTER_PERM, w, r)
	if err != nil {
		return
	}

-	w.WriteHeader(http.StatusOK)
+	// decode the request parameters
+	request := msgs.ClusterScaleRequest{}
+
+	if err := json.NewDecoder(r.Body).Decode(&request); err != nil {
+		_ = json.NewEncoder(w).Encode(msgs.ClusterScaleResponse{
+			Status: msgs.Status{
+				Code: msgs.Error,
+				Msg: err.Error(),
+			},
+		})
+		return
+	}
+
+	// set some of the headers; ideally the HTTP status would not be set this
+	// early, before the outcome of the request is known
+	w.Header().Set("WWW-Authenticate", `Basic realm="Restricted"`)
	w.Header().Set("Content-Type", "application/json")
+	w.WriteHeader(http.StatusOK)

-	resp := msgs.ClusterScaleResponse{}
-	resp.Status = msgs.Status{Code: msgs.Ok, Msg: ""}
+	// determine if this is the correct client version
+	if request.ClientVersion != msgs.PGO_VERSION {
+		_ = json.NewEncoder(w).Encode(msgs.ClusterScaleResponse{
+			Status: msgs.Status{
+				Code: msgs.Error,
+				Msg: apiserver.VERSION_MISMATCH_ERROR,
+			},
+		})
+		return
+	}

-	if clientVersion != msgs.PGO_VERSION {
-		resp.Status = msgs.Status{Code: msgs.Error, Msg: apiserver.VERSION_MISMATCH_ERROR}
-		json.NewEncoder(w).Encode(resp)
+	// ensure that the user has access to this namespace. if not, error out
+	if _, err := apiserver.GetNamespace(apiserver.Clientset, username, request.Namespace); err != nil {
+		_ = json.NewEncoder(w).Encode(msgs.ClusterScaleResponse{
+			Status: msgs.Status{
+				Code: msgs.Error,
+				Msg: err.Error(),
+			},
+		})
		return
	}

-	ns, err = apiserver.GetNamespace(apiserver.Clientset, username, namespace)
-	if err != nil {
-		resp.Status = msgs.Status{Code: msgs.Error, Msg: err.Error()}
-		json.NewEncoder(w).Encode(resp)
+	// ensure that the cluster name is set in the URL, as it takes precedence
+	// over the request parameters
+	vars := mux.Vars(r)
+	clusterName, ok := vars[config.LABEL_NAME]
+
+	if !ok {
+		_ = json.NewEncoder(w).Encode(msgs.ClusterScaleResponse{
+			Status: msgs.Status{
+				Code: msgs.Error,
+				Msg: "cluster name required in URL",
+			},
+		})
		return
	}

-	// TODO too many params need to create a struct for this
-	resp = ScaleCluster(clusterName, replicaCount, storageConfig, nodeLabel,
-		ccpImageTag, serviceType, ns, username)
+	request.Name = clusterName
+
+	response := ScaleCluster(request, username)

-	json.NewEncoder(w).Encode(resp)
+	_ = json.NewEncoder(w).Encode(response)
}

// ScaleQueryHandler ...
@@ -190,19 +175,19 @@ func ScaleQueryHandler(w http.ResponseWriter, r *http.Request) {
	if clientVersion != msgs.PGO_VERSION {
		resp.Status = msgs.Status{Code: msgs.Error, Msg: apiserver.VERSION_MISMATCH_ERROR}
-		json.NewEncoder(w).Encode(resp)
+		_ = json.NewEncoder(w).Encode(resp)
		return
	}

	ns, err = apiserver.GetNamespace(apiserver.Clientset, username, namespace)
	if err != nil {
		resp.Status = msgs.Status{Code: msgs.Error, Msg: err.Error()}
-		json.NewEncoder(w).Encode(resp)
+		_ = json.NewEncoder(w).Encode(resp)
		return
	}

	resp = ScaleQuery(clusterName, ns)
-	json.NewEncoder(w).Encode(resp)
+	_ = json.NewEncoder(w).Encode(resp)
}

// ScaleDownHandler ...
@@ -273,23 +258,23 @@ func ScaleDownHandler(w http.ResponseWriter, r *http.Request) {
	deleteData, err := strconv.ParseBool(tmp)
	if err != nil {
		resp.Status = msgs.Status{Code: msgs.Error, Msg: err.Error()}
-		json.NewEncoder(w).Encode(resp)
+		_ = json.NewEncoder(w).Encode(resp)
		return
	}

	if clientVersion != msgs.PGO_VERSION {
		resp.Status = msgs.Status{Code: msgs.Error, Msg: apiserver.VERSION_MISMATCH_ERROR}
-		json.NewEncoder(w).Encode(resp)
+		_ = json.NewEncoder(w).Encode(resp)
		return
	}

	ns, err = apiserver.GetNamespace(apiserver.Clientset, username, namespace)
	if err != nil {
		resp.Status = msgs.Status{Code: msgs.Error, Msg: err.Error()}
-		json.NewEncoder(w).Encode(resp)
+		_ = json.NewEncoder(w).Encode(resp)
		return
	}

	resp = ScaleDown(deleteData, clusterName, replicaName, ns)
-	json.NewEncoder(w).Encode(resp)
+	_ = json.NewEncoder(w).Encode(resp)
}
diff --git a/internal/apiserver/common.go b/internal/apiserver/common.go
index 15c070e0dc..37395cb8c8 100644
--- a/internal/apiserver/common.go
+++ b/internal/apiserver/common.go
@@ -1,7 +1,7 @@
 package apiserver

 /*
-Copyright 2018 - 2020 Crunchy Data Solutions, Inc.
+Copyright 2018 - 2023 Crunchy Data Solutions, Inc.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
You may obtain a copy of the License at @@ -19,10 +19,11 @@ import ( "context" "errors" "fmt" - "strconv" + "strings" - "github.com/crunchydata/postgres-operator/internal/config" + pgpassword "github.com/crunchydata/postgres-operator/internal/postgres/password" crv1 "github.com/crunchydata/postgres-operator/pkg/apis/crunchydata.com/v1" + log "github.com/sirupsen/logrus" kerrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/resource" @@ -41,10 +42,12 @@ const ( ) var ( - backrestStorageTypes = []string{"local", "s3"} // ErrDBContainerNotFound is an error that indicates that a "database" container // could not be found in a specific pod ErrDBContainerNotFound = errors.New("\"database\" container not found in pod") + // ErrPasswordTypeInvalid is used when a string that's not included in + // PasswordTypeStrings is used + ErrPasswordTypeInvalid = errors.New("invalid password type. choices are (md5, scram-sha-256)") // ErrStandbyNotAllowed contains the error message returned when an API call is not // permitted because it involves a cluster that is in standby mode ErrStandbyNotAllowed = errors.New("Action not permitted because standby mode is enabled") @@ -55,47 +58,25 @@ var ( "Operator installation") ) -func CreateRMDataTask(clusterName, replicaName, taskName string, deleteBackups, deleteData, isReplica, isBackup bool, ns, clusterPGHAScope string) error { - ctx := context.TODO() - var err error - - //create pgtask CRD - spec := crv1.PgtaskSpec{} - spec.Namespace = ns - spec.Name = taskName - spec.TaskType = crv1.PgtaskDeleteData - - spec.Parameters = make(map[string]string) - spec.Parameters[config.LABEL_DELETE_DATA] = strconv.FormatBool(deleteData) - spec.Parameters[config.LABEL_DELETE_BACKUPS] = strconv.FormatBool(deleteBackups) - spec.Parameters[config.LABEL_IS_REPLICA] = strconv.FormatBool(isReplica) - spec.Parameters[config.LABEL_IS_BACKUP] = strconv.FormatBool(isBackup) - spec.Parameters[config.LABEL_PG_CLUSTER] = clusterName - spec.Parameters[config.LABEL_REPLICA_NAME] = replicaName - spec.Parameters[config.LABEL_PGHA_SCOPE] = clusterPGHAScope - - newInstance := &crv1.Pgtask{ - ObjectMeta: metav1.ObjectMeta{ - Name: taskName, - }, - Spec: spec, - } - newInstance.ObjectMeta.Labels = make(map[string]string) - newInstance.ObjectMeta.Labels[config.LABEL_PG_CLUSTER] = clusterName - newInstance.ObjectMeta.Labels[config.LABEL_RMDATA] = "true" +// passwordTypeStrings is a mapping of strings of password types to their +// corresponding value of the structured password type +var passwordTypeStrings = map[string]pgpassword.PasswordType{ + "": pgpassword.MD5, + "md5": pgpassword.MD5, + "scram": pgpassword.SCRAM, + "scram-sha-256": pgpassword.SCRAM, +} - _, err = Clientset.CrunchydataV1().Pgtasks(ns).Create(ctx, newInstance, metav1.CreateOptions{}) - if err != nil { - log.Error(err) - return err - } +// GetPasswordType returns the enumerated password type based on the string, and +// an error if it cannot match one +func GetPasswordType(passwordTypeStr string) (pgpassword.PasswordType, error) { + passwordType, ok := passwordTypeStrings[passwordTypeStr] - return err - -} + if !ok { + return passwordType, ErrPasswordTypeInvalid + } -func GetBackrestStorageTypes() []string { - return backrestStorageTypes + return passwordType, nil } // IsValidPVC determines if a PVC with the name provided exits @@ -113,6 +94,64 @@ func IsValidPVC(pvcName, ns string) bool { return pvc != nil } +// ValidateBackrestStorageTypeForCommand determines if a submitted pgBackRest +// storage value can be 
used as part of a pgBackRest operation based upon the
+// storage types used by the PostgreSQL cluster itself
+func ValidateBackrestStorageTypeForCommand(cluster *crv1.Pgcluster, storageTypeStr string) error {
+	// first, parse the submitted storage type string to see what we're up against
+	storageTypes, err := crv1.ParseBackrestStorageTypes(storageTypeStr)
+
+	// if there is an error parsing the string and it's not due to the string
+	// being empty, return the error
+	// if it is due to an empty string, then return so that the defaults will be
+	// used
+	if err != nil {
+		if errors.Is(err, crv1.ErrStorageTypesEmpty) {
+			return nil
+		}
+		return err
+	}
+
+	// there can only be one storage type used for a command (for now), so ensure
+	// this condition is satisfied
+	if len(storageTypes) > 1 {
+		return fmt.Errorf("you can only select one storage type")
+	}
+
+	// a special case: the list of storage types is empty. if this is not a posix
+	// (or local) storage type, then return an error. Otherwise, we can exit here.
+	if len(cluster.Spec.BackrestStorageTypes) == 0 {
+		if !(storageTypes[0] == crv1.BackrestStorageTypePosix || storageTypes[0] == crv1.BackrestStorageTypeLocal) {
+			return fmt.Errorf("%w: choices are: posix", crv1.ErrInvalidStorageType)
+		}
+		return nil
+	}
+
+	// now, see if the selected storage type is available in the list of storage
+	// types on the cluster
+	ok := false
+	for _, storageType := range cluster.Spec.BackrestStorageTypes {
+		switch storageTypes[0] {
+		default:
+			ok = ok || (storageType == storageTypes[0])
+		case crv1.BackrestStorageTypePosix, crv1.BackrestStorageTypeLocal:
+			ok = ok || (storageType == crv1.BackrestStorageTypePosix || storageType == crv1.BackrestStorageTypeLocal)
+		}
+	}
+
+	if !ok {
+		choices := make([]string, len(cluster.Spec.BackrestStorageTypes))
+		for i, storageType := range cluster.Spec.BackrestStorageTypes {
+			choices[i] = string(storageType)
+		}
+
+		return fmt.Errorf("%w: choices are: %s",
+			crv1.ErrInvalidStorageType, strings.Join(choices, " "))
+	}
+
+	return nil
+}
+
// ValidateResourceRequestLimit validates that a Kubernetes Requests/Limit pair
// is valid, both by validating the values are valid quantity values, and then
// by checking that the limit >= request. This also needs to check against the
diff --git a/internal/apiserver/common_test.go b/internal/apiserver/common_test.go
index da909d2ba6..8d217bbc9d 100644
--- a/internal/apiserver/common_test.go
+++ b/internal/apiserver/common_test.go
@@ -1,7 +1,7 @@
 package apiserver

 /*
-Copyright 2018 - 2020 Crunchy Data Solutions, Inc.
+Copyright 2018 - 2023 Crunchy Data Solutions, Inc.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
 You may obtain a copy of the License at
@@ -16,11 +16,196 @@ limitations under the License.
*/ import ( + "errors" "testing" + pgpassword "github.com/crunchydata/postgres-operator/internal/postgres/password" + crv1 "github.com/crunchydata/postgres-operator/pkg/apis/crunchydata.com/v1" + "k8s.io/apimachinery/pkg/api/resource" ) +func TestGetPasswordType(t *testing.T) { + t.Run("valid", func(t *testing.T) { + tests := map[string]pgpassword.PasswordType{ + "": pgpassword.MD5, + "md5": pgpassword.MD5, + "scram": pgpassword.SCRAM, + "scram-sha-256": pgpassword.SCRAM, + } + + for passwordTypeStr, expected := range tests { + t.Run(passwordTypeStr, func(t *testing.T) { + passwordType, err := GetPasswordType(passwordTypeStr) + if err != nil { + t.Error(err) + return + } + + if passwordType != expected { + t.Errorf("password type %q should yield %d", passwordTypeStr, expected) + } + }) + } + }) + + t.Run("invalid", func(t *testing.T) { + tests := map[string]error{ + "magic": ErrPasswordTypeInvalid, + "scram-sha-512": ErrPasswordTypeInvalid, + } + + for passwordTypeStr, expected := range tests { + t.Run(passwordTypeStr, func(t *testing.T) { + if _, err := GetPasswordType(passwordTypeStr); !errors.Is(err, expected) { + t.Errorf("password type %q should yield error %q", passwordTypeStr, expected.Error()) + } + }) + } + }) +} + +func TestValidateBackrestStorageTypeForCommand(t *testing.T) { + cluster := &crv1.Pgcluster{ + Spec: crv1.PgclusterSpec{}, + } + + t.Run("empty repo type", func(t *testing.T) { + err := ValidateBackrestStorageTypeForCommand(cluster, "") + + if err != nil { + t.Fatalf("expected no error, actual error: %s", err.Error()) + } + }) + + t.Run("invalid repo type", func(t *testing.T) { + err := ValidateBackrestStorageTypeForCommand(cluster, "bad") + + if err == nil { + t.Fatalf("expected invalid repo type to return an error, no error returned") + } + }) + + t.Run("multiple repo types", func(t *testing.T) { + err := ValidateBackrestStorageTypeForCommand(cluster, "posix,s3") + + if err == nil { + t.Fatalf("expected error") + } + }) + + t.Run("posix repo, no repo types on resource", func(t *testing.T) { + cluster.Spec.BackrestStorageTypes = []crv1.BackrestStorageType{} + err := ValidateBackrestStorageTypeForCommand(cluster, "posix") + + if err != nil { + t.Fatalf("expected no error, actual error: %s", err.Error()) + } + }) + + t.Run("local repo, no repo types on resource", func(t *testing.T) { + cluster.Spec.BackrestStorageTypes = []crv1.BackrestStorageType{} + err := ValidateBackrestStorageTypeForCommand(cluster, "local") + + if err != nil { + t.Fatalf("expected no error, actual error: %s", err.Error()) + } + }) + + t.Run("s3 repo, no repo types on resource", func(t *testing.T) { + cluster.Spec.BackrestStorageTypes = []crv1.BackrestStorageType{} + err := ValidateBackrestStorageTypeForCommand(cluster, "s3") + + if err == nil { + t.Fatalf("expected error") + } + }) + + t.Run("posix repo, posix repo type available", func(t *testing.T) { + cluster.Spec.BackrestStorageTypes = []crv1.BackrestStorageType{crv1.BackrestStorageTypePosix} + err := ValidateBackrestStorageTypeForCommand(cluster, "posix") + + if err != nil { + t.Fatalf("expected no error, actual error: %s", err.Error()) + } + }) + + t.Run("posix repo, posix repo type unavailable", func(t *testing.T) { + cluster.Spec.BackrestStorageTypes = []crv1.BackrestStorageType{crv1.BackrestStorageTypeS3} + err := ValidateBackrestStorageTypeForCommand(cluster, "posix") + + if err == nil { + t.Fatalf("expected error") + } + }) + + t.Run("posix repo, local repo type available", func(t *testing.T) { + cluster.Spec.BackrestStorageTypes = 
[]crv1.BackrestStorageType{crv1.BackrestStorageTypeLocal} + err := ValidateBackrestStorageTypeForCommand(cluster, "posix") + + if err != nil { + t.Fatalf("expected no error, actual error: %s", err.Error()) + } + }) + + t.Run("posix repo, multi-repo", func(t *testing.T) { + cluster.Spec.BackrestStorageTypes = []crv1.BackrestStorageType{ + crv1.BackrestStorageTypeS3, + crv1.BackrestStorageTypePosix, + } + err := ValidateBackrestStorageTypeForCommand(cluster, "posix") + + if err != nil { + t.Fatalf("expected no error, actual error: %s", err.Error()) + } + }) + + t.Run("local repo, local repo type available", func(t *testing.T) { + cluster.Spec.BackrestStorageTypes = []crv1.BackrestStorageType{crv1.BackrestStorageTypeLocal} + err := ValidateBackrestStorageTypeForCommand(cluster, "local") + + if err != nil { + t.Fatalf("expected no error, actual error: %s", err.Error()) + } + }) + + t.Run("local repo, local repo type unavailable", func(t *testing.T) { + cluster.Spec.BackrestStorageTypes = []crv1.BackrestStorageType{crv1.BackrestStorageTypeS3} + err := ValidateBackrestStorageTypeForCommand(cluster, "local") + + if err == nil { + t.Fatalf("expected error") + } + }) + + t.Run("local repo, posix repo type available", func(t *testing.T) { + cluster.Spec.BackrestStorageTypes = []crv1.BackrestStorageType{crv1.BackrestStorageTypePosix} + err := ValidateBackrestStorageTypeForCommand(cluster, "local") + + if err != nil { + t.Fatalf("expected no error, actual error: %s", err.Error()) + } + }) + + t.Run("s3 repo, s3 repo type available", func(t *testing.T) { + cluster.Spec.BackrestStorageTypes = []crv1.BackrestStorageType{crv1.BackrestStorageTypeS3} + err := ValidateBackrestStorageTypeForCommand(cluster, "s3") + + if err != nil { + t.Fatalf("expected no error, actual error: %s", err.Error()) + } + }) + + t.Run("s3 repo, s3 repo type unavailable", func(t *testing.T) { + cluster.Spec.BackrestStorageTypes = []crv1.BackrestStorageType{crv1.BackrestStorageTypePosix} + err := ValidateBackrestStorageTypeForCommand(cluster, "s3") + + if err == nil { + t.Fatalf("expected error") + } + }) +} + func TestValidateResourceRequestLimit(t *testing.T) { t.Run("valid", func(t *testing.T) { resources := []struct{ request, limit, defaultRequest string }{ diff --git a/internal/apiserver/configservice/configimpl.go b/internal/apiserver/configservice/configimpl.go index 76d891d5ed..1aa47a1f21 100644 --- a/internal/apiserver/configservice/configimpl.go +++ b/internal/apiserver/configservice/configimpl.go @@ -1,7 +1,7 @@ package configservice /* -Copyright 2018 - 2020 Crunchy Data Solutions, Inc. +Copyright 2018 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at diff --git a/internal/apiserver/configservice/configservice.go b/internal/apiserver/configservice/configservice.go index 1f70934888..10a00dd4e5 100644 --- a/internal/apiserver/configservice/configservice.go +++ b/internal/apiserver/configservice/configservice.go @@ -1,7 +1,7 @@ package configservice /* -Copyright 2018 - 2020 Crunchy Data Solutions, Inc. +Copyright 2018 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -17,10 +17,11 @@ limitations under the License. 
import ( "encoding/json" + "net/http" + "github.com/crunchydata/postgres-operator/internal/apiserver" msgs "github.com/crunchydata/postgres-operator/pkg/apiservermsgs" log "github.com/sirupsen/logrus" - "net/http" ) // ShowConfigHandler ... @@ -68,17 +69,17 @@ func ShowConfigHandler(w http.ResponseWriter, r *http.Request) { if clientVersion != msgs.PGO_VERSION { resp.Status = msgs.Status{Code: msgs.Error, Msg: apiserver.VERSION_MISMATCH_ERROR} - json.NewEncoder(w).Encode(resp) + _ = json.NewEncoder(w).Encode(resp) return } _, err = apiserver.GetNamespace(apiserver.Clientset, username, namespace) if err != nil { resp.Status = msgs.Status{Code: msgs.Error, Msg: err.Error()} - json.NewEncoder(w).Encode(resp) + _ = json.NewEncoder(w).Encode(resp) return } resp = ShowConfig() - json.NewEncoder(w).Encode(resp) + _ = json.NewEncoder(w).Encode(resp) } diff --git a/internal/apiserver/dfservice/dfimpl.go b/internal/apiserver/dfservice/dfimpl.go index 6c11bd9d36..bcbdfc68ad 100644 --- a/internal/apiserver/dfservice/dfimpl.go +++ b/internal/apiserver/dfservice/dfimpl.go @@ -1,7 +1,7 @@ package dfservice /* -Copyright 2018 - 2020 Crunchy Data Solutions, Inc. +Copyright 2018 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -25,10 +25,12 @@ import ( "github.com/crunchydata/postgres-operator/internal/kubeapi" crv1 "github.com/crunchydata/postgres-operator/pkg/apis/crunchydata.com/v1" msgs "github.com/crunchydata/postgres-operator/pkg/apiservermsgs" + log "github.com/sirupsen/logrus" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" "k8s.io/client-go/kubernetes" ) @@ -124,7 +126,6 @@ func getClaimCapacity(clientset kubernetes.Interface, pvcName, ns string) (strin log.Debugf("in df pvc name found to be %s", pvcName) pvc, err := clientset.CoreV1().PersistentVolumeClaims(ns).Get(ctx, pvcName, metav1.GetOptions{}) - if err != nil { log.Error(err) return "", err @@ -147,10 +148,15 @@ func getClusterDf(cluster *crv1.Pgcluster, clusterResultsChannel chan msgs.DfDet ctx := context.TODO() log.Debugf("pod df: %s", cluster.Spec.Name) - selector := fmt.Sprintf("%s=%s", config.LABEL_PG_CLUSTER, cluster.Spec.Name) + selector := fmt.Sprintf("%s=%s,!%s", + config.LABEL_PG_CLUSTER, cluster.Spec.Name, config.LABEL_PGHA_BOOTSTRAP) - pods, err := apiserver.Clientset.CoreV1().Pods(cluster.Spec.Namespace).List(ctx, metav1.ListOptions{LabelSelector: selector}) + options := metav1.ListOptions{ + FieldSelector: fields.OneTermEqualSelector("status.phase", string(v1.PodRunning)).String(), + LabelSelector: selector, + } + pods, err := apiserver.Clientset.CoreV1().Pods(cluster.Spec.Namespace).List(ctx, options) // if there is an error attempting to get the pods, just return if err != nil { errorChannel <- err @@ -299,7 +305,6 @@ func getPodDf(cluster *crv1.Pgcluster, pod *v1.Pod, podResultsChannel chan msgs. stdout, stderr, err := kubeapi.ExecToPodThroughAPI(apiserver.RESTConfig, apiserver.Clientset, cmd, pvcContainerName, pod.Name, cluster.Spec.Namespace, nil) - // if the command fails, exit here if err != nil { err := fmt.Errorf(stderr) @@ -310,7 +315,7 @@ func getPodDf(cluster *crv1.Pgcluster, pod *v1.Pod, podResultsChannel chan msgs. // have to parse the size out from the statement. 
Size is in bytes if _, err = fmt.Sscan(stdout, &result.PVCUsed); err != nil { - err := fmt.Errorf("could not find the size of pvc %s: %v", result.PVCName, err) + err := fmt.Errorf("could not find the size of pvc %s: %w", result.PVCName, err) log.Error(err) errorChannel <- err return diff --git a/internal/apiserver/dfservice/dfservice.go b/internal/apiserver/dfservice/dfservice.go index 325e20257a..3a4e6e4bd3 100644 --- a/internal/apiserver/dfservice/dfservice.go +++ b/internal/apiserver/dfservice/dfservice.go @@ -1,7 +1,7 @@ package dfservice /* -Copyright 2018 - 2020 Crunchy Data Solutions, Inc. +Copyright 2018 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -69,7 +69,7 @@ func DfHandler(w http.ResponseWriter, r *http.Request) { if err := json.NewDecoder(r.Body).Decode(&request); err != nil { response := CreateErrorResponse(err.Error()) - json.NewEncoder(w).Encode(response) + _ = json.NewEncoder(w).Encode(response) return } @@ -84,14 +84,14 @@ func DfHandler(w http.ResponseWriter, r *http.Request) { // check that the client versions match. If they don't, error out if request.ClientVersion != msgs.PGO_VERSION { response := CreateErrorResponse(apiserver.VERSION_MISMATCH_ERROR) - json.NewEncoder(w).Encode(response) + _ = json.NewEncoder(w).Encode(response) return } // ensure that the user has access to this namespace. if not, error out if _, err := apiserver.GetNamespace(apiserver.Clientset, username, request.Namespace); err != nil { response := CreateErrorResponse(err.Error()) - json.NewEncoder(w).Encode(response) + _ = json.NewEncoder(w).Encode(response) return } @@ -99,5 +99,5 @@ func DfHandler(w http.ResponseWriter, r *http.Request) { response := DfCluster(request) // turn the response into JSON - json.NewEncoder(w).Encode(response) + _ = json.NewEncoder(w).Encode(response) } diff --git a/internal/apiserver/failoverservice/failoverimpl.go b/internal/apiserver/failoverservice/failoverimpl.go index 0e6e56df58..b6d45e456f 100644 --- a/internal/apiserver/failoverservice/failoverimpl.go +++ b/internal/apiserver/failoverservice/failoverimpl.go @@ -1,7 +1,7 @@ package failoverservice /* -Copyright 2018 - 2020 Crunchy Data Solutions, Inc. +Copyright 2018 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -18,29 +18,36 @@ limitations under the License. import ( "context" "errors" + "fmt" + "strings" "github.com/crunchydata/postgres-operator/internal/apiserver" "github.com/crunchydata/postgres-operator/internal/config" + "github.com/crunchydata/postgres-operator/internal/operator" "github.com/crunchydata/postgres-operator/internal/util" crv1 "github.com/crunchydata/postgres-operator/pkg/apis/crunchydata.com/v1" msgs "github.com/crunchydata/postgres-operator/pkg/apiservermsgs" + log "github.com/sirupsen/logrus" - v1 "k8s.io/api/apps/v1" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" ) -// CreateFailover ... +// CreateFailover is the API endpoint for triggering a manual failover of a +// cluster. It performs this function inline, i.e. it does not trigger any +// asynchronous methods. 
+// // pgo failover mycluster -// pgo failover all -// pgo failover --selector=name=mycluster func CreateFailover(request *msgs.CreateFailoverRequest, ns, pgouser string) msgs.CreateFailoverResponse { - ctx := context.TODO() + log.Debugf("create failover called for %s", request.ClusterName) - var err error - resp := msgs.CreateFailoverResponse{} - resp.Status.Code = msgs.Ok - resp.Status.Msg = "" - resp.Results = make([]string, 0) + resp := msgs.CreateFailoverResponse{ + Results: "", + Status: msgs.Status{ + Code: msgs.Ok, + }, + } cluster, err := validateClusterName(request.ClusterName, ns) if err != nil { @@ -57,50 +64,28 @@ func CreateFailover(request *msgs.CreateFailoverRequest, ns, pgouser string) msg return resp } - if request.Target != "" { - _, err = isValidFailoverTarget(request.Target, request.ClusterName, ns) - if err != nil { - resp.Status.Code = msgs.Error - resp.Status.Msg = err.Error() - return resp - } + if err := isValidFailoverTarget(request); err != nil { + resp.Status.Code = msgs.Error + resp.Status.Msg = err.Error() + return resp } - log.Debugf("create failover called for %s", request.ClusterName) - - // Create a pgtask - spec := crv1.PgtaskSpec{} - spec.Namespace = ns - spec.Name = request.ClusterName + "-" + config.LABEL_FAILOVER - - // previous failovers will leave a pgtask so remove it first - apiserver.Clientset.CrunchydataV1().Pgtasks(ns).Delete(ctx, spec.Name, metav1.DeleteOptions{}) - - spec.TaskType = crv1.PgtaskFailover - spec.Parameters = make(map[string]string) - spec.Parameters[request.ClusterName] = request.ClusterName - - labels := make(map[string]string) - labels["target"] = request.Target - labels[config.LABEL_PG_CLUSTER] = request.ClusterName - labels[config.LABEL_PGOUSER] = pgouser - - newInstance := &crv1.Pgtask{ - ObjectMeta: metav1.ObjectMeta{ - Name: spec.Name, - Labels: labels, - }, - Spec: spec, + // perform the switchover or failover, depending on which flag is selected + // if we are forcing the failover, we need to use "Failover", otherwise we + // perform a controlled switchover + if request.Force { + err = operator.Failover(apiserver.Clientset, apiserver.RESTConfig, cluster, request.Target) + } else { + err = operator.Switchover(apiserver.Clientset, apiserver.RESTConfig, cluster, request.Target) } - _, err = apiserver.Clientset.CrunchydataV1().Pgtasks(ns).Create(ctx, newInstance, metav1.CreateOptions{}) if err != nil { resp.Status.Code = msgs.Error - resp.Status.Msg = err.Error() + resp.Status.Msg = strings.ReplaceAll(err.Error(), "master", "primary") return resp } - resp.Results = append(resp.Results, "created Pgtask (failover) for cluster "+request.ClusterName) + resp.Results = "failover success for cluster " + cluster.Name return resp } @@ -109,7 +94,6 @@ func CreateFailover(request *msgs.CreateFailoverRequest, ns, pgouser string) msg // over to // pgo failover mycluster --query func QueryFailover(name, ns string) msgs.QueryFailoverResponse { - response := msgs.QueryFailoverResponse{ Results: make([]msgs.FailoverTargetSpec, 0), Status: msgs.Status{Code: msgs.Ok, Msg: ""}, @@ -139,7 +123,6 @@ func QueryFailover(name, ns string) msgs.QueryFailoverResponse { } replicationStatusResponse, err := util.ReplicationStatus(replicationStatusRequest, false, false) - // if an error is return, log the message, and return the response if err != nil { log.Error(err.Error()) @@ -175,7 +158,6 @@ func QueryFailover(name, ns string) msgs.QueryFailoverResponse { func validateClusterName(clusterName, ns string) (*crv1.Pgcluster, error) { ctx := 
context.TODO() cluster, err := apiserver.Clientset.CrunchydataV1().Pgclusters(ns).Get(ctx, clusterName, metav1.GetOptions{}) - if err != nil { return cluster, errors.New("no cluster found named " + clusterName) } @@ -189,35 +171,56 @@ func validateClusterName(clusterName, ns string) (*crv1.Pgcluster, error) { // specified, and then ensuring the PG pod created by the deployment is not the current primary. // If the deployment is not found, or if the pod is the current primary, an error will be returned. // Otherwise the deployment is returned. -func isValidFailoverTarget(deployName, clusterName, ns string) (*v1.Deployment, error) { +func isValidFailoverTarget(request *msgs.CreateFailoverRequest) error { ctx := context.TODO() + // if we're not forcing a failover and the target is blank, we can + // return here + // However, if we are forcing a failover and the target is blank, then we do + // have an error + if request.Target == "" { + if !request.Force { + return nil + } + + return fmt.Errorf("target is required when forcing a failover.") + } + // Using the following label selector, ensure the deployment specified using deployName exists in the // cluster specified using clusterName: // pg-cluster=clusterName,deployment-name=deployName - selector := config.LABEL_PG_CLUSTER + "=" + clusterName + "," + config.LABEL_DEPLOYMENT_NAME + "=" + deployName - deployments, err := apiserver.Clientset. - AppsV1().Deployments(ns). - List(ctx, metav1.ListOptions{LabelSelector: selector}) + options := metav1.ListOptions{ + LabelSelector: fields.AndSelectors( + fields.OneTermEqualSelector(config.LABEL_PG_CLUSTER, request.ClusterName), + fields.OneTermEqualSelector(config.LABEL_DEPLOYMENT_NAME, request.Target), + ).String(), + } + deployments, err := apiserver.Clientset.AppsV1().Deployments(request.Namespace).List(ctx, options) + if err != nil { log.Error(err) - return nil, err + return err } else if len(deployments.Items) == 0 { - return nil, errors.New("no target found named " + deployName) + return fmt.Errorf("no target found named %s", request.Target) } else if len(deployments.Items) > 1 { - return nil, errors.New("more than one target found named " + deployName) + return fmt.Errorf("more than one target found named %s", request.Target) } // Using the following label selector, determine if the target specified is the current // primary for the cluster and return an error if it is: // pg-cluster=clusterName,deployment-name=deployName,role=primary - selector = config.LABEL_PG_CLUSTER + "=" + clusterName + "," + config.LABEL_DEPLOYMENT_NAME + "=" + deployName + - "," + config.LABEL_PGHA_ROLE + "=" + config.LABEL_PGHA_ROLE_PRIMARY - pods, _ := apiserver.Clientset.CoreV1().Pods(ns).List(ctx, metav1.ListOptions{LabelSelector: selector}) + options.FieldSelector = fields.OneTermEqualSelector("status.phase", string(v1.PodRunning)).String() + options.LabelSelector = fields.AndSelectors( + fields.OneTermEqualSelector(config.LABEL_PG_CLUSTER, request.ClusterName), + fields.OneTermEqualSelector(config.LABEL_DEPLOYMENT_NAME, request.Target), + fields.OneTermEqualSelector(config.LABEL_PGHA_ROLE, config.LABEL_PGHA_ROLE_PRIMARY), + ).String() + + pods, _ := apiserver.Clientset.CoreV1().Pods(request.Namespace).List(ctx, options) + if len(pods.Items) > 0 { - return nil, errors.New("The primary database cannot be selected as a failover target") + return fmt.Errorf("The primary database cannot be selected as a failover target") } - return &deployments.Items[0], nil - + return nil } diff --git 
a/internal/apiserver/failoverservice/failoverservice.go b/internal/apiserver/failoverservice/failoverservice.go index 164d7b1545..66214bc071 100644 --- a/internal/apiserver/failoverservice/failoverservice.go +++ b/internal/apiserver/failoverservice/failoverservice.go @@ -1,7 +1,7 @@ package failoverservice /* -Copyright 2018 - 2020 Crunchy Data Solutions, Inc. +Copyright 2018 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -17,11 +17,12 @@ limitations under the License. import ( "encoding/json" + "net/http" + "github.com/crunchydata/postgres-operator/internal/apiserver" msgs "github.com/crunchydata/postgres-operator/pkg/apiservermsgs" "github.com/gorilla/mux" log "github.com/sirupsen/logrus" - "net/http" ) // CreateFailoverHandler ... @@ -65,20 +66,20 @@ func CreateFailoverHandler(w http.ResponseWriter, r *http.Request) { if request.ClientVersion != msgs.PGO_VERSION { resp.Status = msgs.Status{Code: msgs.Error, Msg: apiserver.VERSION_MISMATCH_ERROR} - json.NewEncoder(w).Encode(resp) + _ = json.NewEncoder(w).Encode(resp) return } ns, err = apiserver.GetNamespace(apiserver.Clientset, username, request.Namespace) if err != nil { resp.Status = msgs.Status{Code: msgs.Error, Msg: err.Error()} - json.NewEncoder(w).Encode(resp) + _ = json.NewEncoder(w).Encode(resp) return } resp = CreateFailover(&request, ns, username) - json.NewEncoder(w).Encode(resp) + _ = json.NewEncoder(w).Encode(resp) } // QueryFailoverHandler ... @@ -137,17 +138,17 @@ func QueryFailoverHandler(w http.ResponseWriter, r *http.Request) { if clientVersion != msgs.PGO_VERSION { resp.Status = msgs.Status{Code: msgs.Error, Msg: apiserver.VERSION_MISMATCH_ERROR} - json.NewEncoder(w).Encode(resp) + _ = json.NewEncoder(w).Encode(resp) return } ns, err = apiserver.GetNamespace(apiserver.Clientset, username, namespace) if err != nil { resp.Status = msgs.Status{Code: msgs.Error, Msg: err.Error()} - json.NewEncoder(w).Encode(resp) + _ = json.NewEncoder(w).Encode(resp) return } resp = QueryFailover(name, ns) - json.NewEncoder(w).Encode(resp) + _ = json.NewEncoder(w).Encode(resp) } diff --git a/internal/apiserver/labelservice/labelimpl.go b/internal/apiserver/labelservice/labelimpl.go index a7129249dd..7f86c89374 100644 --- a/internal/apiserver/labelservice/labelimpl.go +++ b/internal/apiserver/labelservice/labelimpl.go @@ -1,7 +1,7 @@ package labelservice /* -Copyright 2017 - 2020 Crunchy Data Solutions, Inc. +Copyright 2017 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -17,19 +17,16 @@ limitations under the License. import ( "context" - "errors" - "strings" "github.com/crunchydata/postgres-operator/internal/apiserver" "github.com/crunchydata/postgres-operator/internal/config" "github.com/crunchydata/postgres-operator/internal/kubeapi" + "github.com/crunchydata/postgres-operator/internal/util" crv1 "github.com/crunchydata/postgres-operator/pkg/apis/crunchydata.com/v1" msgs "github.com/crunchydata/postgres-operator/pkg/apiservermsgs" - "github.com/crunchydata/postgres-operator/pkg/events" log "github.com/sirupsen/logrus" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/validation" ) // Label ... 2 forms ... 
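The isValidFailoverTarget rewrite above composes its selector strings with k8s.io/apimachinery/pkg/fields helpers instead of hand-concatenating label keys and values. A minimal sketch of that pattern outside the operator; the keys and values here are illustrative stand-ins for the config.LABEL_* constants and a real cluster name:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/fields"
)

func main() {
	// builds the same string as hand-concatenating
	// "pg-cluster=mycluster,deployment-name=mycluster-abcd", but the fields
	// package takes care of joining terms and escaping special characters
	selector := fields.AndSelectors(
		fields.OneTermEqualSelector("pg-cluster", "mycluster"),
		fields.OneTermEqualSelector("deployment-name", "mycluster-abcd"),
	).String()

	fmt.Println(selector) // pg-cluster=mycluster,deployment-name=mycluster-abcd
}
```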
@@ -37,8 +34,7 @@ import ( // pgo label --label=env=prod --selector=name=mycluster func Label(request *msgs.LabelRequest, ns, pgouser string) msgs.LabelResponse { ctx := context.TODO() - var err error - var labelsMap map[string]string + resp := msgs.LabelResponse{} resp.Status.Code = msgs.Ok resp.Status.Msg = "" @@ -50,10 +46,9 @@ func Label(request *msgs.LabelRequest, ns, pgouser string) msgs.LabelResponse { return resp } - labelsMap, err = validateLabel(request.LabelCmdLabel, ns) - if err != nil { + if err := util.ValidateLabels(request.Labels); err != nil { resp.Status.Code = msgs.Error - resp.Status.Msg = "labels not formatted correctly" + resp.Status.Msg = err.Error() return resp } @@ -90,7 +85,7 @@ func Label(request *msgs.LabelRequest, ns, pgouser string) msgs.LabelResponse { } clusterList = *cl } else { - //each arg represents a cluster name + // each arg represents a cluster name items := make([]crv1.Pgcluster, 0) for _, cluster := range request.Args { result, err := apiserver.Clientset.CrunchydataV1().Pgclusters(ns).Get(ctx, cluster, metav1.GetOptions{}) @@ -109,13 +104,12 @@ func Label(request *msgs.LabelRequest, ns, pgouser string) msgs.LabelResponse { resp.Results = append(resp.Results, c.Spec.Name) } - addLabels(clusterList.Items, request.DryRun, request.LabelCmdLabel, labelsMap, ns, pgouser) + addLabels(clusterList.Items, request.DryRun, request.Labels, ns) return resp - } -func addLabels(items []crv1.Pgcluster, DryRun bool, LabelCmdLabel string, newLabels map[string]string, ns, pgouser string) { +func addLabels(items []crv1.Pgcluster, DryRun bool, newLabels map[string]string, ns string) { ctx := context.TODO() patchBytes, err := kubeapi.NewMergePatch().Add("metadata", "labels")(newLabels).Bytes() if err != nil { @@ -133,32 +127,11 @@ func addLabels(items []crv1.Pgcluster, DryRun bool, LabelCmdLabel string, newLab if err != nil { log.Error(err.Error()) } - - //publish event for create label - topics := make([]string, 1) - topics[0] = events.EventTopicCluster - - f := events.EventCreateLabelFormat{ - EventHeader: events.EventHeader{ - Namespace: ns, - Username: pgouser, - Topic: topics, - EventType: events.EventCreateLabel, - }, - Clustername: items[i].Spec.Name, - Label: LabelCmdLabel, - } - - err = events.Publish(f) - if err != nil { - log.Error(err.Error()) - } - } } for i := 0; i < len(items); i++ { - //get deployments for this CRD + // get deployments for this CRD selector := config.LABEL_PG_CLUSTER + "=" + items[i].Spec.Name deployments, err := apiserver.Clientset. AppsV1().Deployments(ns). @@ -168,7 +141,7 @@ func addLabels(items []crv1.Pgcluster, DryRun bool, LabelCmdLabel string, newLab } for _, d := range deployments.Items { - //update Deployment with the label + // update Deployment with the label if !DryRun { log.Debugf("patching deployment %s: %s", d.Name, patchBytes) _, err := apiserver.Clientset.AppsV1().Deployments(ns). 
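For context on the patch that addLabels sends: kubeapi.NewMergePatch is a project-internal helper, but assuming it emits a standard JSON merge patch (RFC 7386), the document produced by Add("metadata", "labels") can be sketched with encoding/json alone. The label key and value below are hypothetical:

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// a JSON merge patch that adds or overwrites the given labels while
	// leaving every other field of the patched object untouched
	newLabels := map[string]string{"env": "prod"}

	patch := map[string]interface{}{
		"metadata": map[string]interface{}{
			"labels": newLabels,
		},
	}

	patchBytes, err := json.Marshal(patch)
	if err != nil {
		panic(err)
	}

	// prints {"metadata":{"labels":{"env":"prod"}}}; these are the bytes a
	// Patch(..., types.MergePatchType, patchBytes, ...) call would send
	fmt.Println(string(patchBytes))
}
```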
@@ -182,38 +155,12 @@ func addLabels(items []crv1.Pgcluster, DryRun bool, LabelCmdLabel string, newLab } } -func validateLabel(LabelCmdLabel, ns string) (map[string]string, error) { - var err error - labelMap := make(map[string]string) - userValues := strings.Split(LabelCmdLabel, ",") - for _, v := range userValues { - pair := strings.Split(v, "=") - if len(pair) != 2 { - log.Error("label format incorrect, requires name=value") - return labelMap, errors.New("label format incorrect, requires name=value") - } - - errs := validation.IsDNS1035Label(pair[0]) - if len(errs) > 0 { - return labelMap, errors.New("label format incorrect, requires name=value " + errs[0]) - } - errs = validation.IsDNS1035Label(pair[1]) - if len(errs) > 0 { - return labelMap, errors.New("label format incorrect, requires name=value " + errs[0]) - } - - labelMap[pair[0]] = pair[1] - } - return labelMap, err -} - // DeleteLabel ... // pgo delete label mycluster yourcluster --label=env=prod // pgo delete label --label=env=prod --selector=group=somegroup func DeleteLabel(request *msgs.DeleteLabelRequest, ns string) msgs.LabelResponse { ctx := context.TODO() - var err error - var labelsMap map[string]string + resp := msgs.LabelResponse{} resp.Status.Code = msgs.Ok resp.Status.Msg = "" @@ -225,8 +172,7 @@ func DeleteLabel(request *msgs.DeleteLabelRequest, ns string) msgs.LabelResponse return resp } - labelsMap, err = validateLabel(request.LabelCmdLabel, ns) - if err != nil { + if err := util.ValidateLabels(request.Labels); err != nil { resp.Status.Code = msgs.Error resp.Status.Msg = "labels not formatted correctly" return resp @@ -263,7 +209,7 @@ func DeleteLabel(request *msgs.DeleteLabelRequest, ns string) msgs.LabelResponse } clusterList = *cl } else { - //each arg represents a cluster name + // each arg represents a cluster name items := make([]crv1.Pgcluster, 0) for _, cluster := range request.Args { result, err := apiserver.Clientset.CrunchydataV1().Pgclusters(ns).Get(ctx, cluster, metav1.GetOptions{}) @@ -282,18 +228,16 @@ func DeleteLabel(request *msgs.DeleteLabelRequest, ns string) msgs.LabelResponse resp.Results = append(resp.Results, "deleting label from "+c.Spec.Name) } - err = deleteLabels(clusterList.Items, request.LabelCmdLabel, labelsMap, ns) - if err != nil { + if err := deleteLabels(clusterList.Items, request.Labels, ns); err != nil { resp.Status.Code = msgs.Error resp.Status.Msg = err.Error() return resp } return resp - } -func deleteLabels(items []crv1.Pgcluster, LabelCmdLabel string, labelsMap map[string]string, ns string) error { +func deleteLabels(items []crv1.Pgcluster, labelsMap map[string]string, ns string) error { ctx := context.TODO() patch := kubeapi.NewMergePatch() for key := range labelsMap { @@ -316,7 +260,7 @@ func deleteLabels(items []crv1.Pgcluster, LabelCmdLabel string, labelsMap map[st } for i := 0; i < len(items); i++ { - //get deployments for this CRD + // get deployments for this CRD selector := config.LABEL_PG_CLUSTER + "=" + items[i].Spec.Name deployments, err := apiserver.Clientset. AppsV1().Deployments(ns). diff --git a/internal/apiserver/labelservice/labelservice.go b/internal/apiserver/labelservice/labelservice.go index f13054fd17..f56d3fe1aa 100644 --- a/internal/apiserver/labelservice/labelservice.go +++ b/internal/apiserver/labelservice/labelservice.go @@ -1,7 +1,7 @@ package labelservice /* -Copyright 2017 - 2020 Crunchy Data Solutions, Inc. +Copyright 2017 - 2023 Crunchy Data Solutions, Inc. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -17,10 +17,11 @@ limitations under the License. import ( "encoding/json" + "net/http" + "github.com/crunchydata/postgres-operator/internal/apiserver" msgs "github.com/crunchydata/postgres-operator/pkg/apiservermsgs" log "github.com/sirupsen/logrus" - "net/http" ) // LabelHandler ... @@ -64,20 +65,20 @@ func LabelHandler(w http.ResponseWriter, r *http.Request) { if request.ClientVersion != msgs.PGO_VERSION { resp.Status.Msg = apiserver.VERSION_MISMATCH_ERROR resp.Status.Code = msgs.Error - json.NewEncoder(w).Encode(resp) + _ = json.NewEncoder(w).Encode(resp) return } ns, err = apiserver.GetNamespace(apiserver.Clientset, username, request.Namespace) if err != nil { resp.Status = msgs.Status{Msg: err.Error(), Code: msgs.Error} - json.NewEncoder(w).Encode(resp) + _ = json.NewEncoder(w).Encode(resp) return } resp = Label(&request, ns, username) - json.NewEncoder(w).Encode(resp) + _ = json.NewEncoder(w).Encode(resp) } // DeleteLabelHandler ... @@ -120,18 +121,18 @@ func DeleteLabelHandler(w http.ResponseWriter, r *http.Request) { if request.ClientVersion != msgs.PGO_VERSION { resp.Status = msgs.Status{Msg: apiserver.VERSION_MISMATCH_ERROR, Code: msgs.Error} - json.NewEncoder(w).Encode(resp) + _ = json.NewEncoder(w).Encode(resp) return } ns, err = apiserver.GetNamespace(apiserver.Clientset, username, request.Namespace) if err != nil { resp.Status = msgs.Status{Msg: err.Error(), Code: msgs.Error} - json.NewEncoder(w).Encode(resp) + _ = json.NewEncoder(w).Encode(resp) return } resp = DeleteLabel(&request, ns) - json.NewEncoder(w).Encode(resp) + _ = json.NewEncoder(w).Encode(resp) } diff --git a/internal/apiserver/middleware.go b/internal/apiserver/middleware.go index fc7e60e8e2..a234f36cb3 100644 --- a/internal/apiserver/middleware.go +++ b/internal/apiserver/middleware.go @@ -1,7 +1,7 @@ package apiserver /* -Copyright 2019 - 2020 Crunchy Data Solutions, Inc. +Copyright 2019 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at diff --git a/internal/apiserver/namespaceservice/namespaceimpl.go b/internal/apiserver/namespaceservice/namespaceimpl.go index 36af3b14e2..ac56c05671 100644 --- a/internal/apiserver/namespaceservice/namespaceimpl.go +++ b/internal/apiserver/namespaceservice/namespaceimpl.go @@ -1,7 +1,7 @@ package namespaceservice /* -Copyright 2019 - 2020 Crunchy Data Solutions, Inc. +Copyright 2019 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -36,7 +36,7 @@ func ShowNamespace(clientset kubernetes.Interface, username string, request *msg resp.Username = username resp.Results = make([]msgs.NamespaceResult, 0) - //namespaceList := util.GetNamespaces() + // namespaceList := util.GetNamespaces() nsList := make([]string, 0) @@ -91,14 +91,13 @@ func ShowNamespace(clientset kubernetes.Interface, username string, request *msg // CreateNamespace ... 
func CreateNamespace(clientset kubernetes.Interface, createdBy string, request *msgs.CreateNamespaceRequest) msgs.CreateNamespaceResponse { - log.Debugf("CreateNamespace %v", request) resp := msgs.CreateNamespaceResponse{} resp.Status.Code = msgs.Ok resp.Status.Msg = "" resp.Results = make([]string, 0) - //iterate thru all the args (namespace names) + // iterate thru all the args (namespace names) for _, namespace := range request.Args { if err := ns.CreateNamespace(clientset, apiserver.InstallationName, @@ -112,7 +111,6 @@ func CreateNamespace(clientset kubernetes.Interface, createdBy string, request * } return resp - } // DeleteNamespace ... @@ -125,7 +123,6 @@ func DeleteNamespace(clientset kubernetes.Interface, deletedBy string, request * for _, namespace := range request.Args { err := ns.DeleteNamespace(clientset, apiserver.InstallationName, apiserver.PgoNamespace, deletedBy, namespace) - if err != nil { resp.Status.Code = msgs.Error resp.Status.Msg = err.Error() @@ -136,19 +133,17 @@ func DeleteNamespace(clientset kubernetes.Interface, deletedBy string, request * } return resp - } // UpdateNamespace ... func UpdateNamespace(clientset kubernetes.Interface, updatedBy string, request *msgs.UpdateNamespaceRequest) msgs.UpdateNamespaceResponse { - log.Debugf("UpdateNamespace %v", request) resp := msgs.UpdateNamespaceResponse{} resp.Status.Code = msgs.Ok resp.Status.Msg = "" resp.Results = make([]string, 0) - //iterate thru all the args (namespace names) + // iterate thru all the args (namespace names) for _, namespace := range request.Args { if err := ns.UpdateNamespace(clientset, apiserver.InstallationName, @@ -162,5 +157,4 @@ func UpdateNamespace(clientset kubernetes.Interface, updatedBy string, request * } return resp - } diff --git a/internal/apiserver/namespaceservice/namespaceservice.go b/internal/apiserver/namespaceservice/namespaceservice.go index 1e27294c96..be5133b2d3 100644 --- a/internal/apiserver/namespaceservice/namespaceservice.go +++ b/internal/apiserver/namespaceservice/namespaceservice.go @@ -1,7 +1,7 @@ package namespaceservice /* -Copyright 2019 - 2020 Crunchy Data Solutions, Inc. +Copyright 2019 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at @@ -77,12 +77,12 @@ func ShowNamespaceHandler(w http.ResponseWriter, r *http.Request) { if request.ClientVersion != msgs.PGO_VERSION { resp.Status.Code = msgs.Error resp.Status.Msg = apiserver.VERSION_MISMATCH_ERROR - json.NewEncoder(w).Encode(resp) + _ = json.NewEncoder(w).Encode(resp) return } resp = ShowNamespace(apiserver.Clientset, username, &request) - json.NewEncoder(w).Encode(resp) + _ = json.NewEncoder(w).Encode(resp) } func CreateNamespaceHandler(w http.ResponseWriter, r *http.Request) { @@ -132,12 +132,12 @@ func CreateNamespaceHandler(w http.ResponseWriter, r *http.Request) { if request.ClientVersion != msgs.PGO_VERSION { resp.Status.Code = msgs.Error resp.Status.Msg = apiserver.VERSION_MISMATCH_ERROR - json.NewEncoder(w).Encode(resp) + _ = json.NewEncoder(w).Encode(resp) return } resp = CreateNamespace(apiserver.Clientset, username, &request) - json.NewEncoder(w).Encode(resp) + _ = json.NewEncoder(w).Encode(resp) } func DeleteNamespaceHandler(w http.ResponseWriter, r *http.Request) { @@ -187,14 +187,14 @@ func DeleteNamespaceHandler(w http.ResponseWriter, r *http.Request) { if request.ClientVersion != msgs.PGO_VERSION { resp.Status.Code = msgs.Error resp.Status.Msg = apiserver.VERSION_MISMATCH_ERROR - json.NewEncoder(w).Encode(resp) + _ = json.NewEncoder(w).Encode(resp) return } resp = DeleteNamespace(apiserver.Clientset, username, &request) - json.NewEncoder(w).Encode(resp) - + _ = json.NewEncoder(w).Encode(resp) } + func UpdateNamespaceHandler(w http.ResponseWriter, r *http.Request) { // swagger:operation POST /namespaceupdate namespaceservice namespaceupdate /*``` @@ -242,10 +242,10 @@ func UpdateNamespaceHandler(w http.ResponseWriter, r *http.Request) { if request.ClientVersion != msgs.PGO_VERSION { resp.Status.Code = msgs.Error resp.Status.Msg = apiserver.VERSION_MISMATCH_ERROR - json.NewEncoder(w).Encode(resp) + _ = json.NewEncoder(w).Encode(resp) return } resp = UpdateNamespace(apiserver.Clientset, username, &request) - json.NewEncoder(w).Encode(resp) + _ = json.NewEncoder(w).Encode(resp) } diff --git a/internal/apiserver/perms.go b/internal/apiserver/perms.go index 48c72099a6..b5bccc3403 100644 --- a/internal/apiserver/perms.go +++ b/internal/apiserver/perms.go @@ -1,7 +1,7 @@ package apiserver /* -Copyright 2018 - 2020 Crunchy Data Solutions, Inc. +Copyright 2018 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at @@ -40,7 +40,6 @@ const ( CREATE_CLUSTER_PERM = "CreateCluster" CREATE_DUMP_PERM = "CreateDump" CREATE_FAILOVER_PERM = "CreateFailover" - CREATE_INGEST_PERM = "CreateIngest" CREATE_NAMESPACE_PERM = "CreateNamespace" CREATE_PGADMIN_PERM = "CreatePgAdmin" CREATE_PGBOUNCER_PERM = "CreatePgbouncer" @@ -57,7 +56,6 @@ const ( // DELETE DELETE_BACKUP_PERM = "DeleteBackup" DELETE_CLUSTER_PERM = "DeleteCluster" - DELETE_INGEST_PERM = "DeleteIngest" DELETE_NAMESPACE_PERM = "DeleteNamespace" DELETE_PGADMIN_PERM = "DeletePgAdmin" DELETE_PGBOUNCER_PERM = "DeletePgbouncer" @@ -71,7 +69,6 @@ const ( SHOW_BACKUP_PERM = "ShowBackup" SHOW_CLUSTER_PERM = "ShowCluster" SHOW_CONFIG_PERM = "ShowConfig" - SHOW_INGEST_PERM = "ShowIngest" SHOW_NAMESPACE_PERM = "ShowNamespace" SHOW_PGADMIN_PERM = "ShowPgAdmin" SHOW_PGBOUNCER_PERM = "ShowPgBouncer" @@ -97,8 +94,10 @@ const ( UPDATE_USER_PERM = "UpdateUser" ) -var RoleMap map[string]map[string]string -var PermMap map[string]string +var ( + RoleMap map[string]map[string]string + PermMap map[string]string +) func initializePerms() { RoleMap = make(map[string]map[string]string) @@ -112,6 +111,7 @@ func initializePerms() { DF_CLUSTER_PERM: "yes", LABEL_PERM: "yes", RELOAD_PERM: "yes", + RESTART_PERM: "yes", RESTORE_PERM: "yes", STATUS_PERM: "yes", TEST_CLUSTER_PERM: "yes", @@ -122,7 +122,6 @@ func initializePerms() { CREATE_DUMP_PERM: "yes", CREATE_CLUSTER_PERM: "yes", CREATE_FAILOVER_PERM: "yes", - CREATE_INGEST_PERM: "yes", CREATE_NAMESPACE_PERM: "yes", CREATE_PGADMIN_PERM: "yes", CREATE_PGBOUNCER_PERM: "yes", @@ -139,7 +138,6 @@ func initializePerms() { // DELETE DELETE_BACKUP_PERM: "yes", DELETE_CLUSTER_PERM: "yes", - DELETE_INGEST_PERM: "yes", DELETE_NAMESPACE_PERM: "yes", DELETE_PGADMIN_PERM: "yes", DELETE_PGBOUNCER_PERM: "yes", @@ -153,7 +151,6 @@ func initializePerms() { SHOW_BACKUP_PERM: "yes", SHOW_CLUSTER_PERM: "yes", SHOW_CONFIG_PERM: "yes", - SHOW_INGEST_PERM: "yes", SHOW_NAMESPACE_PERM: "yes", SHOW_PGADMIN_PERM: "yes", SHOW_PGBOUNCER_PERM: "yes", @@ -180,5 +177,4 @@ func initializePerms() { } log.Infof("loading PermMap with %d Permissions\n", len(PermMap)) - } diff --git a/internal/apiserver/pgadminservice/pgadminimpl.go b/internal/apiserver/pgadminservice/pgadminimpl.go index 4f23a8d028..cc2494269a 100644 --- a/internal/apiserver/pgadminservice/pgadminimpl.go +++ b/internal/apiserver/pgadminservice/pgadminimpl.go @@ -1,7 +1,7 @@ package pgadminservice /* -Copyright 2020 Crunchy Data Solutions, Inc. +Copyright 2020 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -183,7 +183,6 @@ func ShowPgAdmin(request *msgs.ShowPgAdminRequest, namespace string) msgs.ShowPg // try to get the list of clusters. 
if there is an error, put it into the // status and return clusterList, err := getClusterList(request.Namespace, request.ClusterNames, request.Selector) - if err != nil { response.SetError(err.Error()) return response @@ -191,7 +190,8 @@ func ShowPgAdmin(request *msgs.ShowPgAdminRequest, namespace string) msgs.ShowPg // iterate through the list of clusters to get the relevant pgAdmin // information about them - for _, cluster := range clusterList.Items { + for i := range clusterList.Items { + cluster := &clusterList.Items[i] result := msgs.ShowPgAdminDetail{ ClusterName: cluster.Spec.Name, HasPgAdmin: true, @@ -228,7 +228,7 @@ func ShowPgAdmin(request *msgs.ShowPgAdminRequest, namespace string) msgs.ShowPg // In the future, construct results to contain individual error stati // for now log and return empty content if encountered - qr, err := pgadmin.GetPgAdminQueryRunner(apiserver.Clientset, apiserver.RESTConfig, &cluster) + qr, err := pgadmin.GetPgAdminQueryRunner(apiserver.Clientset, apiserver.RESTConfig, cluster) if err != nil { log.Error(err) continue @@ -267,8 +267,7 @@ func getClusterList(namespace string, clusterNames []string, selector string) (c cl, err := apiserver.Clientset. CrunchydataV1().Pgclusters(namespace). List(ctx, metav1.ListOptions{LabelSelector: selector}) - - // if there is an error, return here with an empty cluster list + // if there is an error, return here with an empty cluster list if err != nil { return crv1.PgclusterList{}, err } @@ -278,7 +277,6 @@ func getClusterList(namespace string, clusterNames []string, selector string) (c // now try to get clusters based specific cluster names for _, clusterName := range clusterNames { cluster, err := apiserver.Clientset.CrunchydataV1().Pgclusters(namespace).Get(ctx, clusterName, metav1.GetOptions{}) - // if there is an error, capture it here and return here with an empty list if err != nil { return crv1.PgclusterList{}, err diff --git a/internal/apiserver/pgadminservice/pgadminservice.go b/internal/apiserver/pgadminservice/pgadminservice.go index 90378868ca..9c09cc884c 100644 --- a/internal/apiserver/pgadminservice/pgadminservice.go +++ b/internal/apiserver/pgadminservice/pgadminservice.go @@ -1,7 +1,7 @@ package pgadminservice /* -Copyright 2020 Crunchy Data Solutions, Inc. +Copyright 2020 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -63,20 +63,19 @@ func CreatePgAdminHandler(w http.ResponseWriter, r *http.Request) { if request.ClientVersion != msgs.PGO_VERSION { resp.SetError(apiserver.VERSION_MISMATCH_ERROR) - json.NewEncoder(w).Encode(resp) + _ = json.NewEncoder(w).Encode(resp) return } ns, err = apiserver.GetNamespace(apiserver.Clientset, username, request.Namespace) if err != nil { resp.SetError(err.Error()) - json.NewEncoder(w).Encode(resp) + _ = json.NewEncoder(w).Encode(resp) return } resp = CreatePgAdmin(&request, ns, username) - json.NewEncoder(w).Encode(resp) - + _ = json.NewEncoder(w).Encode(resp) } // DeletePgAdminHandler ... 
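The loop change in ShowPgAdmin above, indexing into clusterList.Items before taking a pointer, avoids pointing at the range variable. A self-contained sketch of the distinction:

```go
package main

import "fmt"

type cluster struct{ name string }

func main() {
	items := []cluster{{"one"}, {"two"}}

	// taking "&c" inside "for _, c := range items" would capture the address
	// of the loop variable (a copy), which linters such as gosec flag as
	// implicit memory aliasing; "&items[i]" points at the slice element itself
	ptrs := make([]*cluster, 0, len(items))
	for i := range items {
		ptrs = append(ptrs, &items[i])
	}

	items[1].name = "changed"
	fmt.Println(ptrs[1].name) // "changed": the pointer tracks the element
}
```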
@@ -117,20 +116,19 @@ func DeletePgAdminHandler(w http.ResponseWriter, r *http.Request) {
if request.ClientVersion != msgs.PGO_VERSION {
resp.SetError(apiserver.VERSION_MISMATCH_ERROR)
- json.NewEncoder(w).Encode(resp)
+ _ = json.NewEncoder(w).Encode(resp)
return
}
ns, err = apiserver.GetNamespace(apiserver.Clientset, username, request.Namespace)
if err != nil {
resp.SetError(err.Error())
- json.NewEncoder(w).Encode(resp)
+ _ = json.NewEncoder(w).Encode(resp)
return
}
resp = DeletePgAdmin(&request, ns)
- json.NewEncoder(w).Encode(resp)
-
+ _ = json.NewEncoder(w).Encode(resp)
}
// ShowPgAdminHandler is the HTTP handler to get information about a pgAdmin deployment
@@ -173,21 +171,19 @@ func ShowPgAdminHandler(w http.ResponseWriter, r *http.Request) {
// ensure the versions align...
if request.ClientVersion != msgs.PGO_VERSION {
resp.SetError(apiserver.VERSION_MISMATCH_ERROR)
- json.NewEncoder(w).Encode(resp)
+ _ = json.NewEncoder(w).Encode(resp)
return
}
// ensure the namespace being used exists
namespace, err := apiserver.GetNamespace(apiserver.Clientset, username, request.Namespace)
-
if err != nil {
resp.SetError(err.Error())
- json.NewEncoder(w).Encode(resp)
+ _ = json.NewEncoder(w).Encode(resp)
return
}
// get the information about a pgAdmin deployment(s)
resp = ShowPgAdmin(&request, namespace)
- json.NewEncoder(w).Encode(resp)
-
+ _ = json.NewEncoder(w).Encode(resp)
}
diff --git a/internal/apiserver/pgbouncerservice/pgbouncerimpl.go b/internal/apiserver/pgbouncerservice/pgbouncerimpl.go
index 6ce41ac784..ae01a855dc 100644
--- a/internal/apiserver/pgbouncerservice/pgbouncerimpl.go
+++ b/internal/apiserver/pgbouncerservice/pgbouncerimpl.go
@@ -1,7 +1,7 @@ package pgbouncerservice
/*
-Copyright 2018 - 2020 Crunchy Data Solutions, Inc.
+Copyright 2018 - 2023 Crunchy Data Solutions, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
@@ -72,15 +72,14 @@ func CreatePgbouncer(request *msgs.CreatePgbouncerRequest, ns, pgouser string) m
// try to get the list of clusters. if there is an error, put it into the
// status and return
clusterList, err := getClusterList(request.Namespace, request.Args, request.Selector)
-
if err != nil {
resp.Status.Code = msgs.Error
resp.Status.Msg = err.Error()
return resp
}
- for _, cluster := range clusterList.Items {
-
+ for i := range clusterList.Items {
+ cluster := clusterList.Items[i]
// check if the current cluster is not upgraded to the deployed
// Operator version. If not, do not allow the command to complete
if cluster.Annotations[config.ANNOTATION_IS_UPGRADED] == config.ANNOTATIONS_FALSE {
@@ -89,6 +88,13 @@ func CreatePgbouncer(request *msgs.CreatePgbouncerRequest, ns, pgouser string) m
return resp
}
+ // validate the TLS settings
+ if err := validateTLS(cluster, request); err != nil {
+ resp.Status.Code = msgs.Error
+ resp.Status.Msg = err.Error()
+ return resp
+ }
+
log.Debugf("adding pgbouncer to cluster [%s]", cluster.Name)
resources := v1.ResourceList{}
@@ -103,6 +109,17 @@ func CreatePgbouncer(request *msgs.CreatePgbouncerRequest, ns, pgouser string) m
cluster.Spec.PgBouncer.Replicas = request.Replicas
}
+ // set the optional ServiceType parameter
+ switch request.ServiceType {
+ default:
+ resp.Status.Code = msgs.Error
+ resp.Status.Msg = fmt.Sprintf("invalid service type %q", request.ServiceType)
+ return resp
+ case v1.ServiceTypeClusterIP, v1.ServiceTypeNodePort,
+ v1.ServiceTypeLoadBalancer, v1.ServiceTypeExternalName, "":
+ cluster.Spec.PgBouncer.ServiceType = request.ServiceType
+ }
+
// if the request has overriding CPU/memory parameters,
// these will take precedence over the defaults
if request.CPULimit != "" {
@@ -132,6 +149,8 @@ func CreatePgbouncer(request *msgs.CreatePgbouncerRequest, ns, pgouser string) m
}
cluster.Spec.PgBouncer.Resources = resources
+ cluster.Spec.PgBouncer.Limits = limits
+ cluster.Spec.PgBouncer.TLSSecret = request.TLSSecret
// update the cluster CRD with these updates. If there is an error
if _, err := apiserver.Clientset.CrunchydataV1().Pgclusters(request.Namespace).
@@ -163,7 +182,6 @@ func DeletePgbouncer(request *msgs.DeletePgbouncerRequest, ns string) msgs.Delet
// try to get the list of clusters. if there is an error, put it into the
// status and return
clusterList, err := getClusterList(request.Namespace, request.Args, request.Selector)
-
if err != nil {
resp.Status.Code = msgs.Error
resp.Status.Msg = err.Error()
@@ -184,7 +202,8 @@ func DeletePgbouncer(request *msgs.DeletePgbouncerRequest, ns string) msgs.Delet
return resp
}
- for _, cluster := range clusterList.Items {
+ for i := range clusterList.Items {
+ cluster := clusterList.Items[i]
log.Debugf("deleting pgbouncer from cluster [%s]", cluster.Name)
// check to see if the uninstall flag was set. If it was, apply the update
@@ -219,7 +238,6 @@ func DeletePgbouncer(request *msgs.DeletePgbouncerRequest, ns string) msgs.Delet
}
return resp
-
}
// ShowPgBouncer gets information about a PostgreSQL cluster's pgBouncer
@@ -242,7 +260,6 @@ func ShowPgBouncer(request *msgs.ShowPgBouncerRequest, namespace string) msgs.Sh
// try to get the list of clusters. if there is an error, put it into the
// status and return
clusterList, err := getClusterList(request.Namespace, request.ClusterNames, request.Selector)
-
if err != nil {
response.Status.Code = msgs.Error
response.Status.Msg = err.Error()
@@ -324,7 +341,6 @@ func UpdatePgBouncer(request *msgs.UpdatePgBouncerRequest, namespace, pgouser st
// try to get the list of clusters. if there is an error, put it into the
// status and return
clusterList, err := getClusterList(request.Namespace, request.ClusterNames, request.Selector)
-
if err != nil {
response.Status.Code = msgs.Error
response.Status.Msg = err.Error()
@@ -345,7 +361,8 @@ func UpdatePgBouncer(request *msgs.UpdatePgBouncerRequest, namespace, pgouser st
// iterate through the list of clusters to get the relevant pgBouncer
// information about them
- for _, cluster := range clusterList.Items {
+ for i := range clusterList.Items {
+ cluster := clusterList.Items[i]
result := msgs.UpdatePgBouncerDetail{
ClusterName: cluster.Spec.Name,
HasPgBouncer: true,
@@ -368,6 +385,19 @@ func UpdatePgBouncer(request *msgs.UpdatePgBouncerRequest, namespace, pgouser st
}
}
+ // set the optional ServiceType parameter
+ switch request.ServiceType {
+ default:
+ result.Error = true
+ result.ErrorMessage = fmt.Sprintf("invalid service type %q", request.ServiceType)
+ response.Results = append(response.Results, result)
+ continue
+ case v1.ServiceTypeClusterIP, v1.ServiceTypeNodePort,
+ v1.ServiceTypeLoadBalancer, v1.ServiceTypeExternalName:
+ cluster.Spec.PgBouncer.ServiceType = request.ServiceType
+ case "": // no-op, well, no change
+ }
+
// ensure the Resources/Limits are non-nil
if cluster.Spec.PgBouncer.Resources == nil {
cluster.Spec.PgBouncer.Resources = v1.ResourceList{}
@@ -442,7 +472,6 @@ func getClusterList(namespace string, clusterNames []string, selector string) (c
// of arguments...or both. First, start with the selector
if selector != "" {
cl, err := apiserver.Clientset.CrunchydataV1().Pgclusters(namespace).List(ctx, metav1.ListOptions{LabelSelector: selector})
-
// if there is an error, return here with an empty cluster list
if err != nil {
return crv1.PgclusterList{}, err
@@ -453,7 +482,6 @@ func getClusterList(namespace string, clusterNames []string, selector string) (c
// now try to get clusters based on specific cluster names
for _, clusterName := range clusterNames {
cluster, err := apiserver.Clientset.CrunchydataV1().Pgclusters(namespace).Get(ctx, clusterName, metav1.GetOptions{})
-
// if there is an error, capture it and return here with an empty list
if err != nil {
return crv1.PgclusterList{}, err
@@ -483,7 +511,6 @@ func setPgBouncerPasswordDetail(cluster crv1.Pgcluster, result *msgs.ShowPgBounc
// attempt to get the secret, but only get the password
password, err := util.GetPasswordFromSecret(apiserver.Clientset, cluster.Spec.Namespace, pgBouncerSecretName)
-
if err != nil {
log.Warn(err)
}
@@ -503,8 +530,7 @@ func setPgBouncerServiceDetail(cluster crv1.Pgcluster, result *msgs.ShowPgBounce
services, err := apiserver.Clientset.
CoreV1().Services(cluster.Spec.Namespace).
List(ctx, metav1.ListOptions{LabelSelector: selector})
-
- // if there is an error, return without making any adjustments
+ // if there is an error, return without making any adjustments
if err != nil {
log.Warn(err)
return
@@ -534,3 +560,40 @@ func setPgBouncerServiceDetail(cluster crv1.Pgcluster, result *msgs.ShowPgBounce
}
}
}
+
+// validateTLS validates the parameters that allow a user to enable TLS
+// connections to a pgBouncer cluster. In essence, it requires both the
+// TLSSecret to be set for pgBouncer as well as a CASecret/TLSSecret for the
+// cluster itself
+func validateTLS(cluster crv1.Pgcluster, request *msgs.CreatePgbouncerRequest) error {
+ ctx := context.TODO()
+
+ // if TLSSecret is not set, well, this is valid
+ if request.TLSSecret == "" {
+ return nil
+ }
+
+ // if TLSSecret is set on the request but the cluster is missing either its
+ // own TLSSecret or its CASecret, return an error
+ if request.TLSSecret != "" && (cluster.Spec.TLS.TLSSecret == "" || cluster.Spec.TLS.CASecret == "") {
+ return fmt.Errorf("%s: both TLS secret and CA secret must be set on the cluster in order to enable TLS for pgBouncer", cluster.Name)
+ }
+
+ // ensure the TLSSecret and CASecret for the cluster are actually present
+ // now check for the existence of the two secrets
+ // First the TLS secret
+ if _, err := apiserver.Clientset.
+ CoreV1().Secrets(cluster.Namespace).
+ Get(ctx, cluster.Spec.TLS.TLSSecret, metav1.GetOptions{}); err != nil {
+ return fmt.Errorf("%s: cannot find TLS secret for cluster: %w", cluster.Name, err)
+ }
+
+ if _, err := apiserver.Clientset.
+ CoreV1().Secrets(cluster.Namespace).
+ Get(ctx, cluster.Spec.TLS.CASecret, metav1.GetOptions{}); err != nil {
+ return fmt.Errorf("%s: cannot find CA secret for cluster: %w", cluster.Name, err)
+ }
+
+ // after this, we are validated!
+ return nil
+}
diff --git a/internal/apiserver/pgbouncerservice/pgbouncerservice.go b/internal/apiserver/pgbouncerservice/pgbouncerservice.go
index 969aabd205..0d025d6e5b 100644
--- a/internal/apiserver/pgbouncerservice/pgbouncerservice.go
+++ b/internal/apiserver/pgbouncerservice/pgbouncerservice.go
@@ -1,7 +1,7 @@ package pgbouncerservice
/*
-Copyright 2018 - 2020 Crunchy Data Solutions, Inc.
+Copyright 2018 - 2023 Crunchy Data Solutions, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
@@ -17,10 +17,11 @@ limitations under the License.
import (
"encoding/json"
+ "net/http"
+
"github.com/crunchydata/postgres-operator/internal/apiserver"
msgs "github.com/crunchydata/postgres-operator/pkg/apiservermsgs"
log "github.com/sirupsen/logrus"
- "net/http"
)
// CreatePgbouncerHandler ...
@@ -63,7 +64,7 @@ func CreatePgbouncerHandler(w http.ResponseWriter, r *http.Request) {
if request.ClientVersion != msgs.PGO_VERSION {
resp.Status.Code = msgs.Error
resp.Status.Msg = apiserver.VERSION_MISMATCH_ERROR
- json.NewEncoder(w).Encode(resp)
+ _ = json.NewEncoder(w).Encode(resp)
return
}
@@ -71,13 +72,12 @@ func CreatePgbouncerHandler(w http.ResponseWriter, r *http.Request) {
ns, err = apiserver.GetNamespace(apiserver.Clientset, username, request.Namespace)
if err != nil {
resp.Status.Code = msgs.Error
resp.Status.Msg = err.Error()
- json.NewEncoder(w).Encode(resp)
+ _ = json.NewEncoder(w).Encode(resp)
return
}
resp = CreatePgbouncer(&request, ns, username)
- json.NewEncoder(w).Encode(resp)
-
+ _ = json.NewEncoder(w).Encode(resp)
}
/* The delete pgbouncer handler is set up to be used by two different routes.
To keep @@ -141,7 +141,7 @@ func DeletePgbouncerHandler(w http.ResponseWriter, r *http.Request) { if request.ClientVersion != msgs.PGO_VERSION { resp.Status.Code = msgs.Error resp.Status.Msg = apiserver.VERSION_MISMATCH_ERROR - json.NewEncoder(w).Encode(resp) + _ = json.NewEncoder(w).Encode(resp) return } @@ -149,13 +149,12 @@ func DeletePgbouncerHandler(w http.ResponseWriter, r *http.Request) { if err != nil { resp.Status.Code = msgs.Error resp.Status.Msg = err.Error() - json.NewEncoder(w).Encode(resp) + _ = json.NewEncoder(w).Encode(resp) return } resp = DeletePgbouncer(&request, ns) - json.NewEncoder(w).Encode(resp) - + _ = json.NewEncoder(w).Encode(resp) } // ShowPgBouncerHandler is the HTTP handler to get information about a pgBouncer @@ -182,7 +181,6 @@ func ShowPgBouncerHandler(w http.ResponseWriter, r *http.Request) { // first, determine if the user is authorized to access this resource username, err := apiserver.Authn(apiserver.SHOW_PGBOUNCER_PERM, w, r) - if err != nil { return } @@ -202,13 +200,12 @@ func ShowPgBouncerHandler(w http.ResponseWriter, r *http.Request) { Msg: apiserver.VERSION_MISMATCH_ERROR, }, } - json.NewEncoder(w).Encode(response) + _ = json.NewEncoder(w).Encode(response) return } // ensure the namespace being used exists namespace, err := apiserver.GetNamespace(apiserver.Clientset, username, request.Namespace) - if err != nil { response := msgs.ShowPgBouncerResponse{ Status: msgs.Status{ @@ -216,14 +213,13 @@ func ShowPgBouncerHandler(w http.ResponseWriter, r *http.Request) { Msg: err.Error(), }, } - json.NewEncoder(w).Encode(response) + _ = json.NewEncoder(w).Encode(response) return } // get the information about a pgbouncer deployment(s) response := ShowPgBouncer(&request, namespace) - json.NewEncoder(w).Encode(response) - + _ = json.NewEncoder(w).Encode(response) } // UpdatePgBouncerHandler is the HTTP handler to perform update tasks on a @@ -250,7 +246,6 @@ func UpdatePgBouncerHandler(w http.ResponseWriter, r *http.Request) { // first, determine if the user is authorized to access this resource username, err := apiserver.Authn(apiserver.UPDATE_PGBOUNCER_PERM, w, r) - if err != nil { return } @@ -270,13 +265,12 @@ func UpdatePgBouncerHandler(w http.ResponseWriter, r *http.Request) { Msg: apiserver.VERSION_MISMATCH_ERROR, }, } - json.NewEncoder(w).Encode(response) + _ = json.NewEncoder(w).Encode(response) return } // ensure the namespace being used exists namespace, err := apiserver.GetNamespace(apiserver.Clientset, username, request.Namespace) - if err != nil { response := msgs.UpdatePgBouncerResponse{ Status: msgs.Status{ @@ -284,11 +278,11 @@ func UpdatePgBouncerHandler(w http.ResponseWriter, r *http.Request) { Msg: err.Error(), }, } - json.NewEncoder(w).Encode(response) + _ = json.NewEncoder(w).Encode(response) return } // get the information about a pgbouncer deployment(s) response := UpdatePgBouncer(&request, namespace, username) - json.NewEncoder(w).Encode(response) + _ = json.NewEncoder(w).Encode(response) } diff --git a/internal/apiserver/pgdumpservice/pgdumpimpl.go b/internal/apiserver/pgdumpservice/pgdumpimpl.go index 4a5f1a5d42..ebe0e84c2a 100644 --- a/internal/apiserver/pgdumpservice/pgdumpimpl.go +++ b/internal/apiserver/pgdumpservice/pgdumpimpl.go @@ -1,7 +1,7 @@ package pgdumpservice /* -Copyright 2019 - 2020 Crunchy Data Solutions, Inc. +Copyright 2019 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at @@ -17,7 +17,6 @@ limitations under the License. import ( "context" - "errors" "fmt" "strconv" "strings" @@ -28,13 +27,14 @@ import ( crv1 "github.com/crunchydata/postgres-operator/pkg/apis/crunchydata.com/v1" msgs "github.com/crunchydata/postgres-operator/pkg/apiservermsgs" log "github.com/sirupsen/logrus" - v1 "k8s.io/api/core/v1" kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -const pgDumpTaskExtension = "-pgdump" -const pgDumpJobExtension = "-pgdump-job" +const ( + pgDumpTaskExtension = "-pgdump" + pgDumpJobExtension = "-pgdump-job" +) // CreateBackup ... // pgo backup mycluster @@ -50,7 +50,7 @@ func CreatepgDump(request *msgs.CreatepgDumpBackupRequest, ns string) msgs.Creat log.Debug("CreatePgDump storage config... " + request.StorageConfig) if request.StorageConfig != "" { - if apiserver.IsValidStorageName(request.StorageConfig) == false { + if !apiserver.IsValidStorageName(request.StorageConfig) { log.Debug("CreateBackup sc error is found " + request.StorageConfig) resp.Status.Code = msgs.Error resp.Status.Msg = request.StorageConfig + " Storage config was not found " @@ -68,7 +68,7 @@ func CreatepgDump(request *msgs.CreatepgDumpBackupRequest, ns string) msgs.Creat } if request.Selector != "" { - //use the selector instead of an argument list to filter on + // use the selector instead of an argument list to filter on clusterList, err := apiserver.Clientset. CrunchydataV1().Pgclusters(ns). @@ -117,7 +117,7 @@ func CreatepgDump(request *msgs.CreatepgDumpBackupRequest, ns string) msgs.Creat } deletePropagation := metav1.DeletePropagationForeground - apiserver.Clientset. + _ = apiserver.Clientset. BatchV1().Jobs(ns). Delete(ctx, clusterName+pgDumpJobExtension, metav1.DeleteOptions{PropagationPolicy: &deletePropagation}) @@ -132,9 +132,8 @@ func CreatepgDump(request *msgs.CreatepgDumpBackupRequest, ns string) msgs.Creat } else { log.Debugf("pgtask %s was found so we will recreate it", taskName) - //remove the existing pgtask + // remove the existing pgtask err := apiserver.Clientset.CrunchydataV1().Pgtasks(ns).Delete(ctx, taskName, metav1.DeleteOptions{}) - if err != nil { resp.Status.Code = msgs.Error resp.Status.Msg = err.Error() @@ -142,21 +141,9 @@ func CreatepgDump(request *msgs.CreatepgDumpBackupRequest, ns string) msgs.Creat } } - //get pod name from cluster - // var podname, deployName string - var podname string - podname, err = getPrimaryPodName(cluster, ns) - - if err != nil { - log.Error(err) - resp.Status.Code = msgs.Error - resp.Status.Msg = err.Error() - return resp - } - // where all the magic happens about the task. // TODO: Needs error handling for invalid parameters in the request - theTask := buildPgTaskForDump(clusterName, taskName, crv1.PgtaskpgDump, podname, "database", request) + theTask := buildPgTaskForDump(clusterName, taskName, crv1.PgtaskpgDump, "database", request) _, err = apiserver.Clientset.CrunchydataV1().Pgtasks(ns).Create(ctx, theTask, metav1.CreateOptions{}) if err != nil { @@ -194,7 +181,7 @@ func ShowpgDump(clusterName string, selector string, ns string) msgs.ShowBackupR } } - //get a list of all clusters + // get a list of all clusters clusterList, err := apiserver.Clientset. CrunchydataV1().Pgclusters(ns). 
List(ctx, metav1.ListOptions{LabelSelector: selector}) @@ -217,7 +204,7 @@ func ShowpgDump(clusterName string, selector string, ns string) msgs.ShowBackupR pgTaskName := "backup-" + c.Name + pgDumpTaskExtension - backupItem, error := getPgBackupForTask(c.Name, pgTaskName, ns) + backupItem, error := getPgBackupForTask(pgTaskName, ns) if backupItem != nil { log.Debugf("pgTask %s was found", pgTaskName) @@ -238,13 +225,11 @@ func ShowpgDump(clusterName string, selector string, ns string) msgs.ShowBackupR } return response - } // builds out a pgTask structure that can be handed to kube -func buildPgTaskForDump(clusterName, taskName, action, podName, containerName string, +func buildPgTaskForDump(clusterName, taskName, action, containerName string, request *msgs.CreatepgDumpBackupRequest) *crv1.Pgtask { - var newInstance *crv1.Pgtask var storageSpec crv1.PgStorageSpec var pvcName string @@ -298,50 +283,6 @@ func buildPgTaskForDump(clusterName, taskName, action, podName, containerName st return newInstance } -func getPrimaryPodName(cluster *crv1.Pgcluster, ns string) (string, error) { - ctx := context.TODO() - var podname string - - selector := config.LABEL_SERVICE_NAME + "=" + cluster.Spec.Name - - pods, err := apiserver.Clientset.CoreV1().Pods(ns).List(ctx, metav1.ListOptions{LabelSelector: selector}) - if err != nil { - return podname, err - } - - for _, p := range pods.Items { - if isPrimary(&p, cluster.Spec.Name) && isReady(&p) { - return p.Name, err - } - } - - return podname, errors.New("primary pod is not in Ready state") -} - -func isPrimary(pod *v1.Pod, clusterName string) bool { - if pod.ObjectMeta.Labels[config.LABEL_SERVICE_NAME] == clusterName { - return true - } - return false - -} - -func isReady(pod *v1.Pod) bool { - readyCount := 0 - containerCount := 0 - for _, stat := range pod.Status.ContainerStatuses { - containerCount++ - if stat.Ready { - readyCount++ - } - } - if readyCount != containerCount { - return false - } - return true - -} - // dumpAllFlag, dumpOpts = parseOptionFlags(request.BackupOpt) func parseOptionFlags(allFlags string) (bool, string) { dumpFlag := false @@ -353,14 +294,12 @@ func parseOptionFlags(allFlags string) (bool, string) { options := strings.Split(allFlags, " ") for _, token := range options { - // handle dump flag if strings.Contains(token, "--dump-all") { dumpFlag = true } else { parsedOptions = append(parsedOptions, token) } - } optionString := strings.Join(parsedOptions, " ") @@ -368,11 +307,10 @@ func parseOptionFlags(allFlags string) (bool, string) { log.Debugf("pgdump optionFlags: %s, dumpAll: %t", optionString, dumpFlag) return dumpFlag, optionString - } // if backup && err are nil, it simply wasn't found. 
Otherwise found or an error -func getPgBackupForTask(clusterName string, taskName string, ns string) (*msgs.Pgbackup, error) { +func getPgBackupForTask(taskName, ns string) (*msgs.Pgbackup, error) { ctx := context.TODO() task, err := apiserver.Clientset.CrunchydataV1().Pgtasks(ns).Get(ctx, taskName, metav1.GetOptions{}) @@ -388,7 +326,6 @@ func getPgBackupForTask(clusterName string, taskName string, ns string) (*msgs.P // converts pgTask to a pgBackup structure func buildPgBackupFrompgTask(dumpTask *crv1.Pgtask) *msgs.Pgbackup { - backup := msgs.Pgbackup{} spec := dumpTask.Spec @@ -461,7 +398,7 @@ func Restore(request *msgs.PgRestoreRequest, ns string) msgs.PgRestoreResponse { return resp } - //delete any existing pgtask with the same name + // delete any existing pgtask with the same name err = apiserver.Clientset.CrunchydataV1().Pgtasks(ns).Delete(ctx, pgtask.Name, metav1.DeleteOptions{}) if err != nil && !kerrors.IsNotFound(err) { resp.Status.Code = msgs.Error @@ -469,7 +406,7 @@ func Restore(request *msgs.PgRestoreRequest, ns string) msgs.PgRestoreResponse { return resp } - //create a pgtask for the restore workflow + // create a pgtask for the restore workflow _, err = apiserver.Clientset.CrunchydataV1().Pgtasks(ns).Create(ctx, pgtask, metav1.CreateOptions{}) if err != nil { resp.Status.Code = msgs.Error @@ -484,7 +421,6 @@ func Restore(request *msgs.PgRestoreRequest, ns string) msgs.PgRestoreResponse { // builds out a pgTask structure that can be handed to kube func buildPgTaskForRestore(taskName string, action string, request *msgs.PgRestoreRequest) (*crv1.Pgtask, error) { - var newInstance *crv1.Pgtask var storageSpec crv1.PgStorageSpec @@ -520,6 +456,12 @@ func buildPgTaskForRestore(taskName string, action string, request *msgs.PgResto spec.Parameters[config.LABEL_NODE_LABEL_KEY] = parts[0] spec.Parameters[config.LABEL_NODE_LABEL_VALUE] = parts[1] + // determine if any special node affinity type must be set + spec.Parameters[config.LABEL_NODE_AFFINITY_TYPE] = "preferred" + if request.NodeAffinityType == crv1.NodeAffinityTypeRequired { + spec.Parameters[config.LABEL_NODE_AFFINITY_TYPE] = "required" + } + log.Debug("Restore node labels used from user entered flag") } diff --git a/internal/apiserver/pgdumpservice/pgdumpservice.go b/internal/apiserver/pgdumpservice/pgdumpservice.go index 755a9bbd98..5be602b811 100644 --- a/internal/apiserver/pgdumpservice/pgdumpservice.go +++ b/internal/apiserver/pgdumpservice/pgdumpservice.go @@ -1,7 +1,7 @@ package pgdumpservice /* -Copyright 2019 - 2020 Crunchy Data Solutions, Inc. +Copyright 2019 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -17,12 +17,13 @@ limitations under the License. import ( "encoding/json" + "net/http" + "github.com/crunchydata/postgres-operator/internal/apiserver" "github.com/crunchydata/postgres-operator/internal/config" msgs "github.com/crunchydata/postgres-operator/pkg/apiservermsgs" "github.com/gorilla/mux" log "github.com/sirupsen/logrus" - "net/http" ) // BackupHandler ... 
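The Restore flow above removes any existing pgtask with the same name before creating the new one, and kerrors.IsNotFound is the one delete error it tolerates, since it just means there was nothing stale to clean up. A runnable sketch of that idiom; ConfigMaps and client-go's fake clientset stand in for the generated pgtask client, so the object type and names are assumptions:

package main

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	kerrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

func main() {
	ctx := context.TODO()
	clientset := fake.NewSimpleClientset()
	cm := &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "restore-task", Namespace: "pgo"}}

	// delete any leftover object with the same name; "not found" simply means
	// there is nothing stale to clean up, so it is not treated as a failure
	if err := clientset.CoreV1().ConfigMaps("pgo").Delete(ctx, cm.Name, metav1.DeleteOptions{}); err != nil && !kerrors.IsNotFound(err) {
		panic(err)
	}

	// with the stale copy gone, the create cannot fail with AlreadyExists
	if _, err := clientset.CoreV1().ConfigMaps("pgo").Create(ctx, cm, metav1.CreateOptions{}); err != nil {
		panic(err)
	}
	fmt.Println("task recreated")
}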
@@ -66,12 +67,12 @@ func BackupHandler(w http.ResponseWriter, r *http.Request) { ns, err = apiserver.GetNamespace(apiserver.Clientset, username, request.Namespace) if err != nil { resp.Status = msgs.Status{Code: msgs.Error, Msg: err.Error()} - json.NewEncoder(w).Encode(resp) + _ = json.NewEncoder(w).Encode(resp) return } resp = CreatepgDump(&request, ns) - json.NewEncoder(w).Encode(resp) + _ = json.NewEncoder(w).Encode(resp) } // ShowpgDumpHandler ... @@ -135,7 +136,7 @@ func ShowDumpHandler(w http.ResponseWriter, r *http.Request) { if clientVersion != msgs.PGO_VERSION { resp.Status = msgs.Status{Code: msgs.Error, Msg: apiserver.VERSION_MISMATCH_ERROR} - json.NewEncoder(w).Encode(resp) + _ = json.NewEncoder(w).Encode(resp) return } @@ -143,13 +144,12 @@ func ShowDumpHandler(w http.ResponseWriter, r *http.Request) { ns, err = apiserver.GetNamespace(apiserver.Clientset, username, namespace) if err != nil { resp.Status = msgs.Status{Code: msgs.Error, Msg: err.Error()} - json.NewEncoder(w).Encode(resp) + _ = json.NewEncoder(w).Encode(resp) return } resp = ShowpgDump(clustername, selector, ns) - json.NewEncoder(w).Encode(resp) - + _ = json.NewEncoder(w).Encode(resp) } // RestoreHandler ... @@ -195,7 +195,7 @@ func RestoreHandler(w http.ResponseWriter, r *http.Request) { if err != nil { resp.Status.Code = msgs.Error resp.Status.Msg = err.Error() - json.NewEncoder(w).Encode(resp) + _ = json.NewEncoder(w).Encode(resp) return } @@ -205,5 +205,5 @@ func RestoreHandler(w http.ResponseWriter, r *http.Request) { resp.Status.Msg = err.Error() } - json.NewEncoder(w).Encode(resp) + _ = json.NewEncoder(w).Encode(resp) } diff --git a/internal/apiserver/pgoroleservice/pgoroleimpl.go b/internal/apiserver/pgoroleservice/pgoroleimpl.go index 47f3c11502..733d1b5301 100644 --- a/internal/apiserver/pgoroleservice/pgoroleimpl.go +++ b/internal/apiserver/pgoroleservice/pgoroleimpl.go @@ -1,7 +1,7 @@ package pgoroleservice /* -Copyright 2019 - 2020 Crunchy Data Solutions, Inc. +Copyright 2019 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -34,7 +34,6 @@ import ( // CreatePgorole ... func CreatePgorole(clientset kubernetes.Interface, createdBy string, request *msgs.CreatePgoroleRequest) msgs.CreatePgoroleResponse { - log.Debugf("CreatePgorole %v", request) resp := msgs.CreatePgoroleResponse{} resp.Status.Code = msgs.Ok @@ -54,7 +53,7 @@ func CreatePgorole(clientset kubernetes.Interface, createdBy string, request *ms return resp } - //publish event + // publish event topics := make([]string, 1) topics[0] = events.EventTopicPGOUser @@ -77,7 +76,6 @@ func CreatePgorole(clientset kubernetes.Interface, createdBy string, request *ms } return resp - } // ShowPgorole ... @@ -122,7 +120,6 @@ func ShowPgorole(clientset kubernetes.Interface, request *msgs.ShowPgoroleReques } return resp - } // DeletePgorole ... 
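Another pattern applied throughout the patch (ShowPgAdmin and CreatePgbouncer above, deleteRoleFromUsers just below) is rewriting for _, item := range list.Items loops to iterate by index, binding item := list.Items[i] or item := &list.Items[i] explicitly. Ranging by value copies each element, and before Go 1.22 taking the copy's address aliased a single reused loop variable, which is what gosec reports as G601; indexing instead yields a pointer to the element itself. A small sketch with a hypothetical Item type:

package main

import "fmt"

// Item is a hypothetical stand-in for the list item types in the real code.
type Item struct{ Name string }

func main() {
	items := []Item{{"a"}, {"b"}}

	var byValue, byIndex []*Item
	for _, it := range items {
		it := it // before Go 1.22, omitting this copy made every pointer alias one variable
		byValue = append(byValue, &it)
	}
	for i := range items {
		byIndex = append(byIndex, &items[i]) // points at the actual slice element
	}

	items[0].Name = "changed"
	// byValue still sees the old copy; byIndex observes the mutation
	fmt.Println(byValue[0].Name, byIndex[0].Name) // a changed
}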
@@ -164,7 +161,6 @@ func DeletePgorole(clientset kubernetes.Interface, deletedBy string, request *ms } return resp - } func UpdatePgorole(clientset kubernetes.Interface, updatedBy string, request *msgs.UpdatePgoroleRequest) msgs.UpdatePgoroleResponse { @@ -200,7 +196,7 @@ func UpdatePgorole(clientset kubernetes.Interface, updatedBy string, request *ms return resp } - //publish event + // publish event topics := make([]string, 1) topics[0] = events.EventTopicPGOUser @@ -223,13 +219,12 @@ func UpdatePgorole(clientset kubernetes.Interface, updatedBy string, request *ms } return resp - } func createSecret(clientset kubernetes.Interface, createdBy, pgorolename, permissions string) error { ctx := context.TODO() - var enRolename = pgorolename + enRolename := pgorolename secretName := "pgorole-" + pgorolename @@ -269,7 +264,7 @@ func validPermissions(perms string) error { func deleteRoleFromUsers(clientset kubernetes.Interface, roleName string) error { ctx := context.TODO() - //get pgouser Secrets + // get pgouser Secrets selector := config.LABEL_PGO_PGOUSER + "=true" pgouserSecrets, err := clientset. @@ -280,7 +275,8 @@ func deleteRoleFromUsers(clientset kubernetes.Interface, roleName string) error return err } - for _, s := range pgouserSecrets.Items { + for i := range pgouserSecrets.Items { + s := &pgouserSecrets.Items[i] rolesString := string(s.Data[pgouserservice.MAP_KEY_ROLES]) roles := strings.Split(rolesString, ",") resultRoles := make([]string, 0) @@ -294,7 +290,7 @@ func deleteRoleFromUsers(clientset kubernetes.Interface, roleName string) error } } - //update the pgouser Secret removing any roles as necessary + // update the pgouser Secret removing any roles as necessary if rolesUpdated { var resultingRoleString string @@ -307,8 +303,7 @@ func deleteRoleFromUsers(clientset kubernetes.Interface, roleName string) error } s.Data[pgouserservice.MAP_KEY_ROLES] = []byte(resultingRoleString) - _, err = clientset.CoreV1().Secrets(apiserver.PgoNamespace).Update(ctx, &s, metav1.UpdateOptions{}) - if err != nil { + if _, err := clientset.CoreV1().Secrets(apiserver.PgoNamespace).Update(ctx, s, metav1.UpdateOptions{}); err != nil { return err } diff --git a/internal/apiserver/pgoroleservice/pgoroleservice.go b/internal/apiserver/pgoroleservice/pgoroleservice.go index b3e3413e09..6ba24d1b77 100644 --- a/internal/apiserver/pgoroleservice/pgoroleservice.go +++ b/internal/apiserver/pgoroleservice/pgoroleservice.go @@ -1,7 +1,7 @@ package pgoroleservice /* -Copyright 2019 - 2020 Crunchy Data Solutions, Inc. +Copyright 2019 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -17,11 +17,12 @@ limitations under the License. 
import ( "encoding/json" + "net/http" + apiserver "github.com/crunchydata/postgres-operator/internal/apiserver" msgs "github.com/crunchydata/postgres-operator/pkg/apiservermsgs" log "github.com/sirupsen/logrus" "k8s.io/apimachinery/pkg/util/validation" - "net/http" ) func CreatePgoroleHandler(w http.ResponseWriter, r *http.Request) { @@ -63,7 +64,7 @@ func CreatePgoroleHandler(w http.ResponseWriter, r *http.Request) { if request.ClientVersion != msgs.PGO_VERSION { resp.Status.Code = msgs.Error resp.Status.Msg = apiserver.VERSION_MISMATCH_ERROR - json.NewEncoder(w).Encode(resp) + _ = json.NewEncoder(w).Encode(resp) return } @@ -75,7 +76,7 @@ func CreatePgoroleHandler(w http.ResponseWriter, r *http.Request) { resp = CreatePgorole(apiserver.Clientset, rolename, &request) } - json.NewEncoder(w).Encode(resp) + _ = json.NewEncoder(w).Encode(resp) } func DeletePgoroleHandler(w http.ResponseWriter, r *http.Request) { @@ -117,14 +118,13 @@ func DeletePgoroleHandler(w http.ResponseWriter, r *http.Request) { if request.ClientVersion != msgs.PGO_VERSION { resp.Status.Code = msgs.Error resp.Status.Msg = apiserver.VERSION_MISMATCH_ERROR - json.NewEncoder(w).Encode(resp) + _ = json.NewEncoder(w).Encode(resp) return } resp = DeletePgorole(apiserver.Clientset, rolename, &request) - json.NewEncoder(w).Encode(resp) - + _ = json.NewEncoder(w).Encode(resp) } func ShowPgoroleHandler(w http.ResponseWriter, r *http.Request) { @@ -167,14 +167,13 @@ func ShowPgoroleHandler(w http.ResponseWriter, r *http.Request) { if request.ClientVersion != msgs.PGO_VERSION { resp.Status.Code = msgs.Error resp.Status.Msg = apiserver.VERSION_MISMATCH_ERROR - json.NewEncoder(w).Encode(resp) + _ = json.NewEncoder(w).Encode(resp) return } resp = ShowPgorole(apiserver.Clientset, &request) - json.NewEncoder(w).Encode(resp) - + _ = json.NewEncoder(w).Encode(resp) } func UpdatePgoroleHandler(w http.ResponseWriter, r *http.Request) { @@ -213,5 +212,5 @@ func UpdatePgoroleHandler(w http.ResponseWriter, r *http.Request) { resp.Status = msgs.Status{Code: msgs.Ok, Msg: ""} resp = UpdatePgorole(apiserver.Clientset, rolename, &request) - json.NewEncoder(w).Encode(resp) + _ = json.NewEncoder(w).Encode(resp) } diff --git a/internal/apiserver/pgouserservice/pgouserimpl.go b/internal/apiserver/pgouserservice/pgouserimpl.go index aaa94fbc00..c8c80043f3 100644 --- a/internal/apiserver/pgouserservice/pgouserimpl.go +++ b/internal/apiserver/pgouserservice/pgouserimpl.go @@ -1,7 +1,7 @@ package pgouserservice /* -Copyright 2019 - 2020 Crunchy Data Solutions, Inc. +Copyright 2019 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -32,14 +32,15 @@ import ( "k8s.io/client-go/kubernetes" ) -const MAP_KEY_USERNAME = "username" -const MAP_KEY_PASSWORD = "password" -const MAP_KEY_ROLES = "roles" -const MAP_KEY_NAMESPACES = "namespaces" +const ( + MAP_KEY_USERNAME = "username" + MAP_KEY_PASSWORD = "password" + MAP_KEY_ROLES = "roles" + MAP_KEY_NAMESPACES = "namespaces" +) // CreatePgouser ... 
func CreatePgouser(clientset kubernetes.Interface, createdBy string, request *msgs.CreatePgouserRequest) msgs.CreatePgouserResponse { - log.Debugf("CreatePgouser %v", request) resp := msgs.CreatePgouserResponse{} resp.Status.Code = msgs.Ok @@ -71,7 +72,7 @@ func CreatePgouser(clientset kubernetes.Interface, createdBy string, request *ms return resp } - //publish event + // publish event topics := make([]string, 1) topics[0] = events.EventTopicPGOUser @@ -94,7 +95,6 @@ func CreatePgouser(clientset kubernetes.Interface, createdBy string, request *ms } return resp - } // ShowPgouser ... @@ -147,7 +147,6 @@ func ShowPgouser(clientset kubernetes.Interface, request *msgs.ShowPgouserReques } return resp - } // DeletePgouser ... @@ -170,7 +169,7 @@ func DeletePgouser(clientset kubernetes.Interface, deletedBy string, request *ms resp.Results = append(resp.Results, "error deleting secret "+secretName) } else { resp.Results = append(resp.Results, "deleted pgouser "+v) - //publish event + // publish event topics := make([]string, 1) topics[0] = events.EventTopicPGOUser @@ -198,7 +197,6 @@ func DeletePgouser(clientset kubernetes.Interface, deletedBy string, request *ms } return resp - } // UpdatePgouser - update the pgouser secret @@ -253,7 +251,7 @@ func UpdatePgouser(clientset kubernetes.Interface, updatedBy string, request *ms return resp } - //publish event + // publish event topics := make([]string, 1) topics[0] = events.EventTopicPGOUser @@ -275,7 +273,6 @@ func UpdatePgouser(clientset kubernetes.Interface, updatedBy string, request *ms } return resp - } func createSecret(clientset kubernetes.Interface, createdBy string, request *msgs.CreatePgouserRequest) error { @@ -323,7 +320,6 @@ func validRoles(clientset kubernetes.Interface, roles string) error { } func validNamespaces(namespaces string, allnamespaces bool) error { - if allnamespaces { return nil } diff --git a/internal/apiserver/pgouserservice/pgouserservice.go b/internal/apiserver/pgouserservice/pgouserservice.go index ccf1b1ce8f..1f30da2a09 100644 --- a/internal/apiserver/pgouserservice/pgouserservice.go +++ b/internal/apiserver/pgouserservice/pgouserservice.go @@ -1,7 +1,7 @@ package pgouserservice /* -Copyright 2019 - 2020 Crunchy Data Solutions, Inc. +Copyright 2019 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -17,11 +17,12 @@ limitations under the License. 
import ( "encoding/json" + "net/http" + apiserver "github.com/crunchydata/postgres-operator/internal/apiserver" msgs "github.com/crunchydata/postgres-operator/pkg/apiservermsgs" log "github.com/sirupsen/logrus" "k8s.io/apimachinery/pkg/util/validation" - "net/http" ) func CreatePgouserHandler(w http.ResponseWriter, r *http.Request) { @@ -63,7 +64,7 @@ func CreatePgouserHandler(w http.ResponseWriter, r *http.Request) { if request.ClientVersion != msgs.PGO_VERSION { resp.Status.Code = msgs.Error resp.Status.Msg = apiserver.VERSION_MISMATCH_ERROR - json.NewEncoder(w).Encode(resp) + _ = json.NewEncoder(w).Encode(resp) return } @@ -75,7 +76,7 @@ func CreatePgouserHandler(w http.ResponseWriter, r *http.Request) { resp = CreatePgouser(apiserver.Clientset, username, &request) } - json.NewEncoder(w).Encode(resp) + _ = json.NewEncoder(w).Encode(resp) } func DeletePgouserHandler(w http.ResponseWriter, r *http.Request) { @@ -117,14 +118,13 @@ func DeletePgouserHandler(w http.ResponseWriter, r *http.Request) { if request.ClientVersion != msgs.PGO_VERSION { resp.Status.Code = msgs.Error resp.Status.Msg = apiserver.VERSION_MISMATCH_ERROR - json.NewEncoder(w).Encode(resp) + _ = json.NewEncoder(w).Encode(resp) return } resp = DeletePgouser(apiserver.Clientset, username, &request) - json.NewEncoder(w).Encode(resp) - + _ = json.NewEncoder(w).Encode(resp) } func ShowPgouserHandler(w http.ResponseWriter, r *http.Request) { @@ -167,14 +167,13 @@ func ShowPgouserHandler(w http.ResponseWriter, r *http.Request) { if request.ClientVersion != msgs.PGO_VERSION { resp.Status.Code = msgs.Error resp.Status.Msg = apiserver.VERSION_MISMATCH_ERROR - json.NewEncoder(w).Encode(resp) + _ = json.NewEncoder(w).Encode(resp) return } resp = ShowPgouser(apiserver.Clientset, &request) - json.NewEncoder(w).Encode(resp) - + _ = json.NewEncoder(w).Encode(resp) } func UpdatePgouserHandler(w http.ResponseWriter, r *http.Request) { @@ -213,5 +212,5 @@ func UpdatePgouserHandler(w http.ResponseWriter, r *http.Request) { resp.Status = msgs.Status{Code: msgs.Ok, Msg: ""} resp = UpdatePgouser(apiserver.Clientset, username, &request) - json.NewEncoder(w).Encode(resp) + _ = json.NewEncoder(w).Encode(resp) } diff --git a/internal/apiserver/policyservice/policyimpl.go b/internal/apiserver/policyservice/policyimpl.go index e0302eeb6c..cfa796c49e 100644 --- a/internal/apiserver/policyservice/policyimpl.go +++ b/internal/apiserver/policyservice/policyimpl.go @@ -1,7 +1,7 @@ package policyservice /* -Copyright 2017 - 2020 Crunchy Data Solutions, Inc. +Copyright 2017 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -37,7 +37,7 @@ import ( ) // CreatePolicy ... -func CreatePolicy(client pgo.Interface, policyName, policyURL, policyFile, ns, pgouser string) (bool, error) { +func CreatePolicy(client pgo.Interface, policyName, policyFile, ns, pgouser string) (bool, error) { ctx := context.TODO() log.Debugf("create policy called for %s", policyName) @@ -46,7 +46,6 @@ func CreatePolicy(client pgo.Interface, policyName, policyURL, policyFile, ns, p spec := crv1.PgpolicySpec{} spec.Namespace = ns spec.Name = policyName - spec.URL = policyURL spec.SQL = policyFile myLabels := make(map[string]string) @@ -68,7 +67,6 @@ func CreatePolicy(client pgo.Interface, policyName, policyURL, policyFile, ns, p } return false, err - } // ShowPolicy ... 
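The role and user services above import k8s.io/apimachinery/pkg/util/validation to vet user-supplied names before they are baked into Secret names (the "invalid policy name format" branch further below suggests the policy service does the same). The exact validator call is outside the hunks shown, so IsDNS1035Label here is an assumption; the shape of the API, a slice of human-readable problems where empty means valid, is the point:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/validation"
)

func main() {
	for _, name := range []string{"pgouser-bob", "Bad_Name!"} {
		// validators in this package return a slice of human-readable
		// problems; an empty slice means the value passed
		if errs := validation.IsDNS1035Label(name); len(errs) > 0 {
			fmt.Printf("invalid name format %s\n", errs[0])
			continue
		}
		fmt.Printf("%s is acceptable\n", name)
	}
}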
@@ -77,7 +75,7 @@ func ShowPolicy(client pgo.Interface, name string, allflags bool, ns string) crv policyList := crv1.PgpolicyList{} if allflags { - //get a list of all policies + // get a list of all policies list, err := client.CrunchydataV1().Pgpolicies(ns).List(ctx, metav1.ListOptions{}) if list != nil && err == nil { policyList = *list @@ -90,7 +88,6 @@ func ShowPolicy(client pgo.Interface, name string, allflags bool, ns string) crv } return policyList - } // DeletePolicy ... @@ -110,18 +107,19 @@ func DeletePolicy(client pgo.Interface, policyName, ns, pgouser string) msgs.Del policyFound := false log.Debugf("deleting policy %s", policyName) - for _, policy := range policyList.Items { + for i := range policyList.Items { + policy := &policyList.Items[i] if policyName == "all" || policyName == policy.Spec.Name { - //update pgpolicy with current pgouser so that - //we can create an event holding the pgouser - //that deleted the policy + // update pgpolicy with current pgouser so that + // we can create an event holding the pgouser + // that deleted the policy policy.ObjectMeta.Labels[config.LABEL_PGOUSER] = pgouser - _, err = client.CrunchydataV1().Pgpolicies(ns).Update(ctx, &policy, metav1.UpdateOptions{}) + _, err = client.CrunchydataV1().Pgpolicies(ns).Update(ctx, policy, metav1.UpdateOptions{}) if err != nil { log.Error(err) } - //ok, now delete the pgpolicy + // ok, now delete the pgpolicy policyFound = true err = client.CrunchydataV1().Pgpolicies(ns).Delete(ctx, policy.Spec.Name, metav1.DeleteOptions{}) if err == nil { @@ -145,7 +143,6 @@ func DeletePolicy(client pgo.Interface, policyName, ns, pgouser string) msgs.Del } return resp - } // ApplyPolicy ... @@ -159,7 +156,7 @@ func ApplyPolicy(request *msgs.ApplyPolicyRequest, ns, pgouser string) msgs.Appl resp.Status.Msg = "" resp.Status.Code = msgs.Ok - //validate policy + // validate policy err = util.ValidatePolicy(apiserver.Clientset, ns, request.Name) if err != nil { resp.Status.Code = msgs.Error @@ -167,11 +164,11 @@ func ApplyPolicy(request *msgs.ApplyPolicyRequest, ns, pgouser string) msgs.Appl return resp } - //get filtered list of Deployments + // get filtered list of Deployments selector := request.Selector log.Debugf("apply policy selector string=[%s]", selector) - //get a list of all clusters + // get a list of all clusters clusterList, err := apiserver.Clientset. CrunchydataV1().Pgclusters(ns). List(ctx, metav1.ListOptions{LabelSelector: selector}) @@ -212,7 +209,7 @@ func ApplyPolicy(request *msgs.ApplyPolicyRequest, ns, pgouser string) msgs.Appl if request.DryRun { for _, d := range allDeployments { - log.Debugf("deployment : %s", d.ObjectMeta.Name) + log.Debugf("deployment: %s", d.ObjectMeta.Name) resp.Name = append(resp.Name, d.ObjectMeta.Name) } return resp @@ -232,14 +229,14 @@ func ApplyPolicy(request *msgs.ApplyPolicyRequest, ns, pgouser string) msgs.Appl if d.ObjectMeta.Labels[config.LABEL_SERVICE_NAME] != d.ObjectMeta.Labels[config.LABEL_PG_CLUSTER] { log.Debugf("skipping apply policy on deployment %s", d.Name) continue - //skip non primary deployments + // skip non primary deployments } log.Debugf("apply policy %s on deployment %s based on selector %s", request.Name, d.ObjectMeta.Name, selector) cl, err := apiserver.Clientset. CrunchydataV1().Pgclusters(ns). 
- Get(ctx, d.ObjectMeta.Labels[config.LABEL_SERVICE_NAME], metav1.GetOptions{}) + Get(ctx, d.ObjectMeta.Labels[config.LABEL_PG_CLUSTER], metav1.GetOptions{}) if err != nil { resp.Status.Code = msgs.Error resp.Status.Msg = err.Error() @@ -247,7 +244,7 @@ func ApplyPolicy(request *msgs.ApplyPolicyRequest, ns, pgouser string) msgs.Appl } if err := util.ExecPolicy(apiserver.Clientset, apiserver.RESTConfig, - ns, request.Name, d.ObjectMeta.Labels[config.LABEL_SERVICE_NAME], cl.Spec.Port); err != nil { + ns, request.Name, d.ObjectMeta.Labels[config.LABEL_PG_CLUSTER], cl.Spec.Port); err != nil { log.Error(err) resp.Status.Code = msgs.Error resp.Status.Msg = err.Error() @@ -261,7 +258,7 @@ func ApplyPolicy(request *msgs.ApplyPolicyRequest, ns, pgouser string) msgs.Appl log.Error(err) } - //update the pgcluster crd labels with the new policy + // update the pgcluster crd labels with the new policy log.Debugf("patching cluster %s: %s", cl.Name, patch) _, err = apiserver.Clientset.CrunchydataV1().Pgclusters(ns). Patch(ctx, cl.Name, types.MergePatchType, patch, metav1.PatchOptions{}) @@ -271,7 +268,7 @@ func ApplyPolicy(request *msgs.ApplyPolicyRequest, ns, pgouser string) msgs.Appl resp.Name = append(resp.Name, d.ObjectMeta.Name) - //publish event + // publish event topics := make([]string, 1) topics[0] = events.EventTopicPolicy @@ -294,5 +291,4 @@ func ApplyPolicy(request *msgs.ApplyPolicyRequest, ns, pgouser string) msgs.Appl } return resp - } diff --git a/internal/apiserver/policyservice/policyservice.go b/internal/apiserver/policyservice/policyservice.go index d2a3d6234f..026228dc40 100644 --- a/internal/apiserver/policyservice/policyservice.go +++ b/internal/apiserver/policyservice/policyservice.go @@ -1,7 +1,7 @@ package policyservice /* -Copyright 2017 - 2020 Crunchy Data Solutions, Inc. +Copyright 2017 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -65,7 +65,7 @@ func CreatePolicyHandler(w http.ResponseWriter, r *http.Request) { if request.ClientVersion != msgs.PGO_VERSION { resp.Status.Code = msgs.Error resp.Status.Msg = apiserver.VERSION_MISMATCH_ERROR - json.NewEncoder(w).Encode(resp) + _ = json.NewEncoder(w).Encode(resp) return } @@ -73,7 +73,7 @@ func CreatePolicyHandler(w http.ResponseWriter, r *http.Request) { if err != nil { resp.Status.Code = msgs.Error resp.Status.Msg = err.Error() - json.NewEncoder(w).Encode(resp) + _ = json.NewEncoder(w).Encode(resp) return } @@ -83,7 +83,7 @@ func CreatePolicyHandler(w http.ResponseWriter, r *http.Request) { resp.Status.Msg = "invalid policy name format " + errs[0] } else { - found, err := CreatePolicy(apiserver.Clientset, request.Name, request.URL, request.SQL, ns, username) + found, err := CreatePolicy(apiserver.Clientset, request.Name, request.SQL, ns, username) if err != nil { log.Error(err.Error()) resp.Status.Code = msgs.Error @@ -95,7 +95,7 @@ func CreatePolicyHandler(w http.ResponseWriter, r *http.Request) { } } - json.NewEncoder(w).Encode(resp) + _ = json.NewEncoder(w).Encode(resp) } // DeletePolicyHandler ... 
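ApplyPolicy above records the applied policy as a label on the pgcluster by sending a JSON merge patch with types.MergePatchType, which is why the patch bytes can be logged whole: a merge patch carries only the fields being changed. A self-contained sketch of constructing such a patch; the label key and value are hypothetical:

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// only the fields being changed are present; the API server merges the
	// label into whatever metadata the object already carries
	patch, err := json.Marshal(map[string]interface{}{
		"metadata": map[string]interface{}{
			"labels": map[string]string{
				"my-policy": "pgpolicy", // hypothetical policy label
			},
		},
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(patch)) // {"metadata":{"labels":{"my-policy":"pgpolicy"}}}
}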
@@ -145,7 +145,7 @@ func DeletePolicyHandler(w http.ResponseWriter, r *http.Request) { if clientVersion != msgs.PGO_VERSION { resp.Status.Code = msgs.Error resp.Status.Msg = apiserver.VERSION_MISMATCH_ERROR - json.NewEncoder(w).Encode(resp) + _ = json.NewEncoder(w).Encode(resp) return } @@ -153,14 +153,13 @@ func DeletePolicyHandler(w http.ResponseWriter, r *http.Request) { if err != nil { resp.Status.Code = msgs.Error resp.Status.Msg = err.Error() - json.NewEncoder(w).Encode(resp) + _ = json.NewEncoder(w).Encode(resp) return } resp = DeletePolicy(apiserver.Clientset, policyname, ns, username) - json.NewEncoder(w).Encode(resp) - + _ = json.NewEncoder(w).Encode(resp) } // ShowPolicyHandler ... @@ -212,7 +211,7 @@ func ShowPolicyHandler(w http.ResponseWriter, r *http.Request) { if clientVersion != msgs.PGO_VERSION { resp.Status.Code = msgs.Error resp.Status.Msg = apiserver.VERSION_MISMATCH_ERROR - json.NewEncoder(w).Encode(resp) + _ = json.NewEncoder(w).Encode(resp) return } @@ -220,14 +219,13 @@ func ShowPolicyHandler(w http.ResponseWriter, r *http.Request) { if err != nil { resp.Status.Code = msgs.Error resp.Status.Msg = err.Error() - json.NewEncoder(w).Encode(resp) + _ = json.NewEncoder(w).Encode(resp) return } resp.PolicyList = ShowPolicy(apiserver.Clientset, policyname, request.AllFlag, ns) - json.NewEncoder(w).Encode(resp) - + _ = json.NewEncoder(w).Encode(resp) } // ApplyPolicyHandler ... @@ -271,10 +269,10 @@ func ApplyPolicyHandler(w http.ResponseWriter, r *http.Request) { ns, err = apiserver.GetNamespace(apiserver.Clientset, username, request.Namespace) if err != nil { resp.Status = msgs.Status{Code: msgs.Error, Msg: err.Error()} - json.NewEncoder(w).Encode(resp) + _ = json.NewEncoder(w).Encode(resp) return } resp = ApplyPolicy(&request, ns, username) - json.NewEncoder(w).Encode(resp) + _ = json.NewEncoder(w).Encode(resp) } diff --git a/internal/apiserver/pvcservice/pvcimpl.go b/internal/apiserver/pvcservice/pvcimpl.go index 091aa7b67d..2aa80e6e23 100644 --- a/internal/apiserver/pvcservice/pvcimpl.go +++ b/internal/apiserver/pvcservice/pvcimpl.go @@ -1,7 +1,7 @@ package pvcservice /* -Copyright 2017 - 2020 Crunchy Data Solutions, Inc. +Copyright 2017 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at diff --git a/internal/apiserver/pvcservice/pvcservice.go b/internal/apiserver/pvcservice/pvcservice.go index a12979cb3c..5cbb939867 100644 --- a/internal/apiserver/pvcservice/pvcservice.go +++ b/internal/apiserver/pvcservice/pvcservice.go @@ -1,7 +1,7 @@ package pvcservice /* -Copyright 2017 - 2020 Crunchy Data Solutions, Inc. +Copyright 2017 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -17,10 +17,11 @@ limitations under the License. import ( "encoding/json" + "net/http" + "github.com/crunchydata/postgres-operator/internal/apiserver" msgs "github.com/crunchydata/postgres-operator/pkg/apiservermsgs" log "github.com/sirupsen/logrus" - "net/http" ) // ShowPVCHandler ... 
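Looking back at the ServiceType handling added to CreatePgbouncer and UpdatePgBouncer earlier in this patch: enumerating every accepted corev1.ServiceType (plus the empty string for "leave as is") in a case clause and reserving default for rejection keeps the switch exhaustive by construction, so an unvetted value cannot slip through silently. A minimal runnable sketch of the same shape:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// validateServiceType mirrors the switch shape used in the pgBouncer code
// paths: every accepted value, including the empty "no change" case, is
// enumerated, and anything else falls to default and is rejected.
func validateServiceType(t v1.ServiceType) error {
	switch t {
	default:
		return fmt.Errorf("invalid service type %q", t)
	case v1.ServiceTypeClusterIP, v1.ServiceTypeNodePort,
		v1.ServiceTypeLoadBalancer, v1.ServiceTypeExternalName, "":
		return nil
	}
}

func main() {
	fmt.Println(validateServiceType(v1.ServiceTypeNodePort)) // <nil>
	fmt.Println(validateServiceType("Bogus"))                // invalid service type "Bogus"
}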
@@ -76,7 +77,7 @@ func ShowPVCHandler(w http.ResponseWriter, r *http.Request) { if clientVersion != msgs.PGO_VERSION { resp.Status.Code = msgs.Error resp.Status.Msg = apiserver.VERSION_MISMATCH_ERROR - json.NewEncoder(w).Encode(resp) + _ = json.NewEncoder(w).Encode(resp) return } @@ -84,7 +85,7 @@ func ShowPVCHandler(w http.ResponseWriter, r *http.Request) { if err != nil { resp.Status.Code = msgs.Error resp.Status.Msg = err.Error() - json.NewEncoder(w).Encode(resp) + _ = json.NewEncoder(w).Encode(resp) return } @@ -94,5 +95,5 @@ func ShowPVCHandler(w http.ResponseWriter, r *http.Request) { resp.Status.Msg = err.Error() } - json.NewEncoder(w).Encode(resp) + _ = json.NewEncoder(w).Encode(resp) } diff --git a/internal/apiserver/reloadservice/reloadimpl.go b/internal/apiserver/reloadservice/reloadimpl.go index dba6e6cd8f..1f081501c1 100644 --- a/internal/apiserver/reloadservice/reloadimpl.go +++ b/internal/apiserver/reloadservice/reloadimpl.go @@ -1,7 +1,7 @@ package reloadservice /* -Copyright 2018 - 2020 Crunchy Data Solutions, Inc. +Copyright 2018 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -19,13 +19,11 @@ import ( "context" "fmt" "strings" - "time" "github.com/crunchydata/postgres-operator/internal/apiserver" "github.com/crunchydata/postgres-operator/internal/config" "github.com/crunchydata/postgres-operator/internal/patroni" msgs "github.com/crunchydata/postgres-operator/pkg/apiservermsgs" - "github.com/crunchydata/postgres-operator/pkg/events" log "github.com/sirupsen/logrus" kerrors "k8s.io/apimachinery/pkg/api/errors" @@ -99,11 +97,6 @@ func Reload(request *msgs.ReloadRequest, ns, username string) msgs.ReloadRespons } resp.Results = append(resp.Results, fmt.Sprintf("reload performed on %s", clusterName)) - - if err := publishReloadClusterEvent(cluster.GetName(), ns, username); err != nil { - log.Error(err.Error()) - errorMsgs = append(errorMsgs, err.Error()) - } } if len(errorMsgs) > 0 { @@ -113,27 +106,3 @@ func Reload(request *msgs.ReloadRequest, ns, username string) msgs.ReloadRespons return resp } - -// publishReloadClusterEvent publishes an event when a cluster is reloaded -func publishReloadClusterEvent(clusterName, username, namespace string) error { - - topics := make([]string, 1) - topics[0] = events.EventTopicCluster - - f := events.EventReloadClusterFormat{ - EventHeader: events.EventHeader{ - Namespace: namespace, - Username: username, - Topic: topics, - Timestamp: time.Now(), - EventType: events.EventReloadCluster, - }, - Clustername: clusterName, - } - - if err := events.Publish(f); err != nil { - return err - } - - return nil -} diff --git a/internal/apiserver/reloadservice/reloadservice.go b/internal/apiserver/reloadservice/reloadservice.go index 9d1096c3c9..d72ade0d1b 100644 --- a/internal/apiserver/reloadservice/reloadservice.go +++ b/internal/apiserver/reloadservice/reloadservice.go @@ -1,7 +1,7 @@ package reloadservice /* -Copyright 2018 - 2020 Crunchy Data Solutions, Inc. +Copyright 2018 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -17,10 +17,11 @@ limitations under the License. 
import ( "encoding/json" + "net/http" + "github.com/crunchydata/postgres-operator/internal/apiserver" msgs "github.com/crunchydata/postgres-operator/pkg/apiservermsgs" log "github.com/sirupsen/logrus" - "net/http" ) // ReloadHandler ... @@ -67,7 +68,7 @@ func ReloadHandler(w http.ResponseWriter, r *http.Request) { resp := msgs.ReloadResponse{} resp.Status.Code = msgs.Error resp.Status.Msg = err.Error() - json.NewEncoder(w).Encode(resp) + _ = json.NewEncoder(w).Encode(resp) return } @@ -76,9 +77,9 @@ func ReloadHandler(w http.ResponseWriter, r *http.Request) { resp := msgs.ReloadResponse{} resp.Status.Code = msgs.Error resp.Status.Msg = err.Error() - json.NewEncoder(w).Encode(resp) + _ = json.NewEncoder(w).Encode(resp) return } - json.NewEncoder(w).Encode(reloadResponse) + _ = json.NewEncoder(w).Encode(reloadResponse) } diff --git a/internal/apiserver/restartservice/restartimpl.go b/internal/apiserver/restartservice/restartimpl.go index 5d1545d8e4..f8ef1419c7 100644 --- a/internal/apiserver/restartservice/restartimpl.go +++ b/internal/apiserver/restartservice/restartimpl.go @@ -1,7 +1,7 @@ package restartservice /* -Copyright 2020 Crunchy Data Solutions, Inc. +Copyright 2020 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -23,8 +23,10 @@ import ( "github.com/crunchydata/postgres-operator/internal/config" "github.com/crunchydata/postgres-operator/internal/patroni" "github.com/crunchydata/postgres-operator/internal/util" + crv1 "github.com/crunchydata/postgres-operator/pkg/apis/crunchydata.com/v1" msgs "github.com/crunchydata/postgres-operator/pkg/apiservermsgs" log "github.com/sirupsen/logrus" + kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -63,6 +65,46 @@ func Restart(request *msgs.RestartRequest, pgouser string) msgs.RestartResponse return resp } + // if a rolling update is requested, this takes a detour to create a pgtask + // to accomplish this + if request.RollingUpdate { + // since a rolling update takes time, this needs to be performed as a + // separate task + // Create a pgtask + task := &crv1.Pgtask{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-%s", cluster.Name, config.LABEL_RESTART), + Namespace: cluster.Namespace, + Labels: map[string]string{ + config.LABEL_PG_CLUSTER: cluster.Name, + config.LABEL_PGOUSER: pgouser, + }, + }, + Spec: crv1.PgtaskSpec{ + TaskType: crv1.PgtaskRollingUpdate, + Parameters: map[string]string{ + config.LABEL_PG_CLUSTER: cluster.Name, + }, + }, + } + + // remove any previous rolling restart, then add a new one + if err := apiserver.Clientset.CrunchydataV1().Pgtasks(task.Namespace).Delete(ctx, task.Name, + metav1.DeleteOptions{}); err != nil && !kerrors.IsNotFound(err) { + resp.Status.Code = msgs.Error + resp.Status.Msg = err.Error() + return resp + } + + if _, err := apiserver.Clientset.CrunchydataV1().Pgtasks(cluster.Namespace).Create(ctx, task, + metav1.CreateOptions{}); err != nil { + resp.Status.Code = msgs.Error + resp.Status.Msg = err.Error() + } + + return resp + } + var restartResults []patroni.RestartResult // restart either the whole cluster, or just any targets specified patroniClient := patroni.NewPatroniClient(apiserver.RESTConfig, apiserver.Clientset, diff --git a/internal/apiserver/restartservice/restartservice.go b/internal/apiserver/restartservice/restartservice.go index a1bfb97194..e8a29c97a1 100644 --- 
a/internal/apiserver/restartservice/restartservice.go +++ b/internal/apiserver/restartservice/restartservice.go @@ -1,7 +1,7 @@ package restartservice /* -Copyright 2020 Crunchy Data Solutions, Inc. +Copyright 2020 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -56,14 +56,14 @@ func RestartHandler(w http.ResponseWriter, r *http.Request) { var request msgs.RestartRequest if err := json.NewDecoder(r.Body).Decode(&request); err != nil { resp.Status = msgs.Status{Code: msgs.Error, Msg: err.Error()} - json.NewEncoder(w).Encode(resp) + _ = json.NewEncoder(w).Encode(resp) return } username, err := apiserver.Authn(apiserver.RESTART_PERM, w, r) if err != nil { resp.Status = msgs.Status{Code: msgs.Error, Msg: err.Error()} - json.NewEncoder(w).Encode(resp) + _ = json.NewEncoder(w).Encode(resp) return } @@ -73,17 +73,17 @@ func RestartHandler(w http.ResponseWriter, r *http.Request) { if request.ClientVersion != msgs.PGO_VERSION { resp.Status = msgs.Status{Code: msgs.Error, Msg: apiserver.VERSION_MISMATCH_ERROR} - json.NewEncoder(w).Encode(resp) + _ = json.NewEncoder(w).Encode(resp) } if _, err := apiserver.GetNamespace(apiserver.Clientset, username, request.Namespace); err != nil { resp.Status = msgs.Status{Code: msgs.Error, Msg: err.Error()} - json.NewEncoder(w).Encode(resp) + _ = json.NewEncoder(w).Encode(resp) return } - json.NewEncoder(w).Encode(Restart(&request, username)) + _ = json.NewEncoder(w).Encode(Restart(&request, username)) } // QueryRestartHandler handles requests to query a cluster for instances available to use as @@ -131,7 +131,7 @@ func QueryRestartHandler(w http.ResponseWriter, r *http.Request) { username, err := apiserver.Authn(apiserver.RESTART_PERM, w, r) if err != nil { resp.Status = msgs.Status{Code: msgs.Error, Msg: err.Error()} - json.NewEncoder(w).Encode(resp) + _ = json.NewEncoder(w).Encode(resp) return } @@ -141,14 +141,14 @@ func QueryRestartHandler(w http.ResponseWriter, r *http.Request) { if clientVersion != msgs.PGO_VERSION { resp.Status = msgs.Status{Code: msgs.Error, Msg: apiserver.VERSION_MISMATCH_ERROR} - json.NewEncoder(w).Encode(resp) + _ = json.NewEncoder(w).Encode(resp) } if _, err := apiserver.GetNamespace(apiserver.Clientset, username, namespace); err != nil { resp.Status = msgs.Status{Code: msgs.Error, Msg: err.Error()} - json.NewEncoder(w).Encode(resp) + _ = json.NewEncoder(w).Encode(resp) return } - json.NewEncoder(w).Encode(QueryRestart(clusterName, namespace)) + _ = json.NewEncoder(w).Encode(QueryRestart(clusterName, namespace)) } diff --git a/internal/apiserver/root.go b/internal/apiserver/root.go index f2a5ab149b..15dcd369b0 100644 --- a/internal/apiserver/root.go +++ b/internal/apiserver/root.go @@ -1,7 +1,7 @@ package apiserver /* -Copyright 2017 - 2020 Crunchy Data Solutions, Inc. +Copyright 2017 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -17,7 +17,7 @@ limitations under the License. 
import ( "context" - "crypto/rsa" + "crypto/ecdsa" "crypto/x509" "errors" "fmt" @@ -64,8 +64,10 @@ var DebugFlag bool var BasicAuth bool // Namespace comes from the apiserver config in this version -var PgoNamespace string -var InstallationName string +var ( + PgoNamespace string + InstallationName string +) var CRUNCHY_DEBUG bool @@ -90,7 +92,6 @@ var Pgo config.PgoConfig var namespaceOperatingMode ns.NamespaceOperatingMode func Initialize() { - PgoNamespace = os.Getenv("PGO_OPERATOR_NAMESPACE") if PgoNamespace == "" { log.Info("PGO_OPERATOR_NAMESPACE environment variable is not set and is required, this is the namespace that the Operator is to run within.") @@ -151,7 +152,6 @@ func Initialize() { } func connectToKube() { - client, err := kubeapi.NewClient() if err != nil { panic(err) @@ -193,14 +193,13 @@ func initConfig() { func BasicAuthCheck(username, password string) bool { ctx := context.TODO() - if BasicAuth == false { + if !BasicAuth { return true } - //see if there is a pgouser Secret for this username + // see if there is a pgouser Secret for this username secretName := "pgouser-" + username secret, err := Clientset.CoreV1().Secrets(PgoNamespace).Get(ctx, secretName, metav1.GetOptions{}) - if err != nil { log.Errorf("could not get pgouser secret %s: %s", username, err.Error()) return false @@ -213,13 +212,12 @@ func BasicAuthzCheck(username, perm string) bool { ctx := context.TODO() secretName := "pgouser-" + username secret, err := Clientset.CoreV1().Secrets(PgoNamespace).Get(ctx, secretName, metav1.GetOptions{}) - if err != nil { log.Errorf("could not get pgouser secret %s: %s", username, err.Error()) return false } - //get the roles for this user + // get the roles for this user rolesString := string(secret.Data["roles"]) roles := strings.Split(rolesString, ",") if len(roles) == 0 { @@ -227,13 +225,12 @@ func BasicAuthzCheck(username, perm string) bool { return false } - //venture thru each role this user has looking for a perm match + // venture thru each role this user has looking for a perm match for _, r := range roles { - //get the pgorole + // get the pgorole roleSecretName := "pgorole-" + r rolesecret, err := Clientset.CoreV1().Secrets(PgoNamespace).Get(ctx, roleSecretName, metav1.GetOptions{}) - if err != nil { log.Errorf("could not get pgorole secret %s: %s", r, err.Error()) return false @@ -262,14 +259,12 @@ func BasicAuthzCheck(username, perm string) bool { } return false - } -//GetNamespace determines if a user has permission for -//a namespace they are requesting -//a valid requested namespace is required +// GetNamespace determines if a user has permission for +// a namespace they are requesting +// a valid requested namespace is required func GetNamespace(clientset kubernetes.Interface, username, requestedNS string) (string, error) { - log.Debugf("GetNamespace username [%s] ns [%s]", username, requestedNS) if requestedNS == "" { @@ -281,11 +276,11 @@ func GetNamespace(clientset kubernetes.Interface, username, requestedNS string) return requestedNS, fmt.Errorf("Error when determining whether user [%s] is allowed access to "+ "namespace [%s]: %s", username, requestedNS, err.Error()) } - if iAccess == false { + if !iAccess { errMsg := fmt.Sprintf("namespace [%s] is not part of the Operator installation", requestedNS) return requestedNS, errors.New(errMsg) } - if uAccess == false { + if !uAccess { errMsg := fmt.Sprintf("user [%s] is not allowed access to namespace [%s]", username, requestedNS) return requestedNS, errors.New(errMsg) } @@ -339,7 +334,6 @@ func 
Authn(perm string, w http.ResponseWriter, r *http.Request) (string, error) log.Debug("Authentication Success") return username, err - } func IsValidStorageName(name string) bool { @@ -375,7 +369,7 @@ func UserIsPermittedInNamespace(username, requestedNS string) (bool, bool, error } if iAccess { - //get the pgouser Secret for this username + // get the pgouser Secret for this username userSecretName := "pgouser-" + username userSecret, err := Clientset.CoreV1().Secrets(PgoNamespace).Get(ctx, userSecretName, metav1.GetOptions{}) if err != nil { @@ -408,7 +402,6 @@ func UserIsPermittedInNamespace(username, requestedNS string) (bool, bool, error func WriteTLSCert(certPath, keyPath string) error { ctx := context.TODO() pgoSecret, err := Clientset.CoreV1().Secrets(PgoNamespace).Get(ctx, PGOSecretName, metav1.GetOptions{}) - // if the TLS certificate secret is not found, attempt to generate one if err != nil { log.Infof("%s Secret NOT found in namespace %s", PGOSecretName, PgoNamespace) @@ -425,13 +418,13 @@ func WriteTLSCert(certPath, keyPath string) error { log.Infof("%s Secret found in namespace %s", PGOSecretName, PgoNamespace) log.Infof("cert key data len is %d", len(pgoSecret.Data[corev1.TLSCertKey])) - if err := ioutil.WriteFile(certPath, pgoSecret.Data[corev1.TLSCertKey], 0644); err != nil { + if err := ioutil.WriteFile(certPath, pgoSecret.Data[corev1.TLSCertKey], 0o600); err != nil { return err } log.Infof("private key data len is %d", len(pgoSecret.Data[corev1.TLSPrivateKeyKey])) - if err := ioutil.WriteFile(keyPath, pgoSecret.Data[corev1.TLSPrivateKeyKey], 0644); err != nil { + if err := ioutil.WriteFile(keyPath, pgoSecret.Data[corev1.TLSPrivateKeyKey], 0o600); err != nil { return err } @@ -444,8 +437,8 @@ func generateTLSCert(certPath, keyPath string) error { ctx := context.TODO() var err error - //generate private key - var privateKey *rsa.PrivateKey + // generate private key + var privateKey *ecdsa.PrivateKey privateKey, err = tlsutil.NewPrivateKey() if err != nil { fmt.Println(err.Error()) @@ -481,15 +474,14 @@ func generateTLSCert(certPath, keyPath string) error { os.Exit(2) } - if err := ioutil.WriteFile(certPath, newSecret.Data[corev1.TLSCertKey], 0644); err != nil { + if err := ioutil.WriteFile(certPath, newSecret.Data[corev1.TLSCertKey], 0o600); err != nil { return err } - if err := ioutil.WriteFile(keyPath, newSecret.Data[corev1.TLSPrivateKeyKey], 0644); err != nil { + if err := ioutil.WriteFile(keyPath, newSecret.Data[corev1.TLSPrivateKeyKey], 0o600); err != nil { return err } return err - } // setNamespaceOperatingMode set the namespace operating mode for the Operator by calling the @@ -530,7 +522,6 @@ func setRandomPgouserPasswords() { // generate the password using the default password length generatedPassword, err := util.GeneratePassword(util.DefaultGeneratedPasswordLength) - if err != nil { log.Errorf("Could not generate password for pgouser secret %s for operator installation %s in "+ "namespace %s", secret.Name, InstallationName, PgoNamespace) @@ -539,7 +530,6 @@ func setRandomPgouserPasswords() { // create the password patch patch, err := kubeapi.NewMergePatch().Add("stringData", "password")(generatedPassword).Bytes() - if err != nil { log.Errorf("Could not generate password patch for pgouser secret %s for operator installation "+ "%s in namespace %s", secret.Name, InstallationName, PgoNamespace) diff --git a/internal/apiserver/routing/doc.go b/internal/apiserver/routing/doc.go index e985fd4280..b050245796 100644 --- a/internal/apiserver/routing/doc.go +++ 
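Two hardening changes travel together in root.go: the apiserver's self-signed TLS key moves from RSA to ECDSA, and the certificate and key files are now written 0o600 instead of the world-readable 0644 (gosec G306). The new tlsutil.NewPrivateKey body is not part of this hunk; a plausible ECDSA implementation, assuming the common P-256 curve choice, would be:

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
)

// NewPrivateKey returns a fresh ECDSA key. P-256 is an assumption here;
// the actual curve is selected inside tlsutil, outside this diff.
func NewPrivateKey() (*ecdsa.PrivateKey, error) {
	return ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
}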
b/internal/apiserver/routing/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2019 - 2020 Crunchy Data Solutions, Inc. +Copyright 2019 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at diff --git a/internal/apiserver/routing/routes.go b/internal/apiserver/routing/routes.go index 96de93403a..fc804ff11a 100644 --- a/internal/apiserver/routing/routes.go +++ b/internal/apiserver/routing/routes.go @@ -1,7 +1,7 @@ package routing /* -Copyright 2019 - 2020 Crunchy Data Solutions, Inc. +Copyright 2019 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -75,6 +75,7 @@ func RegisterAllRoutes(r *mux.Router) { func RegisterBackrestSvcRoutes(r *mux.Router) { r.HandleFunc("/backrestbackup", backrestservice.CreateBackupHandler).Methods("POST") r.HandleFunc("/backrest/{name}", backrestservice.ShowBackrestHandler).Methods("GET") + r.HandleFunc("/backrest", backrestservice.DeleteBackrestHandler).Methods("DELETE") r.HandleFunc("/restore", backrestservice.RestoreHandler).Methods("POST") } @@ -90,7 +91,7 @@ func RegisterClusterSvcRoutes(r *mux.Router) { r.HandleFunc("/clustersdelete", clusterservice.DeleteClusterHandler).Methods("POST") r.HandleFunc("/clustersupdate", clusterservice.UpdateClusterHandler).Methods("POST") r.HandleFunc("/testclusters", clusterservice.TestClusterHandler).Methods("POST") - r.HandleFunc("/clusters/scale/{name}", clusterservice.ScaleClusterHandler) + r.HandleFunc("/clusters/scale/{name}", clusterservice.ScaleClusterHandler).Methods("POST") r.HandleFunc("/scale/{name}", clusterservice.ScaleQueryHandler).Methods("GET") r.HandleFunc("/scaledown/{name}", clusterservice.ScaleDownHandler).Methods("GET") } diff --git a/internal/apiserver/scheduleservice/scheduleimpl.go b/internal/apiserver/scheduleservice/scheduleimpl.go index 09830a03d2..1acc86cdf4 100644 --- a/internal/apiserver/scheduleservice/scheduleimpl.go +++ b/internal/apiserver/scheduleservice/scheduleimpl.go @@ -1,7 +1,7 @@ package scheduleservice /* - Copyright 2018 - 2020 Crunchy Data Solutions, Inc. + Copyright 2018 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
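A behavioral fix hides among the routing changes above: /clusters/scale/{name} previously registered without a method restriction, so gorilla/mux dispatched every HTTP verb, including GET, to the scale handler. Constraining the verb lets mux answer other methods with 405 instead:

r := mux.NewRouter()
// Only POST may trigger a scale; mux rejects other verbs on this path.
r.HandleFunc("/clusters/scale/{name}", clusterservice.ScaleClusterHandler).Methods("POST")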
You may obtain a copy of the License at @@ -41,9 +41,7 @@ type scheduleRequest struct { func (s scheduleRequest) createBackRestSchedule(cluster *crv1.Pgcluster, ns string) *PgScheduleSpec { name := fmt.Sprintf("%s-%s-%s", cluster.Name, s.Request.ScheduleType, s.Request.PGBackRestType) - err := util.ValidateBackrestStorageTypeOnBackupRestore(s.Request.BackrestStorageType, - cluster.Spec.UserLabels[config.LABEL_BACKREST_STORAGE_TYPE], false) - if err != nil { + if err := apiserver.ValidateBackrestStorageTypeForCommand(cluster, s.Request.BackrestStorageType); err != nil { s.Response.Status.Code = msgs.Error s.Response.Status.Msg = err.Error() return &PgScheduleSpec{} @@ -79,8 +77,9 @@ func (s scheduleRequest) createPolicySchedule(cluster *crv1.Pgcluster, ns string } if s.Request.Secret == "" { - s.Request.Secret = cluster.Spec.PrimarySecretName + s.Request.Secret = crv1.UserSecretName(cluster, crv1.PGUserSuperuser) } + schedule := &PgScheduleSpec{ Name: name, Cluster: cluster.Name, @@ -93,8 +92,8 @@ func (s scheduleRequest) createPolicySchedule(cluster *crv1.Pgcluster, ns string Name: s.Request.PolicyName, Database: s.Request.Database, Secret: s.Request.Secret, - ImagePrefix: util.GetValueOrDefault(cluster.Spec.PGOImagePrefix, apiserver.Pgo.Pgo.PGOImagePrefix), - ImageTag: apiserver.Pgo.Pgo.PGOImageTag, + ImagePrefix: util.GetValueOrDefault(cluster.Spec.CCPImagePrefix, apiserver.Pgo.Cluster.CCPImagePrefix), + ImageTag: apiserver.Pgo.Cluster.CCPImageTag, }, } return schedule @@ -143,8 +142,8 @@ func CreateSchedule(request *msgs.CreateScheduleRequest, ns string) msgs.CreateS log.Debug("Making schedules") var schedules []*PgScheduleSpec - for _, cluster := range clusterList.Items { - + for i := range clusterList.Items { + cluster := &clusterList.Items[i] // check if the current cluster is not upgraded to the deployed // Operator version. 
If not, do not allow the command to complete if cluster.Annotations[config.ANNOTATION_IS_UPGRADED] == config.ANNOTATIONS_FALSE { @@ -154,10 +153,10 @@ func CreateSchedule(request *msgs.CreateScheduleRequest, ns string) msgs.CreateS } switch sr.Request.ScheduleType { case "pgbackrest": - schedule := sr.createBackRestSchedule(&cluster, ns) + schedule := sr.createBackRestSchedule(cluster, ns) schedules = append(schedules, schedule) case "policy": - schedule := sr.createPolicySchedule(&cluster, ns) + schedule := sr.createPolicySchedule(cluster, ns) schedules = append(schedules, schedule) default: sr.Response.Status.Code = msgs.Error @@ -232,7 +231,7 @@ func DeleteSchedule(request *msgs.DeleteScheduleRequest, ns string) msgs.DeleteS if request.ScheduleName == "" && request.ClusterName == "" && request.Selector == "" { sr.Status.Code = msgs.Error - sr.Status.Msg = fmt.Sprintf("Cluster name, schedule name or selector must be provided") + sr.Status.Msg = "Cluster name, schedule name or selector must be provided" return *sr } @@ -280,7 +279,7 @@ func ShowSchedule(request *msgs.ShowScheduleRequest, ns string) msgs.ShowSchedul if request.ScheduleName == "" && request.ClusterName == "" && request.Selector == "" { sr.Status.Code = msgs.Error - sr.Status.Msg = fmt.Sprintf("Cluster name, schedule name or selector must be provided") + sr.Status.Msg = "Cluster name, schedule name or selector must be provided" return *sr } diff --git a/internal/apiserver/scheduleservice/scheduleservice.go b/internal/apiserver/scheduleservice/scheduleservice.go index b88fa16d7e..fae4961db6 100644 --- a/internal/apiserver/scheduleservice/scheduleservice.go +++ b/internal/apiserver/scheduleservice/scheduleservice.go @@ -1,7 +1,7 @@ package scheduleservice /* -Copyright 2018 - 2020 Crunchy Data Solutions, Inc. +Copyright 2018 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -98,12 +98,12 @@ func CreateScheduleHandler(w http.ResponseWriter, r *http.Request) { }, Results: make([]string, 0), } - json.NewEncoder(w).Encode(resp) + _ = json.NewEncoder(w).Encode(resp) return } resp := CreateSchedule(&request, ns) - json.NewEncoder(w).Encode(resp) + _ = json.NewEncoder(w).Encode(resp) } func DeleteScheduleHandler(w http.ResponseWriter, r *http.Request) { @@ -150,13 +150,13 @@ func DeleteScheduleHandler(w http.ResponseWriter, r *http.Request) { }, Results: make([]string, 0), } - json.NewEncoder(w).Encode(resp) + _ = json.NewEncoder(w).Encode(resp) return } resp := DeleteSchedule(&request, ns) - json.NewEncoder(w).Encode(resp) + _ = json.NewEncoder(w).Encode(resp) } func ShowScheduleHandler(w http.ResponseWriter, r *http.Request) { @@ -204,10 +204,10 @@ func ShowScheduleHandler(w http.ResponseWriter, r *http.Request) { Results: make([]string, 0), } - json.NewEncoder(w).Encode(resp) + _ = json.NewEncoder(w).Encode(resp) return } resp := ShowSchedule(&request, ns) - json.NewEncoder(w).Encode(resp) + _ = json.NewEncoder(w).Encode(resp) } diff --git a/internal/apiserver/statusservice/statusimpl.go b/internal/apiserver/statusservice/statusimpl.go index 958da66604..ff65e719ce 100644 --- a/internal/apiserver/statusservice/statusimpl.go +++ b/internal/apiserver/statusservice/statusimpl.go @@ -1,7 +1,7 @@ package statusservice /* -Copyright 2018 - 2020 Crunchy Data Solutions, Inc. +Copyright 2018 - 2023 Crunchy Data Solutions, Inc. 
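The for i := range clusterList.Items rewrites in this file (and in userimpl.go below) fix implicit memory aliasing flagged by gosec G601: before Go 1.22, a range loop reuses a single loop variable, so taking &cluster inside for _, cluster := range ... yields the same address on every iteration. Indexing the slice gives a pointer into the backing array instead. A self-contained illustration with made-up data:

package main

import "fmt"

func main() {
	items := []string{"a", "b", "c"}
	ptrs := make([]*string, 0, len(items))
	for i := range items {
		ptrs = append(ptrs, &items[i]) // stable: each points at a distinct element
	}
	fmt.Println(*ptrs[0], *ptrs[1], *ptrs[2]) // prints "a b c", not "c c c"
}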
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -46,7 +46,7 @@ func Status(ns string) msgs.StatusResponse { func getNumClaims(ns string) int { ctx := context.TODO() - //count number of PVCs with pgremove=true + // count number of PVCs with pgremove=true pvcs, err := apiserver.Clientset. CoreV1().PersistentVolumeClaims(ns). List(ctx, metav1.ListOptions{LabelSelector: config.LABEL_PGREMOVE}) @@ -59,7 +59,7 @@ func getNumClaims(ns string) int { func getNumDatabases(ns string) int { ctx := context.TODO() - //count number of Deployments with pg-cluster + // count number of Deployments with pg-cluster deps, err := apiserver.Clientset. AppsV1().Deployments(ns). List(ctx, metav1.ListOptions{LabelSelector: config.LABEL_PG_CLUSTER}) @@ -72,7 +72,7 @@ func getNumDatabases(ns string) int { func getVolumeCap(ns string) string { ctx := context.TODO() - //sum all PVCs storage capacity + // sum all PVCs storage capacity pvcs, err := apiserver.Clientset. CoreV1().PersistentVolumeClaims(ns). List(ctx, metav1.ListOptions{LabelSelector: config.LABEL_PGREMOVE}) @@ -83,18 +83,18 @@ func getVolumeCap(ns string) string { var capTotal int64 capTotal = 0 - for _, p := range pvcs.Items { - capTotal = capTotal + getClaimCapacity(&p) + for i := range pvcs.Items { + capTotal = capTotal + getClaimCapacity(&pvcs.Items[i]) } q := resource.NewQuantity(capTotal, resource.BinarySI) - //log.Infof("capTotal string is %s\n", q.String()) + // log.Infof("capTotal string is %s\n", q.String()) return q.String() } func getDBTags(ns string) map[string]int { ctx := context.TODO() results := make(map[string]int) - //count all pods with pg-cluster, sum by image tag value + // count all pods with pg-cluster, sum by image tag value pods, err := apiserver.Clientset.CoreV1().Pods(ns).List(ctx, metav1.ListOptions{LabelSelector: config.LABEL_PG_CLUSTER}) if err != nil { log.Error(err) @@ -111,7 +111,7 @@ func getDBTags(ns string) map[string]int { func getNotReady(ns string) []string { ctx := context.TODO() - //show all database pods for each pgcluster that are not yet running + // show all database pods for each pgcluster that are not yet running agg := make([]string, 0) clusterList, err := apiserver.Clientset.CrunchydataV1().Pgclusters(ns).List(ctx, metav1.ListOptions{}) if err != nil { @@ -152,7 +152,6 @@ func getClaimCapacity(pvc *v1.PersistentVolumeClaim) int64 { diskSizeInt64, _ := diskSize.AsInt64() return diskSizeInt64 - } func getLabels(ns string) []msgs.KeyValue { @@ -168,7 +167,6 @@ func getLabels(ns string) []msgs.KeyValue { } for _, dep := range deps.Items { - for k, v := range dep.ObjectMeta.Labels { lv := k + "=" + v if results[lv] == 0 { @@ -177,7 +175,6 @@ func getLabels(ns string) []msgs.KeyValue { results[lv] = results[lv] + 1 } } - } for k, v := range results { @@ -189,5 +186,4 @@ func getLabels(ns string) []msgs.KeyValue { }) return ss - } diff --git a/internal/apiserver/statusservice/statusservice.go b/internal/apiserver/statusservice/statusservice.go index ecab0047c2..2931c4d565 100644 --- a/internal/apiserver/statusservice/statusservice.go +++ b/internal/apiserver/statusservice/statusservice.go @@ -1,7 +1,7 @@ package statusservice /* -Copyright 2018 - 2020 Crunchy Data Solutions, Inc. +Copyright 2018 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at @@ -17,11 +17,12 @@ limitations under the License. import ( "encoding/json" + "net/http" + "github.com/crunchydata/postgres-operator/internal/apiserver" msgs "github.com/crunchydata/postgres-operator/pkg/apiservermsgs" log "github.com/sirupsen/logrus" //"github.com/gorilla/mux" - "net/http" ) // StatusHandler ... @@ -71,7 +72,7 @@ func StatusHandler(w http.ResponseWriter, r *http.Request) { if clientVersion != msgs.PGO_VERSION { resp = msgs.StatusResponse{} resp.Status = msgs.Status{Code: msgs.Error, Msg: apiserver.VERSION_MISMATCH_ERROR} - json.NewEncoder(w).Encode(resp) + _ = json.NewEncoder(w).Encode(resp) return } @@ -79,11 +80,11 @@ func StatusHandler(w http.ResponseWriter, r *http.Request) { if err != nil { resp = msgs.StatusResponse{} resp.Status = msgs.Status{Code: msgs.Error, Msg: err.Error()} - json.NewEncoder(w).Encode(resp) + _ = json.NewEncoder(w).Encode(resp) return } resp = Status(ns) - json.NewEncoder(w).Encode(resp) + _ = json.NewEncoder(w).Encode(resp) } diff --git a/internal/apiserver/upgradeservice/upgradeimpl.go b/internal/apiserver/upgradeservice/upgradeimpl.go index 69e3a40927..b38f0ed271 100644 --- a/internal/apiserver/upgradeservice/upgradeimpl.go +++ b/internal/apiserver/upgradeservice/upgradeimpl.go @@ -1,7 +1,7 @@ package upgradeservice /* -Copyright 2017 - 2020 Crunchy Data Solutions, Inc. +Copyright 2017 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -36,7 +36,6 @@ import ( // Currently supported version information for upgrades const ( REQUIRED_MAJOR_PGO_VERSION = 4 - MAXIMUM_MINOR_PGO_VERSION = 5 MINIMUM_MINOR_PGO_VERSION = 1 ) @@ -167,10 +166,10 @@ func CreateUpgrade(request *msgs.CreateUpgradeRequest, ns, pgouser string) msgs. // image tag (first value) is compatible (i.e. is the same Major PostgreSQL version) as the // existing cluster's PG value, unless the --ignore-validation flag is set or the --post-gis-image-tag // flag is used - if !upgradeTagValid(cl.Spec.CCPImageTag, apiserver.Pgo.Cluster.CCPImageTag) && !request.IgnoreValidation && request.UpgradeCCPImageTag != "" { - log.Debugf("Cannot upgrade from %s to %s. Image must be the same base OS and the upgrade must be within the same major PG version.", cl.Spec.CCPImageTag, apiserver.Pgo.Cluster.CCPImageTag) + if !upgradeTagValid(cl.Spec.CCPImageTag, spec.Parameters[config.LABEL_CCP_IMAGE_KEY]) && !request.IgnoreValidation && spec.Parameters[config.LABEL_CCP_IMAGE_KEY] != "" { + log.Debugf("Cannot upgrade from %s to %s. Image must be the same base OS and the upgrade must be within the same major PG version.", cl.Spec.CCPImageTag, spec.Parameters[config.LABEL_CCP_IMAGE_KEY]) response.Status.Code = msgs.Error - response.Status.Msg = fmt.Sprintf("cannot upgrade from %s to %s, upgrade task failed.", cl.Spec.CCPImageTag, apiserver.Pgo.Cluster.CCPImageTag) + response.Status.Msg = fmt.Sprintf("cannot upgrade from %s to %s, upgrade task failed.", cl.Spec.CCPImageTag, spec.Parameters[config.LABEL_CCP_IMAGE_KEY]) return response } @@ -224,20 +223,15 @@ func supportedOperatorVersion(version string) bool { log.Errorf("Cannot convert Postgres Operator's minor version to an integer. 
Error: %v", err) return false } - if minor < MINIMUM_MINOR_PGO_VERSION || minor > MAXIMUM_MINOR_PGO_VERSION { - return false - } // If none of the above is true, the upgrade can continue - return true - + return minor >= MINIMUM_MINOR_PGO_VERSION } // upgradeTagValid compares and validates the PostgreSQL version values stored // in the image tag of the existing pgcluster CR against the values set in the // Postgres Operator's configuration func upgradeTagValid(upgradeFrom, upgradeTo string) bool { - log.Debugf("Validating upgrade from %s to %s", upgradeFrom, upgradeTo) versionRegex := regexp.MustCompile(`-(\d+)\.(\d+)(\.\d+)?-`) @@ -280,5 +274,4 @@ func upgradeTagValid(upgradeFrom, upgradeTo string) bool { // if none of the above conditions are met, a two digit Major version upgrade is likely being // attempted, or a tag value or general error occurred, so we cannot continue return false - } diff --git a/internal/apiserver/upgradeservice/upgradeservice.go b/internal/apiserver/upgradeservice/upgradeservice.go index dee9c68dc2..2c10d1b2ed 100644 --- a/internal/apiserver/upgradeservice/upgradeservice.go +++ b/internal/apiserver/upgradeservice/upgradeservice.go @@ -1,7 +1,7 @@ package upgradeservice /* -Copyright 2017 - 2020 Crunchy Data Solutions, Inc. +Copyright 2017 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -71,17 +71,17 @@ func CreateUpgradeHandler(w http.ResponseWriter, r *http.Request) { if request.ClientVersion != msgs.PGO_VERSION { resp.Status = msgs.Status{Code: msgs.Error, Msg: apiserver.VERSION_MISMATCH_ERROR} - json.NewEncoder(w).Encode(resp) + _ = json.NewEncoder(w).Encode(resp) return } ns, err = apiserver.GetNamespace(apiserver.Clientset, username, request.Namespace) if err != nil { resp.Status = msgs.Status{Code: msgs.Error, Msg: err.Error()} - json.NewEncoder(w).Encode(resp) + _ = json.NewEncoder(w).Encode(resp) return } resp = CreateUpgrade(&request, ns, username) - json.NewEncoder(w).Encode(resp) + _ = json.NewEncoder(w).Encode(resp) } diff --git a/internal/apiserver/userservice/userimpl.go b/internal/apiserver/userservice/userimpl.go index 603fad07bd..3991bc7217 100644 --- a/internal/apiserver/userservice/userimpl.go +++ b/internal/apiserver/userservice/userimpl.go @@ -1,7 +1,7 @@ package userservice /* -Copyright 2017 - 2020 Crunchy Data Solutions, Inc. +Copyright 2017 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -104,10 +104,8 @@ const ( sqlDelimiter = "|" ) -var ( - // sqlCommand is the command that needs to be executed for running SQL - sqlCommand = []string{"psql", "-A", "-t"} -) +// sqlCommand is the command that needs to be executed for running SQL +var sqlCommand = []string{"psql", "-A", "-t"} // CreatueUser allows one to create a PostgreSQL user in one of more PostgreSQL // clusters, and provides the abilit to do the following: @@ -138,7 +136,6 @@ func CreateUser(request *msgs.CreateUserRequest, pgouser string) msgs.CreateUser // try to get a list of clusters. 
if there is an error, return clusterList, err := getClusterList(request.Namespace, request.Clusters, request.Selector, request.AllFlag) - if err != nil { response.Status.Code = msgs.Error response.Status.Msg = err.Error() @@ -158,8 +155,7 @@ func CreateUser(request *msgs.CreateUserRequest, pgouser string) msgs.CreateUser } // determine if the user passed in a valid password type - passwordType, err := msgs.GetPasswordType(request.PasswordType) - + passwordType, err := apiserver.GetPasswordType(request.PasswordType) if err != nil { response.Status.Code = msgs.Error response.Status.Msg = err.Error() @@ -182,7 +178,8 @@ func CreateUser(request *msgs.CreateUserRequest, pgouser string) msgs.CreateUser } // iterate through each cluster and add the new PostgreSQL role to each pod - for _, cluster := range clusterList.Items { + for i := range clusterList.Items { + cluster := &clusterList.Items[i] result := msgs.UserResponseDetail{ ClusterName: cluster.Spec.ClusterName, Username: request.Username, @@ -192,8 +189,7 @@ func CreateUser(request *msgs.CreateUserRequest, pgouser string) msgs.CreateUser log.Debugf("creating user [%s] on cluster [%s]", result.Username, cluster.Spec.ClusterName) // first, find the primary Pod - pod, err := util.GetPrimaryPod(apiserver.Clientset, &cluster) - + pod, err := util.GetPrimaryPod(apiserver.Clientset, cluster) // if the primary Pod cannot be found, we're going to continue on for the // other clusters, but provide some sort of error message in the response if err != nil { @@ -226,7 +222,6 @@ func CreateUser(request *msgs.CreateUserRequest, pgouser string) msgs.CreateUser // Set the password. We want a password to be generated if the user did not // set a password _, password, hashedPassword, err := generatePassword(result.Username, request.Password, passwordType, true, request.PasswordLength) - // on the off-chance there is an error, record it and continue if err != nil { log.Error(err) @@ -255,8 +250,7 @@ func CreateUser(request *msgs.CreateUserRequest, pgouser string) msgs.CreateUser // if this user is "managed" by the Operator, add a secret. If there is an // error, we can fall through as the next step is appending the results if request.ManagedUser { - if err := util.CreateUserSecret(apiserver.Clientset, cluster.Spec.ClusterName, result.Username, - result.Password, cluster.Spec.Namespace); err != nil { + if err := util.CreateUserSecret(apiserver.Clientset, cluster, result.Username, result.Password); err != nil { log.Error(err) result.Error = true @@ -268,7 +262,7 @@ func CreateUser(request *msgs.CreateUserRequest, pgouser string) msgs.CreateUser } // if a pgAdmin deployment exists, attempt to add the user to it - if err := updatePgAdmin(&cluster, result.Username, result.Password); err != nil { + if err := updatePgAdmin(cluster, result.Username, result.Password); err != nil { log.Error(err) result.Error = true result.ErrorMessage = err.Error() @@ -306,7 +300,6 @@ func DeleteUser(request *msgs.DeleteUserRequest, pgouser string) msgs.DeleteUser // try to get a list of clusters. if there is an error, return clusterList, err := getClusterList(request.Namespace, request.Clusters, request.Selector, request.AllFlag) - if err != nil { response.Status.Code = msgs.Error response.Status.Msg = err.Error() @@ -315,7 +308,8 @@ func DeleteUser(request *msgs.DeleteUserRequest, pgouser string) msgs.DeleteUser // iterate through each cluster and try to delete the user! 
loop: - for _, cluster := range clusterList.Items { + for i := range clusterList.Items { + cluster := clusterList.Items[i] result := msgs.UserResponseDetail{ ClusterName: cluster.Spec.ClusterName, Username: request.Username, @@ -325,7 +319,6 @@ loop: // first, find the primary Pod pod, err := util.GetPrimaryPod(apiserver.Clientset, &cluster) - // if the primary Pod cannot be found, we're going to continue on for the // other clusters, but provide some sort of error message in the response if err != nil { @@ -341,7 +334,6 @@ loop: // first, get a list of all the databases in the cluster. We will need to // go through each database and drop any object that the user owns output, err := executeSQL(pod, cluster.Spec.Port, sqlFindDatabases, []string{}) - // if there is an error, record it and move on as we cannot actually delete // the user if err != nil { @@ -452,7 +444,6 @@ func ShowUser(request *msgs.ShowUserRequest) msgs.ShowUserResponse { // them. If this returns an error, exit here clusterList, err := getClusterList(request.Namespace, request.Clusters, request.Selector, request.AllFlag) - if err != nil { response.Status.Code = msgs.Error response.Status.Msg = err.Error() @@ -471,10 +462,10 @@ func ShowUser(request *msgs.ShowUserRequest) msgs.ShowUserResponse { } // iterate through each cluster and look up information about each user - for _, cluster := range clusterList.Items { + for i := range clusterList.Items { + cluster := clusterList.Items[i] // first, find the primary Pod pod, err := util.GetPrimaryPod(apiserver.Clientset, &cluster) - // if the primary Pod cannot be found, we're going to continue on for the // other clusters, but provide some sort of error message in the response if err != nil { @@ -503,7 +494,6 @@ // great, now we can perform the user lookup output, err := executeSQL(pod, cluster.Spec.Port, sql, []string{}) - // if there is an error, record it and move on to the next cluster if err != nil { log.Error(err) @@ -558,7 +548,7 @@ // // We ignore any errors...if the password gets set, we add it. If not, we // don't - secretName := fmt.Sprintf(util.UserSecretFormat, result.ClusterName, result.Username) + secretName := crv1.UserSecretName(&cluster, result.Username) password, _ := util.GetPasswordFromSecret(apiserver.Clientset, pod.Namespace, secretName) if password != "" { @@ -603,14 +593,15 @@ func UpdateUser(request *msgs.UpdateUserRequest, pgouser string) msgs.UpdateUser // if this involves updating a specific PostgreSQL account, and it is a system // account, return here - if request.Username != "" && util.IsPostgreSQLUserSystemAccount(request.Username) { + if request.Username != "" && util.IsPostgreSQLUserSystemAccount(request.Username) && !request.SetSystemAccountPassword { response.Status.Code = msgs.Error - response.Status.Msg = fmt.Sprintf(errSystemAccountFormat, request.Username) + response.Status.Msg = fmt.Sprintf(errSystemAccountFormat, request.Username) + + " You can override this with the \"--set-system-account-password\" flag."
return response } // determine if the user passed in a valid password type - if _, err := msgs.GetPasswordType(request.PasswordType); err != nil { + if _, err := apiserver.GetPasswordType(request.PasswordType); err != nil { response.Status.Code = msgs.Error response.Status.Msg = err.Error() return response @@ -618,7 +609,6 @@ func UpdateUser(request *msgs.UpdateUserRequest, pgouser string) msgs.UpdateUser // try to get a list of clusters. if there is an error, return clusterList, err := getClusterList(request.Namespace, request.Clusters, request.Selector, request.AllFlag) - if err != nil { response.Status.Code = msgs.Error response.Status.Msg = err.Error() @@ -635,20 +625,21 @@ return response } - for _, cluster := range clusterList.Items { + for i := range clusterList.Items { var result msgs.UserResponseDetail + cluster := &clusterList.Items[i] // determine which update user action needs to be performed switch { // determine if any passwords expiring in X days should be updated // it returns a slice of results, which are then appended to the list case request.Expired > 0: - results := rotateExpiredPasswords(request, &cluster) + results := rotateExpiredPasswords(request, cluster) response.Results = append(response.Results, results...) // otherwise, perform a regular "update user" request which covers all the // other "regular" cases. It returns a result, which is appended to the list default: - result = updateUser(request, &cluster) + result = updateUser(request, cluster) response.Results = append(response.Results, result) } } @@ -662,10 +653,9 @@ func UpdateUser(request *msgs.UpdateUserRequest, pgouser string) msgs.UpdateUser // error in here, but do nothing with it func deleteUserSecret(cluster crv1.Pgcluster, username string) { ctx := context.TODO() - secretName := fmt.Sprintf(util.UserSecretFormat, cluster.Spec.ClusterName, username) + secretName := crv1.UserSecretName(&cluster, username) err := apiserver.Clientset.CoreV1().Secrets(cluster.Spec.Namespace). Delete(ctx, secretName, metav1.DeleteOptions{}) - if err != nil { log.Error(err) } @@ -737,7 +727,6 @@ func generatePassword(username, password string, passwordType pgpassword.Passwor // generate the password generatedPassword, err := util.GeneratePassword(passwordLength) - // if there is an error, return if err != nil { return false, "", "", err @@ -748,13 +737,11 @@ // finally, hash the password postgresPassword, err := pgpassword.NewPostgresPassword(passwordType, username, password) - if err != nil { return false, "", "", err } hashedPassword, err := postgresPassword.Build() - if err != nil { return false, "", "", err } @@ -821,7 +808,6 @@ func getClusterList(namespace string, clusterNames []string, selector string, al // of arguments...or both.
First, start with the selector if selector != "" { cl, err := apiserver.Clientset.CrunchydataV1().Pgclusters(namespace).List(ctx, metav1.ListOptions{LabelSelector: selector}) - // if there is an error, return here with an empty cluster list if err != nil { return crv1.PgclusterList{}, err @@ -832,7 +818,6 @@ // now try to get clusters based on specific cluster names for _, clusterName := range clusterNames { cluster, err := apiserver.Clientset.CrunchydataV1().Pgclusters(namespace).Get(ctx, clusterName, metav1.GetOptions{}) - // if there is an error, capture it here and return here with an empty list if err != nil { return crv1.PgclusterList{}, err @@ -867,7 +852,6 @@ func rotateExpiredPasswords(request *msgs.UpdateUserRequest, cluster *crv1.Pgclu // first, find the primary Pod. If we can't do that, no sense in continuing pod, err := util.GetPrimaryPod(apiserver.Clientset, cluster) - if err != nil { result := msgs.UserResponseDetail{ ClusterName: cluster.Spec.ClusterName, @@ -893,7 +877,6 @@ // alright, time to find if there are any expired accounts. If this errors, // then we will abort here output, err := executeSQL(pod, cluster.Spec.Port, sql, []string{}) - if err != nil { result := msgs.UserResponseDetail{ ClusterName: cluster.Spec.ClusterName, @@ -957,13 +940,12 @@ // get the password type. the error is already evaluated in a called // function - passwordType, _ := msgs.GetPasswordType(request.PasswordType) + passwordType, _ := apiserver.GetPasswordType(request.PasswordType) // generate a new password. Check to see if the user passed in a particular // length of the password, or passed in a password to rotate (though that // is not advised...). This forces the password to change _, password, hashedPassword, err := generatePassword(result.Username, request.Password, passwordType, true, request.PasswordLength) - // on the off-chance there's an error in generating the password, record it // and continue if err != nil { @@ -1007,7 +989,6 @@ func updatePgAdmin(cluster *crv1.Pgcluster, username, password string) error { // Sync user to pgAdmin, if enabled qr, err := pgadmin.GetPgAdminQueryRunner(apiserver.Clientset, apiserver.RESTConfig, cluster) - // if there is an error, return as such if err != nil { return err @@ -1064,7 +1045,6 @@ func updateUser(request *msgs.UpdateUserRequest, cluster *crv1.Pgcluster) msgs.U // first, find the primary Pod pod, err := util.GetPrimaryPod(apiserver.Clientset, cluster) - // if the primary Pod cannot be found, we're going to continue on for the // other clusters, but provide some sort of error message in the response if err != nil { @@ -1092,10 +1072,9 @@ // Speaking of passwords...let's first determine if the user updated their // password.
See generatePassword for how precedence is given for password // updates - passwordType, _ := msgs.GetPasswordType(request.PasswordType) + passwordType, _ := apiserver.GetPasswordType(request.PasswordType) isChanged, password, hashedPassword, err := generatePassword(result.Username, request.Password, passwordType, request.RotatePassword, request.PasswordLength) - // in the off-chance there is an error generating the password, record it // and return if err != nil { @@ -1162,6 +1141,7 @@ sql = fmt.Sprintf("%s %s", sql, sqlEnableLoginClause) case msgs.UpdateUserLoginDisable: sql = fmt.Sprintf("%s %s", sql, sqlDisableLoginClause) + case msgs.UpdateUserLoginDoNothing: // this is never reached -- no-op } // execute the SQL! if there is an error, return the results @@ -1180,13 +1160,12 @@ // has a "managed" account (i.e. there is a secret for this user account), // we can now update the value of that password in the secret if isChanged { - secretName := fmt.Sprintf(util.UserSecretFormat, cluster.Spec.ClusterName, result.Username) + secretName := crv1.UserSecretName(cluster, result.Username) // only call update user secret if the secret exists if _, err := apiserver.Clientset.CoreV1().Secrets(cluster.Namespace).Get(ctx, secretName, metav1.GetOptions{}); err == nil { // if we cannot update the user secret, only warn that we cannot do so - if err := util.UpdateUserSecret(apiserver.Clientset, cluster.Spec.ClusterName, - result.Username, result.Password, cluster.Namespace); err != nil { + if err := util.UpdateUserSecret(apiserver.Clientset, cluster, result.Username, result.Password); err != nil { log.Warn(err) } } diff --git a/internal/apiserver/userservice/userimpl_test.go b/internal/apiserver/userservice/userimpl_test.go index 71d3aa5fcf..8dddcf50b8 100644 --- a/internal/apiserver/userservice/userimpl_test.go +++ b/internal/apiserver/userservice/userimpl_test.go @@ -1,7 +1,7 @@ package userservice /* - Copyright 2020 Crunchy Data Solutions, Inc. + Copyright 2020 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at @@ -30,9 +30,7 @@ func TestGeneratePassword(t *testing.T) { generatedPasswordLength := 32 t.Run("no changes", func(t *testing.T) { - changed, _, _, err := generatePassword(username, password, passwordType, generateNewPassword, generatedPasswordLength) - if err != nil { t.Error(err) return @@ -48,7 +46,6 @@ func TestGeneratePassword(t *testing.T) { t.Run("valid", func(t *testing.T) { changed, newPassword, _, err := generatePassword(username, password, passwordType, generateNewPassword, generatedPasswordLength) - if err != nil { t.Error(err) return @@ -66,7 +63,6 @@ func TestGeneratePassword(t *testing.T) { t.Run("does not override custom password", func(t *testing.T) { password := "custom" changed, newPassword, _, err := generatePassword(username, password, passwordType, generateNewPassword, generatedPasswordLength) - if err != nil { t.Error(err) return @@ -84,7 +80,6 @@ func TestGeneratePassword(t *testing.T) { t.Run("password length can be adjusted", func(t *testing.T) { generatedPasswordLength := 16 changed, newPassword, _, err := generatePassword(username, password, passwordType, generateNewPassword, generatedPasswordLength) - if err != nil { t.Error(err) return @@ -102,7 +97,6 @@ func TestGeneratePassword(t *testing.T) { t.Run("should be nonzero length", func(t *testing.T) { generatedPasswordLength := 0 changed, newPassword, _, err := generatePassword(username, password, passwordType, generateNewPassword, generatedPasswordLength) - if err != nil { t.Error(err) return @@ -125,7 +119,6 @@ func TestGeneratePassword(t *testing.T) { t.Run("md5", func(t *testing.T) { changed, _, hashedPassword, err := generatePassword(username, password, passwordType, generateNewPassword, generatedPasswordLength) - if err != nil { t.Error(err) return @@ -144,7 +137,6 @@ func TestGeneratePassword(t *testing.T) { passwordType := pgpassword.SCRAM changed, _, hashedPassword, err := generatePassword(username, password, passwordType, generateNewPassword, generatedPasswordLength) - if err != nil { t.Error(err) return @@ -159,5 +151,4 @@ func TestGeneratePassword(t *testing.T) { } }) }) - } diff --git a/internal/apiserver/userservice/userservice.go b/internal/apiserver/userservice/userservice.go index 83994c90fa..d64462a51a 100644 --- a/internal/apiserver/userservice/userservice.go +++ b/internal/apiserver/userservice/userservice.go @@ -1,7 +1,7 @@ package userservice /* -Copyright 2017 - 2020 Crunchy Data Solutions, Inc. +Copyright 2017 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -17,10 +17,11 @@ limitations under the License. 
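Aside from the copyright bump, the userimpl_test.go changes above are whitespace-only, but the subtests document generatePassword's contract well: it reports whether anything changed, plus the plaintext and hashed forms. A usage sketch consistent with those tests; the username "hippo" and the lengths are illustrative, and pgpassword.MD5 is assumed from the test's password-type constants:

// inside a test in package userservice
changed, plaintext, hashed, err := generatePassword("hippo", "", pgpassword.MD5, true, 32)
if err != nil {
	t.Fatal(err)
}
// on success: changed == true, len(plaintext) == 32, and hashed uses
// PostgreSQL's md5 format: "md5" + hex(md5(password + username))
_, _, _ = changed, plaintext, hashed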
import ( "encoding/json" + "net/http" + "github.com/crunchydata/postgres-operator/internal/apiserver" msgs "github.com/crunchydata/postgres-operator/pkg/apiservermsgs" log "github.com/sirupsen/logrus" - "net/http" ) // UserHandler provides a means to update a PostgreSQL user @@ -52,7 +53,7 @@ func UpdateUserHandler(w http.ResponseWriter, r *http.Request) { username, err := apiserver.Authn(apiserver.UPDATE_USER_PERM, w, r) if err != nil { resp.Status = msgs.Status{Code: msgs.Error, Msg: apiserver.VERSION_MISMATCH_ERROR} - json.NewEncoder(w).Encode(resp) + _ = json.NewEncoder(w).Encode(resp) return } @@ -62,20 +63,20 @@ func UpdateUserHandler(w http.ResponseWriter, r *http.Request) { if request.ClientVersion != msgs.PGO_VERSION { resp.Status = msgs.Status{Code: msgs.Error, Msg: apiserver.VERSION_MISMATCH_ERROR} - json.NewEncoder(w).Encode(resp) + _ = json.NewEncoder(w).Encode(resp) return } _, err = apiserver.GetNamespace(apiserver.Clientset, username, request.Namespace) if err != nil { resp.Status = msgs.Status{Code: msgs.Error, Msg: err.Error()} - json.NewEncoder(w).Encode(resp) + _ = json.NewEncoder(w).Encode(resp) return } resp = UpdateUser(&request, username) - json.NewEncoder(w).Encode(resp) + _ = json.NewEncoder(w).Encode(resp) } // CreateUserHandler ... @@ -117,20 +118,19 @@ func CreateUserHandler(w http.ResponseWriter, r *http.Request) { if err != nil { resp.Status.Code = msgs.Error resp.Status.Msg = err.Error() - json.NewEncoder(w).Encode(resp) + _ = json.NewEncoder(w).Encode(resp) return } if request.ClientVersion != msgs.PGO_VERSION { resp.Status.Code = msgs.Error resp.Status.Msg = apiserver.VERSION_MISMATCH_ERROR - json.NewEncoder(w).Encode(resp) + _ = json.NewEncoder(w).Encode(resp) return } resp = CreateUser(&request, username) - json.NewEncoder(w).Encode(resp) - + _ = json.NewEncoder(w).Encode(resp) } // DeleteUserHandler ... 
@@ -163,7 +163,7 @@ func DeleteUserHandler(w http.ResponseWriter, r *http.Request) { if request.ClientVersion != msgs.PGO_VERSION { resp.Status.Code = msgs.Error resp.Status.Msg = apiserver.VERSION_MISMATCH_ERROR - json.NewEncoder(w).Encode(resp) + _ = json.NewEncoder(w).Encode(resp) return } @@ -182,13 +182,12 @@ func DeleteUserHandler(w http.ResponseWriter, r *http.Request) { if err != nil { resp.Status.Code = msgs.Error resp.Status.Msg = err.Error() - json.NewEncoder(w).Encode(resp) + _ = json.NewEncoder(w).Encode(resp) return } resp = DeleteUser(&request, pgouser) - json.NewEncoder(w).Encode(resp) - + _ = json.NewEncoder(w).Encode(resp) } // ShowUserHandler allows one to display information about PostgreSQL users that @@ -237,18 +236,17 @@ func ShowUserHandler(w http.ResponseWriter, r *http.Request) { resp := msgs.ShowUserResponse{} if request.ClientVersion != msgs.PGO_VERSION { resp.Status = msgs.Status{Code: msgs.Error, Msg: apiserver.VERSION_MISMATCH_ERROR} - json.NewEncoder(w).Encode(resp) + _ = json.NewEncoder(w).Encode(resp) return } _, err = apiserver.GetNamespace(apiserver.Clientset, username, request.Namespace) if err != nil { resp.Status = msgs.Status{Code: msgs.Error, Msg: err.Error()} - json.NewEncoder(w).Encode(resp) + _ = json.NewEncoder(w).Encode(resp) return } resp = ShowUser(&request) - json.NewEncoder(w).Encode(resp) - + _ = json.NewEncoder(w).Encode(resp) } diff --git a/internal/apiserver/versionservice/versionimpl.go b/internal/apiserver/versionservice/versionimpl.go index d2341d4e93..ea21f7fdb4 100644 --- a/internal/apiserver/versionservice/versionimpl.go +++ b/internal/apiserver/versionservice/versionimpl.go @@ -1,7 +1,7 @@ package versionservice /* -Copyright 2017 - 2020 Crunchy Data Solutions, Inc. +Copyright 2017 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at diff --git a/internal/apiserver/versionservice/versionservice.go b/internal/apiserver/versionservice/versionservice.go index 49735dff1a..1bf2dfe0b6 100644 --- a/internal/apiserver/versionservice/versionservice.go +++ b/internal/apiserver/versionservice/versionservice.go @@ -1,7 +1,7 @@ package versionservice /* -Copyright 2017 - 2020 Crunchy Data Solutions, Inc. +Copyright 2017 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -17,9 +17,10 @@ limitations under the License. import ( "encoding/json" + "net/http" + "github.com/crunchydata/postgres-operator/internal/apiserver" log "github.com/sirupsen/logrus" - "net/http" ) // VersionHandler ... @@ -50,7 +51,7 @@ func VersionHandler(w http.ResponseWriter, r *http.Request) { resp := Version() - json.NewEncoder(w).Encode(resp) + _ = json.NewEncoder(w).Encode(resp) } // HealthHandler ...
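The version-service handlers gain the same explicit-discard treatment, and HealthyHandler (in the next hunk) keeps the bare HTTP/200 "ok" convention while now discarding Write's return values too. Because it needs no auth or cluster state, it can be smoke-tested in isolation; a sketch using net/http/httptest, where the request path is arbitrary since the handler is invoked directly:

req := httptest.NewRequest(http.MethodGet, "/healthy", nil)
rec := httptest.NewRecorder()
versionservice.HealthyHandler(rec, req)
// expect rec.Code == http.StatusOK and rec.Body.String() == "ok"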
@@ -71,7 +72,7 @@ func HealthHandler(w http.ResponseWriter, r *http.Request) { resp := Health() - json.NewEncoder(w).Encode(resp) + _ = json.NewEncoder(w).Encode(resp) } // HealthyHandler follows the health endpoint convention of HTTP/200 and @@ -88,5 +89,5 @@ func HealthyHandler(w http.ResponseWriter, r *http.Request) { // '200': // description: "Healthy: server is responding as expected" w.WriteHeader(http.StatusOK) - w.Write([]byte("ok")) + _, _ = w.Write([]byte("ok")) } diff --git a/internal/apiserver/workflowservice/workflowimpl.go b/internal/apiserver/workflowservice/workflowimpl.go index e07ff79d59..91fd1919aa 100644 --- a/internal/apiserver/workflowservice/workflowimpl.go +++ b/internal/apiserver/workflowservice/workflowimpl.go @@ -1,7 +1,7 @@ package workflowservice /* -Copyright 2018 - 2020 Crunchy Data Solutions, Inc. +Copyright 2018 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -34,7 +34,7 @@ func ShowWorkflow(id, ns string) (msgs.ShowWorkflowDetail, error) { log.Debugf("ShowWorkflow called with id %s", id) detail := msgs.ShowWorkflowDetail{} - //get the pgtask for this workflow + // get the pgtask for this workflow selector := crv1.PgtaskWorkflowID + "=" + id @@ -53,5 +53,4 @@ func ShowWorkflow(id, ns string) (msgs.ShowWorkflowDetail, error) { detail.Parameters = t.Spec.Parameters return detail, err - } diff --git a/internal/apiserver/workflowservice/workflowservice.go b/internal/apiserver/workflowservice/workflowservice.go index 81aea9ff98..69f2f21af6 100644 --- a/internal/apiserver/workflowservice/workflowservice.go +++ b/internal/apiserver/workflowservice/workflowservice.go @@ -1,7 +1,7 @@ package workflowservice /* -Copyright 2018 - 2020 Crunchy Data Solutions, Inc. +Copyright 2018 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -17,11 +17,12 @@ limitations under the License. import ( "encoding/json" + "net/http" + "github.com/crunchydata/postgres-operator/internal/apiserver" msgs "github.com/crunchydata/postgres-operator/pkg/apiservermsgs" "github.com/gorilla/mux" log "github.com/sirupsen/logrus" - "net/http" ) // ShowWorkflowHandler ... @@ -79,7 +80,7 @@ func ShowWorkflowHandler(w http.ResponseWriter, r *http.Request) { if clientVersion != msgs.PGO_VERSION { resp.Status.Code = msgs.Error resp.Status.Msg = apiserver.VERSION_MISMATCH_ERROR - json.NewEncoder(w).Encode(resp) + _ = json.NewEncoder(w).Encode(resp) return } @@ -87,7 +88,7 @@ func ShowWorkflowHandler(w http.ResponseWriter, r *http.Request) { if err != nil { resp.Status.Code = msgs.Error resp.Status.Msg = err.Error() - json.NewEncoder(w).Encode(resp) + _ = json.NewEncoder(w).Encode(resp) return } @@ -97,5 +98,5 @@ func ShowWorkflowHandler(w http.ResponseWriter, r *http.Request) { resp.Status.Msg = err.Error() } - json.NewEncoder(w).Encode(resp) + _ = json.NewEncoder(w).Encode(resp) } diff --git a/internal/config/annotations.go b/internal/config/annotations.go index 7cf97b96ee..22b8737923 100644 --- a/internal/config/annotations.go +++ b/internal/config/annotations.go @@ -1,7 +1,7 @@ package config /* - Copyright 2019 - 2020 Crunchy Data Solutions, Inc. + Copyright 2019 - 2023 Crunchy Data Solutions, Inc. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -21,10 +21,19 @@ const ( ANNOTATION_BACKREST_RESTORE = "pgo-backrest-restore" ANNOTATION_PGHA_BOOTSTRAP_REPLICA = "pgo-pgha-bootstrap-replica" ANNOTATION_PRIMARY_DEPLOYMENT = "primary-deployment" + // ANNOTATION_CLUSTER_KEEP_BACKUPS indicates that if a custom resource is + // deleted, ensure the backups are kept + ANNOTATION_CLUSTER_KEEP_BACKUPS = "keep-backups" + // ANNOTATION_CLUSTER_KEEP_DATA indicates that if a custom resource is + // deleted, ensure the data directory is kept + ANNOTATION_CLUSTER_KEEP_DATA = "keep-data" // annotation to track the cluster's current primary ANNOTATION_CURRENT_PRIMARY = "current-primary" // annotation to indicate whether a cluster has been upgraded ANNOTATION_IS_UPGRADED = "is-upgraded" + // annotation to indicate an upgrade is in progress. this has the effect + // of causing the rmdata job in pgcluster to not run + ANNOTATION_UPGRADE_IN_PROGRESS = "upgrade-in-progress" // annotation to store the Operator versions upgraded from and to ANNOTATION_UPGRADE_INFO = "upgrade-info" // annotation to store the string boolean, used when checking upgrade status @@ -48,7 +57,7 @@ const ( // ANNOTATION_S3_VERIFY_TLS is for storing the setting that determines whether or not TLS should // be used to access a pgBackRest repository ANNOTATION_S3_VERIFY_TLS = "s3-verify-tls" - // ANNOTATION_S3_BUCKET is for storing the SSHD port used by the pgBackRest repository + // ANNOTATION_SSHD_PORT is for storing the SSHD port used by the pgBackRest repository // service in a cluster ANNOTATION_SSHD_PORT = "sshd-port" // ANNOTATION_SUPPLEMENTAL_GROUPS is for storing the supplemental groups used with a cluster diff --git a/internal/config/defaults.go b/internal/config/defaults.go index d86e404eb7..b4a454d5cf 100644 --- a/internal/config/defaults.go +++ b/internal/config/defaults.go @@ -1,7 +1,7 @@ package config /* - Copyright 2020 Crunchy Data Solutions, Inc. + Copyright 2020 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at diff --git a/internal/config/images.go b/internal/config/images.go index 34845ee6a1..fe032605f2 100644 --- a/internal/config/images.go +++ b/internal/config/images.go @@ -1,7 +1,7 @@ package config /* - Copyright 2019 - 2020 Crunchy Data Solutions, Inc. + Copyright 2019 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License.
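The new keep-backups and keep-data annotations let a pgcluster custom resource be deleted without destroying its backups or data directory; the rmdata flow is the expected consumer. A sketch of the kind of check such a consumer would make, using a hypothetical helper; comparing against config.LABEL_TRUE mirrors how the operator stores boolean-ish annotation values:

// keepBackups reports whether the cluster was annotated to preserve its
// backups when the custom resource is deleted.
func keepBackups(cluster *crv1.Pgcluster) bool {
	return cluster.Annotations[config.ANNOTATION_CLUSTER_KEEP_BACKUPS] == config.LABEL_TRUE
}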
You may obtain a copy of the License at @@ -17,20 +17,15 @@ package config // a list of container images that are available const ( - CONTAINER_IMAGE_PGO_BACKREST = "pgo-backrest" - CONTAINER_IMAGE_PGO_BACKREST_REPO = "pgo-backrest-repo" + CONTAINER_IMAGE_PGO_BACKREST = "crunchy-pgbackrest" + CONTAINER_IMAGE_PGO_BACKREST_REPO = "crunchy-pgbackrest-repo" CONTAINER_IMAGE_PGO_CLIENT = "pgo-client" CONTAINER_IMAGE_PGO_RMDATA = "pgo-rmdata" - CONTAINER_IMAGE_PGO_SQL_RUNNER = "pgo-sqlrunner" - CONTAINER_IMAGE_CRUNCHY_ADMIN = "crunchy-admin" - CONTAINER_IMAGE_CRUNCHY_BACKREST_RESTORE = "crunchy-backrest-restore" CONTAINER_IMAGE_CRUNCHY_POSTGRES_EXPORTER = "crunchy-postgres-exporter" CONTAINER_IMAGE_CRUNCHY_GRAFANA = "crunchy-grafana" CONTAINER_IMAGE_CRUNCHY_PGADMIN = "crunchy-pgadmin4" CONTAINER_IMAGE_CRUNCHY_PGBADGER = "crunchy-pgbadger" CONTAINER_IMAGE_CRUNCHY_PGBOUNCER = "crunchy-pgbouncer" - CONTAINER_IMAGE_CRUNCHY_PGDUMP = "crunchy-pgdump" - CONTAINER_IMAGE_CRUNCHY_PGRESTORE = "crunchy-pgrestore" CONTAINER_IMAGE_CRUNCHY_POSTGRES_HA = "crunchy-postgres-ha" CONTAINER_IMAGE_CRUNCHY_POSTGRES_GIS_HA = "crunchy-postgres-gis-ha" CONTAINER_IMAGE_CRUNCHY_PROMETHEUS = "crunchy-prometheus" @@ -46,15 +41,10 @@ var RelatedImageMap = map[string]string{ "RELATED_IMAGE_PGO_BACKREST_REPO": CONTAINER_IMAGE_PGO_BACKREST_REPO, "RELATED_IMAGE_PGO_CLIENT": CONTAINER_IMAGE_PGO_CLIENT, "RELATED_IMAGE_PGO_RMDATA": CONTAINER_IMAGE_PGO_RMDATA, - "RELATED_IMAGE_PGO_SQL_RUNNER": CONTAINER_IMAGE_PGO_SQL_RUNNER, - "RELATED_IMAGE_CRUNCHY_ADMIN": CONTAINER_IMAGE_CRUNCHY_ADMIN, - "RELATED_IMAGE_CRUNCHY_BACKREST_RESTORE": CONTAINER_IMAGE_CRUNCHY_BACKREST_RESTORE, "RELATED_IMAGE_CRUNCHY_POSTGRES_EXPORTER": CONTAINER_IMAGE_CRUNCHY_POSTGRES_EXPORTER, "RELATED_IMAGE_CRUNCHY_PGADMIN": CONTAINER_IMAGE_CRUNCHY_PGADMIN, "RELATED_IMAGE_CRUNCHY_PGBADGER": CONTAINER_IMAGE_CRUNCHY_PGBADGER, "RELATED_IMAGE_CRUNCHY_PGBOUNCER": CONTAINER_IMAGE_CRUNCHY_PGBOUNCER, - "RELATED_IMAGE_CRUNCHY_PGDUMP": CONTAINER_IMAGE_CRUNCHY_PGDUMP, - "RELATED_IMAGE_CRUNCHY_PGRESTORE": CONTAINER_IMAGE_CRUNCHY_PGRESTORE, "RELATED_IMAGE_CRUNCHY_POSTGRES_HA": CONTAINER_IMAGE_CRUNCHY_POSTGRES_HA, "RELATED_IMAGE_CRUNCHY_POSTGRES_GIS_HA": CONTAINER_IMAGE_CRUNCHY_POSTGRES_GIS_HA, } diff --git a/internal/config/labels.go b/internal/config/labels.go index 6a24b72494..eb60097f98 100644 --- a/internal/config/labels.go +++ b/internal/config/labels.go @@ -1,7 +1,7 @@ package config /* - Copyright 2019 - 2020 Crunchy Data Solutions, Inc. + Copyright 2019 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at @@ -16,149 +16,172 @@ package config */ // resource labels used by the operator -const LABEL_NAME = "name" -const LABEL_SELECTOR = "selector" -const LABEL_OPERATOR = "postgres-operator" -const LABEL_PG_CLUSTER = "pg-cluster" -const LABEL_PG_CLUSTER_IDENTIFIER = "pg-cluster-id" -const LABEL_PG_DATABASE = "pgo-pg-database" +const ( + LABEL_NAME = "name" + LABEL_SELECTOR = "selector" + LABEL_OPERATOR = "postgres-operator" + LABEL_PG_CLUSTER = "pg-cluster" + LABEL_PG_DATABASE = "pgo-pg-database" +) const LABEL_PGTASK = "pg-task" -const LABEL_AUTOFAIL = "autofail" -const LABEL_FAILOVER = "failover" - -const LABEL_TARGET = "target" -const LABEL_RMDATA = "pgrmdata" - -const LABEL_PGPOLICY = "pgpolicy" -const LABEL_INGEST = "ingest" -const LABEL_PGREMOVE = "pgremove" -const LABEL_PVCNAME = "pvcname" -const LABEL_EXPORTER = "crunchy-postgres-exporter" -const LABEL_EXPORTER_PG_USER = "ccp_monitoring" -const LABEL_ARCHIVE = "archive" -const LABEL_ARCHIVE_TIMEOUT = "archive-timeout" -const LABEL_CUSTOM_CONFIG = "custom-config" -const LABEL_NODE_LABEL_KEY = "NodeLabelKey" -const LABEL_NODE_LABEL_VALUE = "NodeLabelValue" -const LABEL_REPLICA_NAME = "replica-name" -const LABEL_CCP_IMAGE_TAG_KEY = "ccp-image-tag" -const LABEL_CCP_IMAGE_KEY = "ccp-image" -const LABEL_IMAGE_PREFIX = "image-prefix" -const LABEL_SERVICE_TYPE = "service-type" -const LABEL_POD_ANTI_AFFINITY = "pg-pod-anti-affinity" -const LABEL_SYNC_REPLICATION = "sync-replication" - -const LABEL_REPLICA_COUNT = "replica-count" -const LABEL_STORAGE_CONFIG = "storage-config" -const LABEL_NODE_LABEL = "node-label" -const LABEL_VERSION = "version" -const LABEL_PGO_VERSION = "pgo-version" -const LABEL_DELETE_DATA = "delete-data" -const LABEL_DELETE_DATA_STARTED = "delete-data-started" -const LABEL_DELETE_BACKUPS = "delete-backups" -const LABEL_IS_REPLICA = "is-replica" -const LABEL_IS_BACKUP = "is-backup" -const LABEL_STARTUP = "startup" -const LABEL_SHUTDOWN = "shutdown" +const LABEL_RESTART = "restart" + +const ( + LABEL_RMDATA = "pgrmdata" +) + +const ( + LABEL_PGPOLICY = "pgpolicy" + LABEL_PGREMOVE = "pgremove" + LABEL_PVCNAME = "pvcname" + LABEL_EXPORTER = "crunchy-postgres-exporter" + LABEL_ARCHIVE = "archive" + LABEL_ARCHIVE_TIMEOUT = "archive-timeout" + LABEL_NODE_AFFINITY_TYPE = "node-affinity-type" + LABEL_NODE_LABEL_KEY = "NodeLabelKey" + LABEL_NODE_LABEL_VALUE = "NodeLabelValue" + LABEL_REPLICA_NAME = "replica-name" + LABEL_CCP_IMAGE_TAG_KEY = "ccp-image-tag" + LABEL_CCP_IMAGE_KEY = "ccp-image" + LABEL_IMAGE_PREFIX = "image-prefix" + LABEL_POD_ANTI_AFFINITY = "pg-pod-anti-affinity" +) + +const ( + LABEL_REPLICA_COUNT = "replica-count" + LABEL_STORAGE_CONFIG = "storage-config" + LABEL_NODE_LABEL = "node-label" + LABEL_VERSION = "version" + LABEL_PGO_VERSION = "pgo-version" + LABEL_DELETE_DATA = "delete-data" + LABEL_DELETE_DATA_STARTED = "delete-data-started" + LABEL_DELETE_BACKUPS = "delete-backups" + LABEL_IS_REPLICA = "is-replica" + LABEL_IS_BACKUP = "is-backup" + LABEL_RM_TOLERATIONS = "rmdata-tolerations" + LABEL_STARTUP = "startup" + LABEL_SHUTDOWN = "shutdown" +) // label for the pgcluster upgrade const LABEL_UPGRADE = "upgrade" -const LABEL_BACKREST = "pgo-backrest" -const LABEL_BACKREST_JOB = "pgo-backrest-job" -const LABEL_BACKREST_RESTORE = "pgo-backrest-restore" -const LABEL_CONTAINER_NAME = "containername" -const LABEL_POD_NAME = "podname" -const LABEL_BACKREST_REPO_SECRET = "backrest-repo-config" -const LABEL_BACKREST_COMMAND = "backrest-command" -const 
LABEL_BACKREST_RESTORE_FROM_CLUSTER = "backrest-restore-from-cluster" -const LABEL_BACKREST_RESTORE_OPTS = "backrest-restore-opts" -const LABEL_BACKREST_BACKUP_OPTS = "backrest-backup-opts" -const LABEL_BACKREST_OPTS = "backrest-opts" -const LABEL_BACKREST_PITR_TARGET = "backrest-pitr-target" -const LABEL_BACKREST_STORAGE_TYPE = "backrest-storage-type" -const LABEL_BACKREST_S3_VERIFY_TLS = "backrest-s3-verify-tls" -const LABEL_BADGER = "crunchy-pgbadger" -const LABEL_BADGER_CCPIMAGE = "crunchy-pgbadger" -const LABEL_BACKUP_TYPE_BACKREST = "pgbackrest" -const LABEL_BACKUP_TYPE_PGDUMP = "pgdump" - -const LABEL_PGDUMP_COMMAND = "pgdump" -const LABEL_PGDUMP_RESTORE = "pgdump-restore" -const LABEL_PGDUMP_OPTS = "pgdump-opts" -const LABEL_PGDUMP_HOST = "pgdump-host" -const LABEL_PGDUMP_DB = "pgdump-db" -const LABEL_PGDUMP_USER = "pgdump-user" -const LABEL_PGDUMP_PORT = "pgdump-port" -const LABEL_PGDUMP_ALL = "pgdump-all" -const LABEL_PGDUMP_PVC = "pgdump-pvc" - -const LABEL_RESTORE_TYPE_PGRESTORE = "pgrestore" -const LABEL_PGRESTORE_COMMAND = "pgrestore" -const LABEL_PGRESTORE_HOST = "pgrestore-host" -const LABEL_PGRESTORE_DB = "pgrestore-db" -const LABEL_PGRESTORE_USER = "pgrestore-user" -const LABEL_PGRESTORE_PORT = "pgrestore-port" -const LABEL_PGRESTORE_FROM_CLUSTER = "pgrestore-from-cluster" -const LABEL_PGRESTORE_FROM_PVC = "pgrestore-from-pvc" -const LABEL_PGRESTORE_OPTS = "pgrestore-opts" -const LABEL_PGRESTORE_PITR_TARGET = "pgrestore-pitr-target" - -const LABEL_DATA_ROOT = "data-root" -const LABEL_PVC_NAME = "pvc-name" -const LABEL_VOLUME_NAME = "volume-name" - -const LABEL_SESSION_ID = "sessionid" -const LABEL_USERNAME = "username" -const LABEL_ROLENAME = "rolename" -const LABEL_PASSWORD = "password" - -const LABEL_PGADMIN = "crunchy-pgadmin" -const LABEL_PGADMIN_TASK_ADD = "pgadmin-add" -const LABEL_PGADMIN_TASK_CLUSTER = "pgadmin-cluster" -const LABEL_PGADMIN_TASK_DELETE = "pgadmin-delete" +const ( + LABEL_BACKREST = "pgo-backrest" + LABEL_BACKREST_JOB = "pgo-backrest-job" + LABEL_BACKREST_RESTORE = "pgo-backrest-restore" + LABEL_CONTAINER_NAME = "containername" + LABEL_POD_NAME = "podname" + // #nosec: G101 + LABEL_BACKREST_REPO_SECRET = "backrest-repo-config" + LABEL_BACKREST_COMMAND = "backrest-command" + LABEL_BACKREST_RESTORE_FROM_CLUSTER = "backrest-restore-from-cluster" + LABEL_BACKREST_RESTORE_OPTS = "backrest-restore-opts" + LABEL_BACKREST_BACKUP_OPTS = "backrest-backup-opts" + LABEL_BACKREST_OPTS = "backrest-opts" + LABEL_BACKREST_PITR_TARGET = "backrest-pitr-target" + LABEL_BACKREST_STORAGE_TYPE = "backrest-storage-type" + LABEL_BACKREST_S3_VERIFY_TLS = "backrest-s3-verify-tls" + LABEL_BACKUP_TYPE_BACKREST = "pgbackrest" + LABEL_BACKUP_TYPE_PGDUMP = "pgdump" +) + +const ( + LABEL_PGDUMP_COMMAND = "pgdump" + LABEL_PGDUMP_RESTORE = "pgdump-restore" + LABEL_PGDUMP_OPTS = "pgdump-opts" + LABEL_PGDUMP_HOST = "pgdump-host" + LABEL_PGDUMP_DB = "pgdump-db" + LABEL_PGDUMP_USER = "pgdump-user" + LABEL_PGDUMP_PORT = "pgdump-port" + LABEL_PGDUMP_ALL = "pgdump-all" + LABEL_PGDUMP_PVC = "pgdump-pvc" +) + +const ( + LABEL_RESTORE_TYPE_PGRESTORE = "pgrestore" + LABEL_PGRESTORE_COMMAND = "pgrestore" + LABEL_PGRESTORE_HOST = "pgrestore-host" + LABEL_PGRESTORE_DB = "pgrestore-db" + LABEL_PGRESTORE_USER = "pgrestore-user" + LABEL_PGRESTORE_PORT = "pgrestore-port" + LABEL_PGRESTORE_FROM_CLUSTER = "pgrestore-from-cluster" + LABEL_PGRESTORE_FROM_PVC = "pgrestore-from-pvc" + LABEL_PGRESTORE_OPTS = "pgrestore-opts" + LABEL_PGRESTORE_PITR_TARGET = "pgrestore-pitr-target" +) + +const ( + 
LABEL_DATA_ROOT = "data-root" + LABEL_PVC_NAME = "pvc-name" + LABEL_VOLUME_NAME = "volume-name" +) + +const ( + LABEL_SESSION_ID = "sessionid" + LABEL_USERNAME = "username" + LABEL_ROLENAME = "rolename" + LABEL_PASSWORD = "password" +) + +const ( + LABEL_PGADMIN = "crunchy-pgadmin" + LABEL_PGADMIN_TASK_ADD = "pgadmin-add" + LABEL_PGADMIN_TASK_CLUSTER = "pgadmin-cluster" + LABEL_PGADMIN_TASK_DELETE = "pgadmin-delete" +) const LABEL_PGBOUNCER = "crunchy-pgbouncer" -const LABEL_JOB_NAME = "job-name" -const LABEL_PGBACKREST_STANZA = "pgbackrest-stanza" -const LABEL_PGBACKREST_DB_PATH = "pgbackrest-db-path" -const LABEL_PGBACKREST_REPO_PATH = "pgbackrest-repo-path" -const LABEL_PGBACKREST_REPO_HOST = "pgbackrest-repo-host" +const ( + LABEL_JOB_NAME = "job-name" + LABEL_PGBACKREST_STANZA = "pgbackrest-stanza" + LABEL_PGBACKREST_DB_PATH = "pgbackrest-db-path" + LABEL_PGBACKREST_REPO_PATH = "pgbackrest-repo-path" + LABEL_PGBACKREST_REPO_HOST = "pgbackrest-repo-host" +) const LABEL_PGO_BACKREST_REPO = "pgo-backrest-repo" -const LABEL_DEPLOYMENT_NAME = "deployment-name" -const LABEL_SERVICE_NAME = "service-name" -const LABEL_CURRENT_PRIMARY = "current-primary" +const ( + LABEL_DEPLOYMENT_NAME = "deployment-name" + LABEL_SERVICE_NAME = "service-name" + LABEL_CURRENT_PRIMARY = "current-primary" +) const LABEL_CLAIM_NAME = "claimName" -const LABEL_PGO_PGOUSER = "pgo-pgouser" -const LABEL_PGO_PGOROLE = "pgo-pgorole" -const LABEL_PGOUSER = "pgouser" -const LABEL_WORKFLOW_ID = "workflowid" // NOTE: this now matches crv1.PgtaskWorkflowID - -const LABEL_TRUE = "true" -const LABEL_FALSE = "false" - -const LABEL_NAMESPACE = "namespace" -const LABEL_PGO_INSTALLATION_NAME = "pgo-installation-name" -const LABEL_VENDOR = "vendor" -const LABEL_CRUNCHY = "crunchydata" -const LABEL_PGO_CREATED_BY = "pgo-created-by" -const LABEL_PGO_UPDATED_BY = "pgo-updated-by" - -const LABEL_FAILOVER_STARTED = "failover-started" +const ( + LABEL_PGO_PGOUSER = "pgo-pgouser" + LABEL_PGO_PGOROLE = "pgo-pgorole" + LABEL_PGOUSER = "pgouser" + LABEL_WORKFLOW_ID = "workflowid" // NOTE: this now matches crv1.PgtaskWorkflowID +) + +const ( + LABEL_TRUE = "true" + LABEL_FALSE = "false" +) + +const ( + LABEL_NAMESPACE = "namespace" + LABEL_PGO_INSTALLATION_NAME = "pgo-installation-name" + LABEL_VENDOR = "vendor" + LABEL_CRUNCHY = "crunchydata" + LABEL_PGO_CREATED_BY = "pgo-created-by" + LABEL_PGO_UPDATED_BY = "pgo-updated-by" +) const GLOBAL_CUSTOM_CONFIGMAP = "pgo-custom-pg-config" -const LABEL_PGHA_SCOPE = "crunchy-pgha-scope" -const LABEL_PGHA_CONFIGMAP = "pgha-config" -const LABEL_PGHA_BACKUP_TYPE = "pgha-backup-type" -const LABEL_PGHA_ROLE = "role" -const LABEL_PGHA_ROLE_PRIMARY = "master" -const LABEL_PGHA_ROLE_REPLICA = "replica" -const LABEL_PGHA_BOOTSTRAP = "pgha-bootstrap" +const ( + LABEL_PGHA_SCOPE = "crunchy-pgha-scope" + LABEL_PGHA_CONFIGMAP = "pgha-config" + LABEL_PGHA_BACKUP_TYPE = "pgha-backup-type" + LABEL_PGHA_ROLE = "role" + LABEL_PGHA_ROLE_PRIMARY = "master" + LABEL_PGHA_ROLE_REPLICA = "replica" + LABEL_PGHA_BOOTSTRAP = "pgha-bootstrap" +) diff --git a/internal/config/pgoconfig.go b/internal/config/pgoconfig.go index b867aa8d93..bf87e5253f 100644 --- a/internal/config/pgoconfig.go +++ b/internal/config/pgoconfig.go @@ -1,7 +1,7 @@ package config /* -Copyright 2018 - 2020 Crunchy Data Solutions, Inc. +Copyright 2018 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
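The label constants grouped above are most often joined into Kubernetes label selectors when listing resources, as the rmdata handler later in this diff does. A minimal sketch of that usage, assuming clientset, namespace, clusterName, and ctx are already in scope:

// list the pods that belong to one cluster via its pg-cluster label
selector := config.LABEL_PG_CLUSTER + "=" + clusterName
pods, err := clientset.CoreV1().Pods(namespace).List(ctx,
	metav1.ListOptions{LabelSelector: selector})
if err != nil {
	log.Error(err)
}
for i := range pods.Items {
	log.Debugf("found pod %s", pods.Items[i].Name)
}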
You may obtain a copy of the License at @@ -21,6 +21,7 @@ import ( "fmt" "io/ioutil" "os" + "path/filepath" "strconv" "strings" "text/template" @@ -29,6 +30,7 @@ import ( log "github.com/sirupsen/logrus" v1 "k8s.io/api/core/v1" + kerrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/validation" @@ -36,9 +38,12 @@ import ( "sigs.k8s.io/yaml" ) -const CustomConfigMapName = "pgo-config" -const DefaultConfigsPath = "/default-pgo-config/" -const CustomConfigsPath = "/pgo-config/" +const ( + CustomConfigMapName = "pgo-config" + defaultConfigPath = "/default-pgo-config/" + openShiftSCCGroup = "security.openshift.io" + openShiftSCCKind = "SecurityContextConstraints" +) var PgoDefaultServiceAccountTemplate *template.Template @@ -92,10 +97,6 @@ var ContainerResourcesTemplate *template.Template const containerResourcesTemplatePath = "container-resources.json" -var AffinityTemplate *template.Template - -const affinityTemplatePath = "affinity.json" - var PodAntiAffinityTemplate *template.Template const podAntiAffinityTemplatePath = "pod-anti-affinity.json" @@ -202,7 +203,7 @@ type ClusterStruct struct { PasswordAgeDays string PasswordLength string Replicas string - ServiceType string + ServiceType v1.ServiceType BackrestPort int BackrestS3Bucket string BackrestS3Endpoint string @@ -210,8 +211,6 @@ type ClusterStruct struct { BackrestS3URIStyle string BackrestS3VerifyTLS string DisableAutofail bool - PgmonitorPassword string - EnableCrunchyadm bool DisableReplicaStartFailReinit bool PodAntiAffinity string PodAntiAffinityPgBackRest string @@ -221,7 +220,7 @@ type ClusterStruct struct { DefaultBackrestResourceMemory resource.Quantity `json:"DefaultBackrestMemory"` DefaultPgBouncerResourceMemory resource.Quantity `json:"DefaultPgBouncerMemory"` DefaultExporterResourceMemory resource.Quantity `json:"DefaultExporterMemory"` - DisableFSGroup bool + DisableFSGroup *bool } type StorageStruct struct { @@ -258,19 +257,22 @@ type PgoConfig struct { ReplicaStorage string BackrestStorage string Storage map[string]StorageStruct + OpenShift bool } -const DEFAULT_SERVICE_TYPE = "ClusterIP" -const LOAD_BALANCER_SERVICE_TYPE = "LoadBalancer" -const NODEPORT_SERVICE_TYPE = "NodePort" -const CONFIG_PATH = "pgo.yaml" +const ( + DefaultServiceType = v1.ServiceTypeClusterIP + CONFIG_PATH = "pgo.yaml" +) -const DEFAULT_BACKREST_PORT = 2022 -const DEFAULT_PGADMIN_PORT = "5050" -const DEFAULT_PGBADGER_PORT = "10000" -const DEFAULT_EXPORTER_PORT = "9187" -const DEFAULT_POSTGRES_PORT = "5432" -const DEFAULT_PATRONI_PORT = "8009" +const ( + DEFAULT_BACKREST_PORT = 2022 + DEFAULT_PGADMIN_PORT = "5050" + DEFAULT_PGBADGER_PORT = "10000" + DEFAULT_EXPORTER_PORT = "9187" + DEFAULT_POSTGRES_PORT = "5432" + DEFAULT_PATRONI_PORT = "8009" +) func (c *PgoConfig) Validate() error { var err error @@ -340,15 +342,12 @@ func (c *PgoConfig) Validate() error { return errors.New(errPrefix + "Pgo.PGOImageTag is required") } - if c.Cluster.ServiceType == "" { - log.Warn("Cluster.ServiceType not set, using default, ClusterIP ") - c.Cluster.ServiceType = DEFAULT_SERVICE_TYPE - } else { - if c.Cluster.ServiceType != DEFAULT_SERVICE_TYPE && - c.Cluster.ServiceType != LOAD_BALANCER_SERVICE_TYPE && - c.Cluster.ServiceType != NODEPORT_SERVICE_TYPE { - return errors.New(errPrefix + "Cluster.ServiceType is required to be either ClusterIP, NodePort, or LoadBalancer") - } + // if ServiceType is set, ensure it is valid + switch c.Cluster.ServiceType { + 
default: + return fmt.Errorf("Cluster.ServiceType is an invalid ServiceType: %q", c.Cluster.ServiceType) + case v1.ServiceTypeClusterIP, v1.ServiceTypeNodePort, + v1.ServiceTypeLoadBalancer, v1.ServiceTypeExternalName, "": // no-op } if c.Cluster.CCPImagePrefix == "" { @@ -508,38 +507,31 @@ func (c *PgoConfig) GetStorageSpec(name string) (crv1.PgStorageSpec, error) { } return storage, err - } func (c *PgoConfig) GetConfig(clientset kubernetes.Interface, namespace string) error { + cMap, err := initialize(clientset, namespace) + if err != nil { + log.Errorf("could not get ConfigMap: %s", err.Error()) + return err + } - cMap, rootPath := getRootPath(clientset, namespace) - - var yamlFile []byte - var err error - - //get the pgo.yaml config file - if cMap != nil { - str := cMap.Data[CONFIG_PATH] - if str == "" { - errMsg := fmt.Sprintf("could not get %s from ConfigMap", CONFIG_PATH) - return errors.New(errMsg) - } - yamlFile = []byte(str) - } else { - yamlFile, err = ioutil.ReadFile(rootPath + CONFIG_PATH) - if err != nil { - log.Errorf("yamlFile.Get err #%v ", err) - return err - } + // get the pgo.yaml config file + str := cMap.Data[CONFIG_PATH] + if str == "" { + return fmt.Errorf("could not get %s from ConfigMap", CONFIG_PATH) } - err = yaml.Unmarshal(yamlFile, c) - if err != nil { + yamlFile := []byte(str) + + if err := yaml.Unmarshal(yamlFile, c); err != nil { log.Errorf("Unmarshal: %v", err) return err } + // determine if this cluster is inside openshift + c.OpenShift = isOpenShift(clientset) + // validate the pgo.yaml config file if err := c.Validate(); err != nil { log.Error(err) @@ -548,179 +540,174 @@ func (c *PgoConfig) GetConfig(clientset kubernetes.Interface, namespace string) c.CheckEnv() - //load up all the templates - PgoDefaultServiceAccountTemplate, err = c.LoadTemplate(cMap, rootPath, PGODefaultServiceAccountPath) - if err != nil { - return err - } - PgoBackrestServiceAccountTemplate, err = c.LoadTemplate(cMap, rootPath, PGOBackrestServiceAccountPath) + // load up all the templates + PgoDefaultServiceAccountTemplate, err = c.LoadTemplate(cMap, PGODefaultServiceAccountPath) if err != nil { return err } - PgoTargetServiceAccountTemplate, err = c.LoadTemplate(cMap, rootPath, PGOTargetServiceAccountPath) + PgoBackrestServiceAccountTemplate, err = c.LoadTemplate(cMap, PGOBackrestServiceAccountPath) if err != nil { return err } - PgoTargetRoleBindingTemplate, err = c.LoadTemplate(cMap, rootPath, PGOTargetRoleBindingPath) + PgoTargetServiceAccountTemplate, err = c.LoadTemplate(cMap, PGOTargetServiceAccountPath) if err != nil { return err } - PgoBackrestRoleTemplate, err = c.LoadTemplate(cMap, rootPath, PGOBackrestRolePath) + PgoTargetRoleBindingTemplate, err = c.LoadTemplate(cMap, PGOTargetRoleBindingPath) if err != nil { return err } - PgoBackrestRoleBindingTemplate, err = c.LoadTemplate(cMap, rootPath, PGOBackrestRoleBindingPath) + PgoBackrestRoleTemplate, err = c.LoadTemplate(cMap, PGOBackrestRolePath) if err != nil { return err } - PgoTargetRoleTemplate, err = c.LoadTemplate(cMap, rootPath, PGOTargetRolePath) + PgoBackrestRoleBindingTemplate, err = c.LoadTemplate(cMap, PGOBackrestRoleBindingPath) if err != nil { return err } - PgoPgServiceAccountTemplate, err = c.LoadTemplate(cMap, rootPath, PGOPgServiceAccountPath) + PgoTargetRoleTemplate, err = c.LoadTemplate(cMap, PGOTargetRolePath) if err != nil { return err } - PgoPgRoleTemplate, err = c.LoadTemplate(cMap, rootPath, PGOPgRolePath) + PgoPgServiceAccountTemplate, err = c.LoadTemplate(cMap, PGOPgServiceAccountPath) if err 
!= nil { return err } - PgoPgRoleBindingTemplate, err = c.LoadTemplate(cMap, rootPath, PGOPgRoleBindingPath) + PgoPgRoleTemplate, err = c.LoadTemplate(cMap, PGOPgRolePath) if err != nil { return err } - - PVCTemplate, err = c.LoadTemplate(cMap, rootPath, pvcPath) + PgoPgRoleBindingTemplate, err = c.LoadTemplate(cMap, PGOPgRoleBindingPath) if err != nil { return err } - PolicyJobTemplate, err = c.LoadTemplate(cMap, rootPath, policyJobTemplatePath) + PVCTemplate, err = c.LoadTemplate(cMap, pvcPath) if err != nil { return err } - ContainerResourcesTemplate, err = c.LoadTemplate(cMap, rootPath, containerResourcesTemplatePath) + PolicyJobTemplate, err = c.LoadTemplate(cMap, policyJobTemplatePath) if err != nil { return err } - PgoBackrestRepoServiceTemplate, err = c.LoadTemplate(cMap, rootPath, pgoBackrestRepoServiceTemplatePath) + ContainerResourcesTemplate, err = c.LoadTemplate(cMap, containerResourcesTemplatePath) if err != nil { return err } - PgoBackrestRepoTemplate, err = c.LoadTemplate(cMap, rootPath, pgoBackrestRepoTemplatePath) + PgoBackrestRepoServiceTemplate, err = c.LoadTemplate(cMap, pgoBackrestRepoServiceTemplatePath) if err != nil { return err } - PgmonitorEnvVarsTemplate, err = c.LoadTemplate(cMap, rootPath, pgmonitorEnvVarsPath) + PgoBackrestRepoTemplate, err = c.LoadTemplate(cMap, pgoBackrestRepoTemplatePath) if err != nil { return err } - PgbackrestEnvVarsTemplate, err = c.LoadTemplate(cMap, rootPath, pgbackrestEnvVarsPath) + PgmonitorEnvVarsTemplate, err = c.LoadTemplate(cMap, pgmonitorEnvVarsPath) if err != nil { return err } - PgbackrestS3EnvVarsTemplate, err = c.LoadTemplate(cMap, rootPath, pgbackrestS3EnvVarsPath) + PgbackrestEnvVarsTemplate, err = c.LoadTemplate(cMap, pgbackrestEnvVarsPath) if err != nil { return err } - PgAdminTemplate, err = c.LoadTemplate(cMap, rootPath, pgAdminTemplatePath) + PgbackrestS3EnvVarsTemplate, err = c.LoadTemplate(cMap, pgbackrestS3EnvVarsPath) if err != nil { return err } - PgAdminServiceTemplate, err = c.LoadTemplate(cMap, rootPath, pgAdminServiceTemplatePath) + PgAdminTemplate, err = c.LoadTemplate(cMap, pgAdminTemplatePath) if err != nil { return err } - PgbouncerTemplate, err = c.LoadTemplate(cMap, rootPath, pgbouncerTemplatePath) + PgAdminServiceTemplate, err = c.LoadTemplate(cMap, pgAdminServiceTemplatePath) if err != nil { return err } - PgbouncerConfTemplate, err = c.LoadTemplate(cMap, rootPath, pgbouncerConfTemplatePath) + PgbouncerTemplate, err = c.LoadTemplate(cMap, pgbouncerTemplatePath) if err != nil { return err } - PgbouncerUsersTemplate, err = c.LoadTemplate(cMap, rootPath, pgbouncerUsersTemplatePath) + PgbouncerConfTemplate, err = c.LoadTemplate(cMap, pgbouncerConfTemplatePath) if err != nil { return err } - PgbouncerHBATemplate, err = c.LoadTemplate(cMap, rootPath, pgbouncerHBATemplatePath) + PgbouncerUsersTemplate, err = c.LoadTemplate(cMap, pgbouncerUsersTemplatePath) if err != nil { return err } - ServiceTemplate, err = c.LoadTemplate(cMap, rootPath, serviceTemplatePath) + PgbouncerHBATemplate, err = c.LoadTemplate(cMap, pgbouncerHBATemplatePath) if err != nil { return err } - RmdatajobTemplate, err = c.LoadTemplate(cMap, rootPath, rmdatajobPath) + ServiceTemplate, err = c.LoadTemplate(cMap, serviceTemplatePath) if err != nil { return err } - BackrestjobTemplate, err = c.LoadTemplate(cMap, rootPath, backrestjobPath) + RmdatajobTemplate, err = c.LoadTemplate(cMap, rmdatajobPath) if err != nil { return err } - PgDumpBackupJobTemplate, err = c.LoadTemplate(cMap, rootPath, pgDumpBackupJobPath) + BackrestjobTemplate, err 
= c.LoadTemplate(cMap, backrestjobPath) if err != nil { return err } - PgRestoreJobTemplate, err = c.LoadTemplate(cMap, rootPath, pgRestoreJobPath) + PgDumpBackupJobTemplate, err = c.LoadTemplate(cMap, pgDumpBackupJobPath) if err != nil { return err } - PVCMatchLabelsTemplate, err = c.LoadTemplate(cMap, rootPath, pvcMatchLabelsPath) + PgRestoreJobTemplate, err = c.LoadTemplate(cMap, pgRestoreJobPath) if err != nil { return err } - PVCStorageClassTemplate, err = c.LoadTemplate(cMap, rootPath, pvcSCPath) + PVCMatchLabelsTemplate, err = c.LoadTemplate(cMap, pvcMatchLabelsPath) if err != nil { return err } - AffinityTemplate, err = c.LoadTemplate(cMap, rootPath, affinityTemplatePath) + PVCStorageClassTemplate, err = c.LoadTemplate(cMap, pvcSCPath) if err != nil { return err } - PodAntiAffinityTemplate, err = c.LoadTemplate(cMap, rootPath, podAntiAffinityTemplatePath) + PodAntiAffinityTemplate, err = c.LoadTemplate(cMap, podAntiAffinityTemplatePath) if err != nil { return err } - ExporterTemplate, err = c.LoadTemplate(cMap, rootPath, exporterTemplatePath) + ExporterTemplate, err = c.LoadTemplate(cMap, exporterTemplatePath) if err != nil { return err } - BadgerTemplate, err = c.LoadTemplate(cMap, rootPath, badgerTemplatePath) + BadgerTemplate, err = c.LoadTemplate(cMap, badgerTemplatePath) if err != nil { return err } - DeploymentTemplate, err = c.LoadTemplate(cMap, rootPath, deploymentTemplatePath) + DeploymentTemplate, err = c.LoadTemplate(cMap, deploymentTemplatePath) if err != nil { return err } - BootstrapTemplate, err = c.LoadTemplate(cMap, rootPath, bootstrapTemplatePath) + BootstrapTemplate, err = c.LoadTemplate(cMap, bootstrapTemplatePath) if err != nil { return err } @@ -728,20 +715,78 @@ func (c *PgoConfig) GetConfig(clientset kubernetes.Interface, namespace string) return nil } -func getRootPath(clientset kubernetes.Interface, namespace string) (*v1.ConfigMap, string) { +// getOperatorConfigMap returns the config map that contains all of the +// configuration for the Operator +func getOperatorConfigMap(clientset kubernetes.Interface, namespace string) (*v1.ConfigMap, error) { + ctx := context.TODO() + + return clientset.CoreV1().ConfigMaps(namespace).Get(ctx, CustomConfigMapName, metav1.GetOptions{}) +} + +// initialize attempts to get the configuration ConfigMap based on a name. 
+// If the ConfigMap does not exist, a ConfigMap is created from the default +// configuration path +func initialize(clientset kubernetes.Interface, namespace string) (*v1.ConfigMap, error) { ctx := context.TODO() - cMap, err := clientset.CoreV1().ConfigMaps(namespace).Get(ctx, CustomConfigMapName, metav1.GetOptions{}) - if err == nil { - log.Infof("Config: %s ConfigMap found, using config files from the configmap", CustomConfigMapName) - return cMap, "" + + // if the ConfigMap exists, return it + if cm, err := getOperatorConfigMap(clientset, namespace); err == nil { + log.Infof("Config: %q ConfigMap found, using config files from the configmap", CustomConfigMapName) + return cm, nil + } + + // otherwise, create a ConfigMap + log.Infof("Config: %q ConfigMap NOT found, creating ConfigMap from files from %q", CustomConfigMapName, defaultConfigPath) + + cm := &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: CustomConfigMapName, + Labels: map[string]string{ + LABEL_VENDOR: LABEL_CRUNCHY, + }, + }, + Data: map[string]string{}, + } + + // get all of the file names that are in the default configuration directory + if err := filepath.Walk(defaultConfigPath, func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + + // skip if a directory + if info.IsDir() { + return nil + } + + // get all of the contents of a default configuration and load it into + // a ConfigMap + if contents, err := ioutil.ReadFile(path); err != nil { + return err + } else { + cm.Data[info.Name()] = string(contents) + } + + return nil + }); err != nil { + return nil, err + } + + // create the ConfigMap. If the error is that the ConfigMap was already + // created, then fetch the existing ConfigMap + if _, err := clientset.CoreV1().ConfigMaps(namespace).Create(ctx, cm, metav1.CreateOptions{}); err != nil { + if kerrors.IsAlreadyExists(err) { + return getOperatorConfigMap(clientset, namespace) + } + + return nil, err } - log.Infof("Config: %s ConfigMap NOT found, using default baked-in config files from %s", CustomConfigMapName, DefaultConfigsPath) - return nil, DefaultConfigsPath + return cm, nil } // LoadTemplate will load a JSON template from a path -func (c *PgoConfig) LoadTemplate(cMap *v1.ConfigMap, rootPath, path string) (*template.Template, error) { +func (c *PgoConfig) LoadTemplate(cMap *v1.ConfigMap, path string) (*template.Template, error) { var value string var err error @@ -764,20 +809,18 @@ func (c *PgoConfig) LoadTemplate(cMap *v1.ConfigMap, rootPath, path string) (*te // if we have a value for the templated file, return return template.Must(template.New(path).Parse(value)), nil - } // DefaultTemplate attempts to load a default configuration template file func (c *PgoConfig) DefaultTemplate(path string) (string, error) { // set the lookup value for the file path based on the default configuration // path and the template file requested to be loaded - fullPath := DefaultConfigsPath + path + fullPath := defaultConfigPath + path log.Debugf("No entry in cmap loading default path [%s]", fullPath) // read in the file from the default path buf, err := ioutil.ReadFile(fullPath) - if err != nil { log.Errorf("error: could not read %s", fullPath) log.Error(err) @@ -817,3 +860,42 @@ func (c *PgoConfig) CheckEnv() { log.Infof("CheckEnv: using CCP_IMAGE_PREFIX env var: %s", ccpImagePrefix) } } + +// DisableFSGroup returns the value of DisableFSGroup if it is +// explicitly set; otherwise it determines the value from the environment +func (c *PgoConfig) DisableFSGroup() bool { + if 
c.Cluster.DisableFSGroup != nil { + log.Debugf("setting disable fsgroup to %t", *c.Cluster.DisableFSGroup) + return *c.Cluster.DisableFSGroup + } + + // if this is OpenShift, disable the FSGroup + log.Debugf("setting disable fsgroup to %t", c.OpenShift) + return c.OpenShift +} + +// isOpenShift returns true if we've detected that we're in an OpenShift cluster +func isOpenShift(clientset kubernetes.Interface) bool { + _, resourceLists, err := clientset.Discovery().ServerGroupsAndResources() + + if err != nil { + log.Errorf("could not get server api groups: %s", err.Error()) + return false + } + + // If we detect that the "SecurityContextConstraints" Kind is present in the + // "security.openshift.io" Group, we'll return that this is an OpenShift + // environment + for _, rl := range resourceLists { + if strings.HasPrefix(rl.GroupVersion, openShiftSCCGroup+"/") { + for _, r := range rl.APIResources { + if r.Kind == openShiftSCCKind { + log.Info("detected OpenShift environment") + return true + } + } + } + } + + return false +} diff --git a/internal/config/secrets.go b/internal/config/secrets.go new file mode 100644 index 0000000000..0d9a680b89 --- /dev/null +++ b/internal/config/secrets.go @@ -0,0 +1,19 @@ +package config + +/* + Copyright 2020 - 2023 Crunchy Data Solutions, Inc. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +// #nosec: G101 +const SecretOperatorBackrestRepoConfig = "pgo-backrest-repo-config" diff --git a/internal/config/volumes.go b/internal/config/volumes.go index d21c2d6a4e..562c7cb105 100644 --- a/internal/config/volumes.go +++ b/internal/config/volumes.go @@ -1,7 +1,7 @@ package config /* - Copyright 2019 - 2020 Crunchy Data Solutions, Inc. + Copyright 2019 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -22,8 +22,10 @@ import ( ) // volume configuration settings used by the PostgreSQL data directory and mount -const VOLUME_POSTGRESQL_DATA = "pgdata" -const VOLUME_POSTGRESQL_DATA_MOUNT_PATH = "/pgdata" +const ( + VOLUME_POSTGRESQL_DATA = "pgdata" + VOLUME_POSTGRESQL_DATA_MOUNT_PATH = "/pgdata" +) // PostgreSQLWALVolumeMount returns the VolumeMount for the PostgreSQL WAL directory. 
func PostgreSQLWALVolumeMount() core_v1.VolumeMount { @@ -36,12 +38,16 @@ func PostgreSQLWALPath(cluster string) string { } // volume configuration settings used by the pgBackRest repo mount -const VOLUME_PGBACKREST_REPO_NAME = "backrestrepo" -const VOLUME_PGBACKREST_REPO_MOUNT_PATH = "/backrestrepo" +const ( + VOLUME_PGBACKREST_REPO_NAME = "backrestrepo" + VOLUME_PGBACKREST_REPO_MOUNT_PATH = "/backrestrepo" +) // volume configuration settings used by the SSHD secret -const VOLUME_SSHD_NAME = "sshd" -const VOLUME_SSHD_MOUNT_PATH = "/sshd" +const ( + VOLUME_SSHD_NAME = "sshd" + VOLUME_SSHD_MOUNT_PATH = "/sshd" +) // volume configuration settings used by tablespaces diff --git a/internal/controller/configmap/configmapcontroller.go b/internal/controller/configmap/configmapcontroller.go index a7145ea6bf..b0c1b2b1bf 100644 --- a/internal/controller/configmap/configmapcontroller.go +++ b/internal/controller/configmap/configmapcontroller.go @@ -1,7 +1,7 @@ package configmap /* -Copyright 2020 Crunchy Data Solutions, Inc. +Copyright 2020 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -48,7 +48,6 @@ type Controller struct { func NewConfigMapController(restConfig *rest.Config, clientset kubernetes.Interface, coreInformer coreinformers.ConfigMapInformer, pgoInformer pgoinformers.PgclusterInformer, workerCount int) (*Controller, error) { - controller := &Controller{ cmRESTConfig: restConfig, kubeclientset: clientset, @@ -77,7 +76,6 @@ func NewConfigMapController(restConfig *rest.Config, // function in order to read and process a message on the worker queue. Once the worker queue // is instructed to shutdown, a message is written to the done channel. func (c *Controller) RunWorker(stopCh <-chan struct{}, doneCh chan<- struct{}) { - go c.waitForShutdown(stopCh) for c.processNextWorkItem() { @@ -105,7 +103,6 @@ func (c *Controller) ShutdownWorker() { // so, the configMap resource is converted into a namespace/name string and is then added to the // work queue func (c *Controller) enqueueConfigMap(obj interface{}) { - configMap := obj.(*apiv1.ConfigMap) labels := configMap.GetObjectMeta().GetLabels() @@ -128,7 +125,6 @@ func (c *Controller) enqueueConfigMap(obj interface{}) { // processNextWorkItem will read a single work item off the work queue and processes it via // the ConfigMap sync handler func (c *Controller) processNextWorkItem() bool { - obj, shutdown := c.workqueue.Get() if shutdown { diff --git a/internal/controller/configmap/synchandler.go b/internal/controller/configmap/synchandler.go index 9309c0555c..9f7c3a42cf 100644 --- a/internal/controller/configmap/synchandler.go +++ b/internal/controller/configmap/synchandler.go @@ -1,7 +1,7 @@ package configmap /* -Copyright 2020 Crunchy Data Solutions, Inc. +Copyright 2020 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
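Returning briefly to the discovery-based OpenShift detection added to pgoconfig.go above: because client-go ships a fake discovery client, the helper can be exercised without a live cluster. A sketch of a package-internal test under that assumption; the test itself is illustrative and not part of this diff:

package config

import (
	"testing"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	fakediscovery "k8s.io/client-go/discovery/fake"
	"k8s.io/client-go/kubernetes/fake"
)

func TestIsOpenShiftDetectsSCC(t *testing.T) {
	clientset := fake.NewSimpleClientset()

	// advertise the SecurityContextConstraints kind that isOpenShift looks for
	disc := clientset.Discovery().(*fakediscovery.FakeDiscovery)
	disc.Resources = []*metav1.APIResourceList{{
		GroupVersion: openShiftSCCGroup + "/v1",
		APIResources: []metav1.APIResource{{Kind: openShiftSCCKind}},
	}}

	if !isOpenShift(clientset) {
		t.Fatal("expected the OpenShift environment to be detected")
	}
}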
You may obtain a copy of the License at @@ -31,7 +31,6 @@ import ( // handleConfigMapSync is responsible for syncing a configMap resource that has obtained from // the ConfigMap controller's worker queue func (c *Controller) handleConfigMapSync(key string) error { - log.Debugf("ConfigMap Controller: handling a configmap sync for key %s", key) namespace, configMapName, err := cache.SplitMetaNamespaceKey(key) @@ -72,7 +71,7 @@ func (c *Controller) handleConfigMapSync(key string) error { return nil } - c.syncPGHAConfig(c.createPGHAConfigs(configMap, clusterName, + c.syncPGHAConfig(c.createPGHAConfigs(configMap, cluster.GetObjectMeta().GetLabels()[config.LABEL_PGHA_SCOPE])) return nil @@ -80,8 +79,7 @@ func (c *Controller) handleConfigMapSync(key string) error { // createConfigurerMap creates the configs needed to sync the PGHA configMap func (c *Controller) createPGHAConfigs(configMap *corev1.ConfigMap, - clusterName, clusterScope string) []cfg.Syncer { - + clusterScope string) []cfg.Syncer { var configSyncers []cfg.Syncer configSyncers = append(configSyncers, cfg.NewDCS(configMap, c.kubeclientset, clusterScope)) @@ -100,7 +98,6 @@ func (c *Controller) createPGHAConfigs(configMap *corev1.ConfigMap, // syncAllConfigs takes a map of configurers and runs their sync functions concurrently func (c *Controller) syncPGHAConfig(configSyncers []cfg.Syncer) { - var wg sync.WaitGroup for _, configSyncer := range configSyncers { diff --git a/internal/controller/controllerutil.go b/internal/controller/controllerutil.go index 7bbcd2b981..fea8edfc71 100644 --- a/internal/controller/controllerutil.go +++ b/internal/controller/controllerutil.go @@ -1,7 +1,7 @@ package controller /* -Copyright 2020 Crunchy Data Solutions, Inc. +Copyright 2020 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -21,6 +21,7 @@ import ( "errors" "github.com/crunchydata/postgres-operator/internal/config" + "github.com/crunchydata/postgres-operator/internal/kubeapi" crv1 "github.com/crunchydata/postgres-operator/pkg/apis/crunchydata.com/v1" pgo "github.com/crunchydata/postgres-operator/pkg/generated/clientset/versioned" log "github.com/sirupsen/logrus" @@ -62,17 +63,20 @@ func InitializeReplicaCreation(clientset pgo.Interface, clusterName, log.Error(err) return err } - for _, pgreplica := range pgreplicaList.Items { - if pgreplica.Annotations == nil { - pgreplica.Annotations = make(map[string]string) + for i := range pgreplicaList.Items { + patch, err := kubeapi.NewMergePatch(). + Add("metadata", "annotations")(map[string]string{ + config.ANNOTATION_PGHA_BOOTSTRAP_REPLICA: "true", + }).Bytes() + if err != nil { + log.Error(err) } - pgreplica.Annotations[config.ANNOTATION_PGHA_BOOTSTRAP_REPLICA] = "true" - - if _, err = clientset.CrunchydataV1().Pgreplicas(namespace).Update(ctx, &pgreplica, metav1.UpdateOptions{}); err != nil { + if _, err := clientset.CrunchydataV1().Pgreplicas(namespace). + Patch(ctx, pgreplicaList.Items[i].GetName(), types.MergePatchType, patch, + metav1.PatchOptions{}); err != nil { log.Error(err) - return err } } return nil diff --git a/internal/controller/job/backresthandler.go b/internal/controller/job/backresthandler.go index cab5f3d068..9d1e6f672a 100644 --- a/internal/controller/job/backresthandler.go +++ b/internal/controller/job/backresthandler.go @@ -1,7 +1,7 @@ package job /* -Copyright 2017 - 2020 Crunchy Data Solutions, Inc. 
+Copyright 2017 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -26,38 +26,35 @@ import ( "github.com/crunchydata/postgres-operator/internal/config" "github.com/crunchydata/postgres-operator/internal/controller" "github.com/crunchydata/postgres-operator/internal/kubeapi" + "github.com/crunchydata/postgres-operator/internal/operator" "github.com/crunchydata/postgres-operator/internal/operator/backrest" - clusteroperator "github.com/crunchydata/postgres-operator/internal/operator/cluster" crv1 "github.com/crunchydata/postgres-operator/pkg/apis/crunchydata.com/v1" ) // backrestUpdateHandler is responsible for handling updates to backrest jobs -func (c *Controller) handleBackrestUpdate(job *apiv1.Job) error { - +func (c *Controller) handleBackrestUpdate(job *apiv1.Job) { // return if job wasn't successful if !isJobSuccessful(job) { log.Debugf("jobController onUpdate job %s was unsuccessful and will be ignored", job.Name) - return nil + return } // return if job is being deleted if isJobInForegroundDeletion(job) { log.Debugf("jobController onUpdate job %s is being deleted and will be ignored", job.Name) - return nil + return } labels := job.GetObjectMeta().GetLabels() switch { case labels[config.LABEL_BACKREST_COMMAND] == "backup": - c.handleBackrestBackupUpdate(job) + _ = c.handleBackrestBackupUpdate(job) case labels[config.LABEL_BACKREST_COMMAND] == crv1.PgtaskBackrestStanzaCreate: - c.handleBackrestStanzaCreateUpdate(job) + _ = c.handleBackrestStanzaCreateUpdate(job) } - - return nil } // handleBackrestRestoreUpdate is responsible for handling updates to backrest backup jobs @@ -79,7 +76,7 @@ func (c *Controller) handleBackrestBackupUpdate(job *apiv1.Job) error { if err != nil { log.Errorf("error in patching pgtask %s: %s", job.ObjectMeta.SelfLink, err.Error()) } - publishBackupComplete(labels[config.LABEL_PG_CLUSTER], job.ObjectMeta.Labels[config.LABEL_PG_CLUSTER_IDENTIFIER], job.ObjectMeta.Labels[config.LABEL_PGOUSER], "pgbackrest", job.ObjectMeta.Namespace, "") + publishBackupComplete(labels[config.LABEL_PG_CLUSTER], job.ObjectMeta.Labels[config.LABEL_PGOUSER], "pgbackrest", job.ObjectMeta.Namespace, "") // If the completed backup was a cluster bootstrap backup, then mark the cluster as initialized // and initiate the creation of any replicas. 
Otherwise if the completed backup was taken as @@ -87,17 +84,22 @@ func (c *Controller) handleBackrestBackupUpdate(job *apiv1.Job) error { if labels[config.LABEL_PGHA_BACKUP_TYPE] == crv1.BackupTypeBootstrap { log.Debugf("jobController onUpdate initial backup complete") - controller.SetClusterInitializedStatus(c.Client, labels[config.LABEL_PG_CLUSTER], - job.ObjectMeta.Namespace) + if err := controller.SetClusterInitializedStatus(c.Client, labels[config.LABEL_PG_CLUSTER], + job.ObjectMeta.Namespace); err != nil { + log.Error(err) + return err + } - // now initialize the creation of any replica - controller.InitializeReplicaCreation(c.Client, labels[config.LABEL_PG_CLUSTER], - job.ObjectMeta.Namespace) + // now initialize the creation of any replicas + if err := controller.InitializeReplicaCreation(c.Client, labels[config.LABEL_PG_CLUSTER], + job.ObjectMeta.Namespace); err != nil { + log.Error(err) + return err + } } else if labels[config.LABEL_PGHA_BACKUP_TYPE] == crv1.BackupTypeFailover { - err := clusteroperator.RemovePrimaryOnRoleChangeTag(c.Client, c.Client.Config, - labels[config.LABEL_PG_CLUSTER], job.ObjectMeta.Namespace) - if err != nil { + if err := operator.RemovePrimaryOnRoleChangeTag(c.Client, c.Client.Config, + labels[config.LABEL_PG_CLUSTER], job.ObjectMeta.Namespace); err != nil { log.Error(err) return err } @@ -105,7 +107,8 @@ func (c *Controller) handleBackrestBackupUpdate(job *apiv1.Job) error { return nil } -// handleBackrestRestoreUpdate is responsible for handling updates to backrest stanza create jobs +// handleBackrestStanzaCreateUpdate is responsible for handling updates to +// backrest stanza create jobs func (c *Controller) handleBackrestStanzaCreateUpdate(job *apiv1.Job) error { ctx := context.TODO() @@ -141,7 +144,18 @@ func (c *Controller) handleBackrestStanzaCreateUpdate(job *apiv1.Job) error { if cluster.Spec.Standby { log.Debugf("job Controller: standby cluster %s will now be set to an initialized "+ "status", clusterName) - controller.SetClusterInitializedStatus(c.Client, clusterName, namespace) + if err := controller.SetClusterInitializedStatus(c.Client, clusterName, + namespace); err != nil { + log.Error(err) + return err + } + + // now initialize the creation of any replica + if err := controller.InitializeReplicaCreation(c.Client, clusterName, + namespace); err != nil { + log.Error(err) + return err + } return nil } @@ -153,8 +167,19 @@ func (c *Controller) handleBackrestStanzaCreateUpdate(job *apiv1.Job) error { return err } - backrest.CreateInitialBackup(c.Client, job.ObjectMeta.Namespace, - clusterName, backrestRepoPodName) + if _, err := backrest.CreateInitialBackup(c.Client, job.ObjectMeta.Namespace, + clusterName, backrestRepoPodName); err != nil { + log.Error(err) + return err + } + + // now that the initial backup has been initiated, proceed with deleting the stanza-create + // pgtask and associated Job. This will ensure any subsequent updates to the stanza-create + // Job do not trigger more initial backup Jobs. + if err := backrest.CleanStanzaCreateResources(namespace, clusterName, c.Client); err != nil { + log.Error(err) + return err + } } return nil diff --git a/internal/controller/job/bootstraphandler.go b/internal/controller/job/bootstraphandler.go index 4c4f383d25..84fbcdded1 100644 --- a/internal/controller/job/bootstraphandler.go +++ b/internal/controller/job/bootstraphandler.go @@ -1,7 +1,7 @@ package job /* -Copyright 2020 Crunchy Data Solutions, Inc. +Copyright 2020 - 2023 Crunchy Data Solutions, Inc. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -82,7 +82,7 @@ func (c *Controller) handleBootstrapUpdate(job *apiv1.Job) error { // If the job was successful we updated the state of the pgcluster to a "bootstrapped" status. // This will then trigger full initialization of the cluster. We also cleanup any resources - // from the bootstrap job. + // from the bootstrap job and delete the job itself if cluster.Status.State == crv1.PgclusterStateBootstrapping { if err := c.cleanupBootstrapResources(job, cluster, restore); err != nil { @@ -103,6 +103,11 @@ func (c *Controller) handleBootstrapUpdate(job *apiv1.Job) error { log.Error(err) return err } + + // as it is no longer needed, delete the job + deletePropagation := metav1.DeletePropagationBackground + return c.Client.BatchV1().Jobs(namespace).Delete(ctx, job.Name, + metav1.DeleteOptions{PropagationPolicy: &deletePropagation}) } if restore { @@ -110,7 +115,7 @@ func (c *Controller) handleBootstrapUpdate(job *apiv1.Job) error { namespace, crv1.PgtaskWorkflowBackrestRestorePrimaryCreatedStatus); err != nil { log.Warn(err) } - publishRestoreComplete(labels[config.LABEL_PG_CLUSTER], labels[config.LABEL_PG_CLUSTER_IDENTIFIER], + publishRestoreComplete(labels[config.LABEL_PG_CLUSTER], labels[config.LABEL_PGOUSER], job.ObjectMeta.Namespace) } diff --git a/internal/controller/job/jobcontroller.go b/internal/controller/job/jobcontroller.go index 85e5e82c57..dbcc708224 100644 --- a/internal/controller/job/jobcontroller.go +++ b/internal/controller/job/jobcontroller.go @@ -1,7 +1,7 @@ package job /* -Copyright 2017 - 2020 Crunchy Data Solutions, Inc. +Copyright 2017 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
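One detail of the bootstrap handler above worth isolating is the Job deletion idiom: a background propagation policy asks the garbage collector to remove the Job's Pods once the Job object is gone, instead of orphaning them. A minimal standalone sketch, assuming clientset, namespace, and jobName are in scope:

ctx := context.TODO()

// delete the finished Job; background propagation cleans up its Pods
deletePropagation := metav1.DeletePropagationBackground
if err := clientset.BatchV1().Jobs(namespace).Delete(ctx, jobName,
	metav1.DeleteOptions{PropagationPolicy: &deletePropagation}); err != nil {
	log.Error(err)
}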
You may obtain a copy of the License at @@ -33,11 +33,10 @@ type Controller struct { // onAdd is called when a postgresql operator job is created and an associated add event is // generated func (c *Controller) onAdd(obj interface{}) { - job := obj.(*apiv1.Job) labels := job.GetObjectMeta().GetLabels() - //only process jobs with with vendor=crunchydata label + // only process jobs with the vendor=crunchydata label if labels[config.LABEL_VENDOR] != "crunchydata" { return } @@ -48,12 +47,11 @@ func (c *Controller) onAdd(obj interface{}) { // onUpdate is called when a postgresql operator job is created and an associated update event is // generated func (c *Controller) onUpdate(oldObj, newObj interface{}) { - var err error job := newObj.(*apiv1.Job) labels := job.GetObjectMeta().GetLabels() - //only process jobs with with vendor=crunchydata label + // only process jobs with the vendor=crunchydata label if labels[config.LABEL_VENDOR] != "crunchydata" { return } @@ -69,7 +67,7 @@ func (c *Controller) onUpdate(oldObj, newObj interface{}) { err = c.handleRMDataUpdate(job) case labels[config.LABEL_BACKREST] == "true" || labels[config.LABEL_BACKREST_RESTORE] == "true": - err = c.handleBackrestUpdate(job) + c.handleBackrestUpdate(job) case labels[config.LABEL_BACKUP_TYPE_PGDUMP] == "true": err = c.handlePGDumpUpdate(job) case labels[config.LABEL_RESTORE_TYPE_PGRESTORE] == "true": @@ -85,11 +83,10 @@ func (c *Controller) onUpdate(oldObj, newObj interface{}) { // onDelete is called when a postgresql operator job is deleted func (c *Controller) onDelete(obj interface{}) { - job := obj.(*apiv1.Job) labels := job.GetObjectMeta().GetLabels() - //only process jobs with with vendor=crunchydata label + // only process jobs with the vendor=crunchydata label if labels[config.LABEL_VENDOR] != "crunchydata" { return } @@ -99,7 +96,6 @@ func (c *Controller) onDelete(obj interface{}) { // AddJobEventHandler adds the job event handler to the job informer func (c *Controller) AddJobEventHandler() { - c.Informer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: c.onAdd, UpdateFunc: c.onUpdate, diff --git a/internal/controller/job/jobevents.go b/internal/controller/job/jobevents.go index ef4f1a1760..3f49be3d4c 100644 --- a/internal/controller/job/jobevents.go +++ b/internal/controller/job/jobevents.go @@ -1,7 +1,7 @@ package job /* -Copyright 2019 - 2020 Crunchy Data Solutions, Inc. +Copyright 2019 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at @@ -22,7 +22,7 @@ import ( log "github.com/sirupsen/logrus" ) -func publishBackupComplete(clusterName, clusterIdentifier, username, backuptype, namespace, path string) { +func publishBackupComplete(clusterName, username, backuptype, namespace, path string) { topics := make([]string, 2) topics[0] = events.EventTopicCluster topics[1] = events.EventTopicBackup @@ -44,10 +44,9 @@ func publishBackupComplete(clusterName, clusterIdentifier, username, backuptype, if err != nil { log.Error(err.Error()) } - } -func publishRestoreComplete(clusterName, identifier, username, namespace string) { +func publishRestoreComplete(clusterName, username, namespace string) { topics := make([]string, 1) topics[0] = events.EventTopicCluster @@ -66,10 +65,9 @@ func publishRestoreComplete(clusterName, identifier, username, namespace string) if err != nil { log.Error(err.Error()) } - } -func publishDeleteClusterComplete(clusterName, identifier, username, namespace string) { +func publishDeleteClusterComplete(clusterName, username, namespace string) { topics := make([]string, 1) topics[0] = events.EventTopicCluster diff --git a/internal/controller/job/jobutil.go b/internal/controller/job/jobutil.go index 78d6bb6e34..9d742ed906 100644 --- a/internal/controller/job/jobutil.go +++ b/internal/controller/job/jobutil.go @@ -1,7 +1,7 @@ package job /* -Copyright 2020 Crunchy Data Solutions, Inc. +Copyright 2020 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at diff --git a/internal/controller/job/pgdumphandler.go b/internal/controller/job/pgdumphandler.go index 5b33285789..ea7a0a8a50 100644 --- a/internal/controller/job/pgdumphandler.go +++ b/internal/controller/job/pgdumphandler.go @@ -1,7 +1,7 @@ package job /* -Copyright 2017 - 2020 Crunchy Data Solutions, Inc. +Copyright 2017 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -45,7 +45,7 @@ func (c *Controller) handlePGDumpUpdate(job *apiv1.Job) error { status = crv1.JobErrorStatus + " [" + job.ObjectMeta.Name + "]" } - //update the pgdump task status to submitted - updates task, not the job. + // update the pgdump task status to submitted - updates task, not the job. dumpTask := labels[config.LABEL_PGTASK] patch, err := kubeapi.NewJSONPatch().Add("spec", "status")(status).Bytes() if err == nil { @@ -81,7 +81,7 @@ func (c *Controller) handlePGRestoreUpdate(job *apiv1.Job) error { status = crv1.JobErrorStatus + " [" + job.ObjectMeta.Name + "]" } - //update the pgdump task status to submitted - updates task, not the job. + // update the pgrestore task status to submitted - updates task, not the job. restoreTask := labels[config.LABEL_PGTASK] patch, err := kubeapi.NewJSONPatch().Add("spec", "status")(status).Bytes() if err == nil { diff --git a/internal/controller/job/rmdatahandler.go b/internal/controller/job/rmdatahandler.go index 0aa1624f7d..86e226a850 100644 --- a/internal/controller/job/rmdatahandler.go +++ b/internal/controller/job/rmdatahandler.go @@ -1,7 +1,7 @@ package job /* -Copyright 2017 - 2020 Crunchy Data Solutions, Inc. +Copyright 2017 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
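The pgdump and pgrestore handlers above share one idiom: building a JSON patch with the repository's kubeapi helper so only the pgtask status field is written. A minimal sketch of that idiom, assuming a controller c, namespace, taskName, and ctx in scope; the completed status value is illustrative:

// patch only /spec/status on the pgtask rather than updating the whole object
patch, err := kubeapi.NewJSONPatch().Add("spec", "status")(crv1.JobCompletedStatus).Bytes()
if err == nil {
	log.Debugf("patching task %s: %s", taskName, patch)
	_, err = c.Client.CrunchydataV1().Pgtasks(namespace).
		Patch(ctx, taskName, types.JSONPatchType, patch, metav1.PatchOptions{})
}
if err != nil {
	log.Error(err)
}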
You may obtain a copy of the License at @@ -48,7 +48,6 @@ func (c *Controller) handleRMDataUpdate(job *apiv1.Job) error { log.Debugf("jobController onUpdate rmdata job succeeded") publishDeleteClusterComplete(labels[config.LABEL_PG_CLUSTER], - job.ObjectMeta.Labels[config.LABEL_PG_CLUSTER_IDENTIFIER], job.ObjectMeta.Labels[config.LABEL_PGOUSER], job.ObjectMeta.Namespace) @@ -77,7 +76,7 @@ func (c *Controller) handleRMDataUpdate(job *apiv1.Job) error { return fmt.Errorf("could not remove Job %s for some reason after max tries", job.Name) } - //if a user has specified --archive for a cluster then + // if a user has specified --archive for a cluster then // an xlog PVC will be present and can be removed pvcName := clusterName + "-xlog" if err := pvc.DeleteIfExists(c.Client.Clientset, pvcName, job.Namespace); err != nil { @@ -85,7 +84,7 @@ func (c *Controller) handleRMDataUpdate(job *apiv1.Job) error { return err } - //delete any completed jobs for this cluster as a cleanup + // delete any completed jobs for this cluster as a cleanup jobList, err := c.Client. BatchV1().Jobs(job.Namespace). List(ctx, metav1.ListOptions{LabelSelector: config.LABEL_PG_CLUSTER + "=" + clusterName}) diff --git a/internal/controller/manager/controllermanager.go b/internal/controller/manager/controllermanager.go index bb2c2e1039..18721ead64 100644 --- a/internal/controller/manager/controllermanager.go +++ b/internal/controller/manager/controllermanager.go @@ -1,7 +1,7 @@ package manager /* -Copyright 2020 Crunchy Data Solutions, Inc. +Copyright 2020 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -61,6 +61,8 @@ type ControllerManager struct { pgoConfig config.PgoConfig pgoNamespace string sem *semaphore.Weighted + + NewKubernetesClient func() (*kubeapi.Client, error) } // controllerGroup is a struct for managing the various controllers created to handle events @@ -82,7 +84,6 @@ type controllerGroup struct { func NewControllerManager(namespaces []string, pgoConfig config.PgoConfig, pgoNamespace, installationName string, namespaceOperatingMode ns.NamespaceOperatingMode) (*ControllerManager, error) { - controllerManager := ControllerManager{ controllers: make(map[string]*controllerGroup), installationName: installationName, @@ -90,6 +91,8 @@ func NewControllerManager(namespaces []string, pgoConfig: pgoConfig, pgoNamespace: pgoNamespace, sem: semaphore.NewWeighted(1), + + NewKubernetesClient: kubeapi.NewClient, } // create controller groups for each namespace provided @@ -119,7 +122,6 @@ func NewControllerManager(namespaces []string, // easily started as needed). Each controller group also receives its own clients, which can then // be utilized by the various controllers within that controller group. func (c *ControllerManager) AddGroup(namespace string) error { - c.mgrMutex.Lock() defer c.mgrMutex.Unlock() @@ -135,7 +137,6 @@ func (c *ControllerManager) AddGroup(namespace string) error { // AddAndRunGroup is a convenience function that adds a controller group for the // namespace specified, and then immediately runs the controllers in that group. 
func (c *ControllerManager) AddAndRunGroup(namespace string) error { - if c.controllers[namespace] != nil && !c.pgoConfig.Pgo.DisableReconcileRBAC { // first reconcile RBAC in the target namespace if RBAC reconciliation is enabled c.reconcileRBAC(namespace) @@ -161,7 +162,6 @@ func (c *ControllerManager) AddAndRunGroup(namespace string) error { // RemoveAll removes all controller groups managed by the controller manager, first stopping all // controllers within each controller group managed by the controller manager. func (c *ControllerManager) RemoveAll() { - c.mgrMutex.Lock() defer c.mgrMutex.Unlock() @@ -175,7 +175,6 @@ func (c *ControllerManager) RemoveAll() { // RemoveGroup removes the controller group for the namespace specified, first stopping all // controllers within that group func (c *ControllerManager) RemoveGroup(namespace string) { - c.mgrMutex.Lock() defer c.mgrMutex.Unlock() @@ -184,7 +183,6 @@ func (c *ControllerManager) RemoveGroup(namespace string) { // RunAll runs all controllers across all controller groups managed by the controller manager. func (c *ControllerManager) RunAll() error { - c.mgrMutex.Lock() defer c.mgrMutex.Unlock() @@ -201,7 +199,6 @@ func (c *ControllerManager) RunAll() error { // RunGroup runs the controllers within the controller group for the namespace specified. func (c *ControllerManager) RunGroup(namespace string) error { - c.mgrMutex.Lock() defer c.mgrMutex.Unlock() @@ -222,14 +219,13 @@ func (c *ControllerManager) RunGroup(namespace string) error { // addControllerGroup adds a new controller group for the namespace specified func (c *ControllerManager) addControllerGroup(namespace string) error { - if _, ok := c.controllers[namespace]; ok { log.Debugf("Controller Manager: a controller for namespace %s already exists", namespace) return controller.ErrControllerGroupExists } // create a client for kube resources - client, err := kubeapi.NewClient() + client, err := c.NewKubernetesClient() if err != nil { log.Error(err) return err @@ -260,7 +256,7 @@ func (c *ControllerManager) addControllerGroup(namespace string) error { } pgReplicacontroller := &pgreplica.Controller{ - Clientset: client, + Client: client, Queue: workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()), Informer: pgoInformerFactory.Crunchydata().V1().Pgreplicas(), PgreplicaWorkerCount: *c.pgoConfig.Pgo.PGReplicaWorkerCount, @@ -336,7 +332,6 @@ func (c *ControllerManager) addControllerGroup(namespace string) error { // hasListerPrivs verifies the Operator has the privileges required to start the controllers // for the namespace specified. func (c *ControllerManager) hasListerPrivs(namespace string) bool { - controllerGroup := c.controllers[namespace] var err error @@ -385,7 +380,6 @@ func (c *ControllerManager) hasListerPrivs(namespace string) bool { // runControllerGroup is responsible running the controllers for the controller group corresponding // to the namespace provided func (c *ControllerManager) runControllerGroup(namespace string) error { - controllerGroup := c.controllers[namespace] hasListerPrivs := c.hasListerPrivs(namespace) @@ -438,7 +432,6 @@ func (c *ControllerManager) runControllerGroup(namespace string) error { // queues associated with the controllers inside of the controller group are first shutdown // prior to removing the controller group. 
func (c *ControllerManager) removeControllerGroup(namespace string) { - if _, ok := c.controllers[namespace]; !ok { log.Debugf("Controller Manager: no controller group to remove for ns %s", namespace) return @@ -454,7 +447,6 @@ func (c *ControllerManager) removeControllerGroup(namespace string) { // done by calling the ShutdownWorker function associated with the controller. If the controller // does not have a ShutdownWorker function then no action is taken. func (c *ControllerManager) stopControllerGroup(namespace string) { - if _, ok := c.controllers[namespace]; !ok { log.Debugf("Controller Manager: unable to stop controller group for namespace %s because "+ "a controller group for this namespace does not exist", namespace) diff --git a/internal/controller/manager/rbac.go b/internal/controller/manager/rbac.go index bb972e5202..e33d0a4dd9 100644 --- a/internal/controller/manager/rbac.go +++ b/internal/controller/manager/rbac.go @@ -1,7 +1,7 @@ package manager /* -Copyright 2020 Crunchy Data Solutions, Inc. +Copyright 2020 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -83,7 +83,6 @@ func (c *ControllerManager) reconcileRBAC(targetNamespace string) { // reconcileRoles reconciles the Roles required by the operator in a target namespace func (c *ControllerManager) reconcileRoles(targetNamespace string) { - reconcileRoles := map[string]*template.Template{ ns.PGO_TARGET_ROLE: config.PgoTargetRoleTemplate, ns.PGO_BACKREST_ROLE: config.PgoBackrestRoleTemplate, @@ -101,7 +100,6 @@ func (c *ControllerManager) reconcileRoles(targetNamespace string) { // reconcileRoleBindings reconciles the RoleBindings required by the operator in a // target namespace func (c *ControllerManager) reconcileRoleBindings(targetNamespace string) { - reconcileRoleBindings := map[string]*template.Template{ ns.PGO_TARGET_ROLE_BINDING: config.PgoTargetRoleBindingTemplate, ns.PGO_BACKREST_ROLE_BINDING: config.PgoBackrestRoleBindingTemplate, @@ -120,7 +118,6 @@ func (c *ControllerManager) reconcileRoleBindings(targetNamespace string) { // target namespace func (c *ControllerManager) reconcileServiceAccounts(targetNamespace string, imagePullSecrets []v1.LocalObjectReference) (saCreatedOrUpdated bool) { - reconcileServiceAccounts := map[string]*template.Template{ ns.PGO_DEFAULT_SERVICE_ACCOUNT: config.PgoDefaultServiceAccountTemplate, ns.PGO_TARGET_SERVICE_ACCOUNT: config.PgoTargetServiceAccountTemplate, diff --git a/internal/controller/namespace/namespacecontroller.go b/internal/controller/namespace/namespacecontroller.go index 6fc85f644f..757307fd53 100644 --- a/internal/controller/namespace/namespacecontroller.go +++ b/internal/controller/namespace/namespacecontroller.go @@ -1,7 +1,7 @@ package namespace /* -Copyright 2019 - 2020 Crunchy Data Solutions, Inc. +Copyright 2019 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -43,7 +43,6 @@ type Controller struct { // PostgreSQL Operator are added and deleted. 
func NewNamespaceController(controllerManager controller.Manager, informer coreinformers.NamespaceInformer, workerCount int) (*Controller, error) { - controller := &Controller{ ControllerManager: controllerManager, Informer: informer, @@ -72,7 +71,6 @@ func NewNamespaceController(controllerManager controller.Manager, // function in order to read and process a message on the worker queue. Once the worker queue // is instructed to shutdown, a message is written to the done channel. func (c *Controller) RunWorker(stopCh <-chan struct{}) { - go c.waitForShutdown(stopCh) for c.processNextWorkItem() { @@ -96,7 +94,6 @@ func (c *Controller) ShutdownWorker() { // so, the namespace resource is converted into a namespace/name string and is then added to the // work queue func (c *Controller) enqueueNamespace(obj interface{}) { - var key string var err error if key, err = cache.MetaNamespaceKeyFunc(obj); err != nil { @@ -109,7 +106,6 @@ func (c *Controller) enqueueNamespace(obj interface{}) { // processNextWorkItem will read a single work item off the work queue and processes it via // the Namespace sync handler func (c *Controller) processNextWorkItem() bool { - obj, shutdown := c.workqueue.Get() if shutdown { diff --git a/internal/controller/pgcluster/pgclustercontroller.go b/internal/controller/pgcluster/pgclustercontroller.go index d8da6658d9..ce095bbdd4 100644 --- a/internal/controller/pgcluster/pgclustercontroller.go +++ b/internal/controller/pgcluster/pgclustercontroller.go @@ -1,7 +1,7 @@ package pgcluster /* -Copyright 2017 - 2020 Crunchy Data Solutions, Inc. +Copyright 2017 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -18,21 +18,24 @@ limitations under the License. import ( "context" "encoding/json" - "io/ioutil" "reflect" - "strconv" "strings" "github.com/crunchydata/postgres-operator/internal/config" "github.com/crunchydata/postgres-operator/internal/kubeapi" + "github.com/crunchydata/postgres-operator/internal/operator" backrestoperator "github.com/crunchydata/postgres-operator/internal/operator/backrest" clusteroperator "github.com/crunchydata/postgres-operator/internal/operator/cluster" + "github.com/crunchydata/postgres-operator/internal/operator/pvc" "github.com/crunchydata/postgres-operator/internal/util" crv1 "github.com/crunchydata/postgres-operator/pkg/apis/crunchydata.com/v1" informers "github.com/crunchydata/postgres-operator/pkg/generated/informers/externalversions/crunchydata.com/v1" log "github.com/sirupsen/logrus" + appsv1 "k8s.io/api/apps/v1" + kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/tools/cache" "k8s.io/client-go/util/workqueue" @@ -59,7 +62,6 @@ func (c *Controller) onAdd(obj interface{}) { // processNextWorkItem function in order to read and process a message on the // workqueue. func (c *Controller) RunWorker(stopCh <-chan struct{}, doneCh chan<- struct{}) { - go c.waitForShutdown(stopCh) for c.processNextItem() { @@ -97,7 +99,7 @@ func (c *Controller) processNextItem() bool { // parallel. 
defer c.Queue.Done(key) - //get the pgcluster + // get the pgcluster cluster, err := c.Client.CrunchydataV1().Pgclusters(keyNamespace).Get(ctx, keyResourceName, metav1.GetOptions{}) if err != nil { log.Debugf("cluster add - pgcluster not found, this is invalid") @@ -115,8 +117,6 @@ func (c *Controller) processNextItem() bool { return true } - addIdentifier(cluster) - // If bootstrapping from an existing data source then attempt to create the pgBackRest repository. // If a repo already exists (e.g. because it is associated with a currently running cluster) then // proceed with bootstrapping. @@ -173,6 +173,9 @@ func (c *Controller) processNextItem() bool { func (c *Controller) onUpdate(oldObj, newObj interface{}) { oldcluster := oldObj.(*crv1.Pgcluster) newcluster := newObj.(*crv1.Pgcluster) + // initialize a slice that may contain functions that need to be executed + // as part of a rolling update + rollingUpdateFuncs := [](func(kubeapi.Interface, *crv1.Pgcluster, *appsv1.Deployment) error){} log.Debugf("pgcluster onUpdate for cluster %s (namespace %s)", newcluster.ObjectMeta.Namespace, newcluster.ObjectMeta.Name) @@ -188,33 +191,33 @@ func (c *Controller) onUpdate(oldObj, newObj interface{}) { // if the 'shutdown' parameter in the pgcluster update shows that the cluster should be either // shutdown or started but its current status does not properly reflect that it is, then // proceed with the logic needed to either shutdown or start the cluster + // + // we do need to check if the status has info in it. There have been cases + // where the entire status has been removed that could be external to the + // operator itself. In the case of checking that the state is in a shutdown + // phase, we also want to check if the status is completely empty. If it is, + // we will proceed with the shutdown. if newcluster.Spec.Shutdown && newcluster.Status.State != crv1.PgclusterStateShutdown { - clusteroperator.ShutdownCluster(c.Client, *newcluster) - } else if !newcluster.Spec.Shutdown && - newcluster.Status.State == crv1.PgclusterStateShutdown { - clusteroperator.StartupCluster(c.Client, *newcluster) - } - - // check to see if the "autofail" label on the pgcluster CR has been changed from either true to false, or from - // false to true. If it has been changed to false, autofail will then be disabled in the pg cluster. If has - // been changed to true, autofail will then be enabled in the pg cluster - if newcluster.ObjectMeta.Labels[config.LABEL_AUTOFAIL] != "" { - autofailEnabledOld, err := strconv.ParseBool(oldcluster.ObjectMeta.Labels[config.LABEL_AUTOFAIL]) - if err != nil { + if err := clusteroperator.ShutdownCluster(c.Client, *newcluster); err != nil { log.Error(err) - return } - autofailEnabledNew, err := strconv.ParseBool(newcluster.ObjectMeta.Labels[config.LABEL_AUTOFAIL]) - if err != nil { + } else if !newcluster.Spec.Shutdown && + (newcluster.Status.State == crv1.PgclusterStateShutdown || newcluster.Status.State == "") { + if err := clusteroperator.StartupCluster(c.Client, *newcluster); err != nil { log.Error(err) - return - } - if autofailEnabledNew != autofailEnabledOld { - util.ToggleAutoFailover(c.Client, autofailEnabledNew, - newcluster.ObjectMeta.Labels[config.LABEL_PGHA_SCOPE], - newcluster.ObjectMeta.Namespace) } + } + // check to see if autofail setting has been changed. If set to "true", it + // will be disabled, otherwise it will be enabled. Simple. 
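// For illustration: the spec field checked just below is a *disable* flag,
// while ToggleAutoFailover expects an *enabled* flag, hence the inversion. A
// tiny standalone sketch of that pattern; toggleAutoFailover here is a
// hypothetical stand-in, not the operator's util function:
package main

import "fmt"

func toggleAutoFailover(enabled bool) error {
	fmt.Printf("autofailover enabled: %v\n", enabled)
	return nil
}

func main() {
	oldDisable, newDisable := false, true

	if oldDisable != newDisable {
		// pass the inverse: the spec says "disable", the function wants "enable";
		// on error, log and continue, matching the controller's behavior
		if err := toggleAutoFailover(!newDisable); err != nil {
			fmt.Println(err)
		}
	}
}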
+ if oldcluster.Spec.DisableAutofail != newcluster.Spec.DisableAutofail { + // take the inverse, as this func checks for autofail being enabled + // if we can't toggle autofailover, log the error but continue on + if err := util.ToggleAutoFailover(c.Client, !newcluster.Spec.DisableAutofail, + newcluster.ObjectMeta.Labels[config.LABEL_PGHA_SCOPE], + newcluster.ObjectMeta.Namespace); err != nil { + log.Error(err) + } } // handle standby being enabled and disabled for the cluster @@ -230,16 +233,57 @@ func (c *Controller) onUpdate(oldObj, newObj interface{}) { } } + // if the service type has changed, update the service type. Log an error if + // it fails, but continue on + if oldcluster.Spec.ServiceType != newcluster.Spec.ServiceType { + updateServices(c.Client, newcluster) + } + + // see if we are adding / removing the metrics collection sidecar + if oldcluster.Spec.Exporter != newcluster.Spec.Exporter { + var err error + + // determine if the sidecar is being enabled/disabled and take the precursor + // actions before the deployment template is modified + if newcluster.Spec.Exporter { + err = clusteroperator.AddExporter(c.Client, c.Client.Config, newcluster) + } else { + err = clusteroperator.RemoveExporter(c.Client, c.Client.Config, newcluster) + } + + if err == nil { + rollingUpdateFuncs = append(rollingUpdateFuncs, clusteroperator.UpdateExporterSidecar) + } else { + log.Errorf("could not update metrics collection sidecar: %q", err.Error()) + } + } + + // see if we are adding / removing the pgBadger sidecar + if oldcluster.Spec.PGBadger != newcluster.Spec.PGBadger { + var err error + + // determine if the sidecar is being enabled/disabled and take the precursor + // actions before the deployment template is modified + if newcluster.Spec.PGBadger { + err = clusteroperator.AddPGBadger(c.Client, c.Client.Config, newcluster) + } else { + err = clusteroperator.RemovePGBadger(c.Client, c.Client.Config, newcluster) + } + + if err == nil { + rollingUpdateFuncs = append(rollingUpdateFuncs, clusteroperator.UpdatePGBadgerSidecar) + } else { + log.Errorf("could not update pgbadger sidecar: %q", err.Error()) + } + } + // see if any of the resource values have changed for the database or exporter container, // if so, update them if !reflect.DeepEqual(oldcluster.Spec.Resources, newcluster.Spec.Resources) || !reflect.DeepEqual(oldcluster.Spec.Limits, newcluster.Spec.Limits) || !reflect.DeepEqual(oldcluster.Spec.ExporterResources, newcluster.Spec.ExporterResources) || !reflect.DeepEqual(oldcluster.Spec.ExporterLimits, newcluster.Spec.ExporterLimits) { - if err := clusteroperator.UpdateResources(c.Client, c.Client.Config, newcluster); err != nil { - log.Error(err) - return - } + rollingUpdateFuncs = append(rollingUpdateFuncs, clusteroperator.UpdateResources) } // see if any of the pgBackRest repository resource values have changed, and @@ -268,30 +312,114 @@ func (c *Controller) onUpdate(oldObj, newObj interface{}) { log.Error(err) return } + rollingUpdateFuncs = append(rollingUpdateFuncs, clusteroperator.UpdateTablespaces) } // check to see if any of the annotations have been modified, in particular, // the non-system annotations if !reflect.DeepEqual(oldcluster.Spec.Annotations, newcluster.Spec.Annotations) { - if err := updateAnnotations(c, oldcluster, newcluster); err != nil { + if changed, err := updateAnnotations(c, oldcluster, newcluster); err != nil { log.Error(err) return + } else if changed { + // append the PostgreSQL specific functions as part of a rolling update + rollingUpdateFuncs = 
append(rollingUpdateFuncs, clusteroperator.UpdateAnnotations) } } + + // check to see if any tolerations have been modified + if !reflect.DeepEqual(oldcluster.Spec.Tolerations, newcluster.Spec.Tolerations) { + rollingUpdateFuncs = append(rollingUpdateFuncs, clusteroperator.UpdateTolerations) + } + + // if there is no need to perform a rolling update, exit here + if len(rollingUpdateFuncs) == 0 { + return + } + + // otherwise, create an anonymous function that executes each of the rolling + // update functions as part of the rolling update + if err := clusteroperator.RollingUpdate(c.Client, c.Client.Config, newcluster, + func(clientset kubeapi.Interface, cluster *crv1.Pgcluster, deployment *appsv1.Deployment) error { + for _, fn := range rollingUpdateFuncs { + if err := fn(clientset, cluster, deployment); err != nil { + return err + } + } + return nil + }); err != nil { + log.Error(err) + return + } } // onDelete is called when a pgcluster is deleted func (c *Controller) onDelete(obj interface{}) { - //cluster := obj.(*crv1.Pgcluster) - // log.Debugf("[Controller] ns=%s onDelete %s", cluster.ObjectMeta.Namespace, cluster.ObjectMeta.SelfLink) + ctx := context.TODO() + cluster := obj.(*crv1.Pgcluster) + + log.Debugf("pgcluster onDelete for cluster %s (namespace %s)", cluster.Name, cluster.Namespace) + + // guard: if an upgrade is in progress, do not do any of the rest + if _, ok := cluster.ObjectMeta.GetAnnotations()[config.ANNOTATION_UPGRADE_IN_PROGRESS]; ok { + log.Debug("upgrade in progress, not proceeding with additional cleanups") + return + } + + // guard: see if the "rmdata Job" is running. + options := metav1.ListOptions{ + LabelSelector: fields.AndSelectors( + fields.OneTermEqualSelector(config.LABEL_PG_CLUSTER, cluster.Name), + fields.OneTermEqualSelector(config.LABEL_RMDATA, config.LABEL_TRUE), + ).String(), + } + + jobs, err := c.Client.BatchV1().Jobs(cluster.Namespace).List(ctx, options) + + if err != nil { + log.Error(err) + return + } + + // iterate through the list of Jobs and see if any are currently active or + // succeeded. + // a succeeded Job could be a remnant of an old Job for the cluster of the + // same name, in which case, we can continue with deleting the cluster + for _, job := range jobs.Items { + // we will return for one of two reasons: + // 1. if the Job is currently active + // 2. if the Job is not active but has never completed and is below the + // backoff limit -- this could be evidence that the Job is retrying + if job.Status.Active > 0 { + return + } else if job.Status.Succeeded < 1 && job.Status.Failed < *job.Spec.BackoffLimit { + return + } + } + + // we need to create a special pgtask that will create the Job (I know). So + // let's attempt to do that here. First, clear out any other pgtask with this + // existing name. If it errors because it's not found, we're OK + taskName := cluster.Name + "-rmdata" + if err := c.Client.CrunchydataV1().Pgtasks(cluster.Namespace).Delete( + ctx, taskName, metav1.DeleteOptions{}); err != nil && !kerrors.IsNotFound(err) { + log.Error(err) + return + } + + // determine if the data directory or backups should be kept + _, keepBackups := cluster.ObjectMeta.GetAnnotations()[config.ANNOTATION_CLUSTER_KEEP_BACKUPS] + _, keepData := cluster.ObjectMeta.GetAnnotations()[config.ANNOTATION_CLUSTER_KEEP_DATA] - //handle pgcluster cleanup - // clusteroperator.DeleteClusterBase(c.PgclusterClientset, c.PgclusterClient, cluster, cluster.ObjectMeta.Namespace) + // create the deletion job.
this will delete any data and backups for this + cluster + if err := util.CreateRMDataTask(c.Client, cluster, "", !keepBackups, !keepData, false, false); err != nil { + log.Error(err) + } } // AddPGClusterEventHandler adds the pgcluster event handler to the pgcluster informer func (c *Controller) AddPGClusterEventHandler() { - c.Informer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: c.onAdd, UpdateFunc: c.onUpdate, @@ -301,23 +429,17 @@ func (c *Controller) AddPGClusterEventHandler() { log.Debugf("pgcluster Controller: added event handler to informer") } -func addIdentifier(clusterCopy *crv1.Pgcluster) { - u, err := ioutil.ReadFile("/proc/sys/kernel/random/uuid") - if err != nil { - log.Error(err) - } - - clusterCopy.ObjectMeta.Labels[config.LABEL_PG_CLUSTER_IDENTIFIER] = string(u[:len(u)-1]) -} - // updateAnnotations updates any custom annotations that may be on the managed // deployments, which includes: // // - globally applied annotations -// - postgres instance specific annotations // - pgBackRest instance specific annotations // - pgBouncer instance specific annotations -func updateAnnotations(c *Controller, oldCluster *crv1.Pgcluster, newCluster *crv1.Pgcluster) error { +// +// The Postgres specific annotations need to be handled by the caller function, +// due to the fact they need to be applied in a rolling update manner that can +// be controlled. We indicate this to the calling function by returning "true" +func updateAnnotations(c *Controller, oldCluster *crv1.Pgcluster, newCluster *crv1.Pgcluster) (bool, error) { // so we have a two-tier problem we need to solve: // 1. Which of the deployment types are being modified (or in the case of // global, all of them)? @@ -371,25 +493,19 @@ func updateAnnotations(c *Controller, oldCluster *crv1.Pgcluster, newCluster *cr // so if there are changes, we can apply them to the various deployments, // but only do so if we have to - if len(annotationsPostgres) != 0 { - if err := clusteroperator.UpdateAnnotations(c.Client, c.Client.Config, newCluster, annotationsPostgres); err != nil { - return err - } - } - if len(annotationsBackrest) != 0 { if err := backrestoperator.UpdateAnnotations(c.Client, newCluster, annotationsBackrest); err != nil { - return err + return false, err } } if len(annotationsPgBouncer) != 0 { - if err := clusteroperator.UpdatePgBouncerAnnotations(c.Client, newCluster, annotationsPgBouncer); err != nil { - return err + if err := clusteroperator.UpdatePgBouncerAnnotations(c.Client, newCluster, annotationsPgBouncer); err != nil && !kerrors.IsNotFound(err) { + return false, err + } } - return nil + return len(annotationsPostgres) != 0, nil } // updatePgBouncer updates the pgBouncer Deployment to reflect any changes that @@ -417,31 +533,102 @@ func updatePgBouncer(c *Controller, oldCluster *crv1.Pgcluster, newCluster *crv1 return clusteroperator.UpdatePgbouncer(c.Client, oldCluster, newCluster) } +// updateServices handles any updates to the Service objects. Given how legacy +// replica services are handled (really, replica service singular), the update +// around replica services is a bit grotty, but it is what it is. +// +// If there are errors on the updates, this logs them but will continue on +// unless otherwise noted.
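// For illustration: onUpdate above accumulates per-concern update functions
// (resources, tolerations, annotations when updateAnnotations reports a
// change) and hands RollingUpdate one composed closure, so each deployment is
// modified once and restarted once. A self-contained sketch of that
// composition with illustrative types, not the operator's real signatures:
package main

import "fmt"

type updateFunc func(deployment string) error

// rollingUpdate applies one composed modification to each deployment in turn,
// standing in for clusteroperator.RollingUpdate
func rollingUpdate(deployments []string, apply updateFunc) error {
	for _, d := range deployments {
		if err := apply(d); err != nil {
			return err
		}
		fmt.Printf("rolled deployment %s\n", d)
	}
	return nil
}

func main() {
	funcs := []updateFunc{
		func(d string) error { fmt.Printf("  update resources on %s\n", d); return nil },
		func(d string) error { fmt.Printf("  update tolerations on %s\n", d); return nil },
	}

	if len(funcs) == 0 {
		return // nothing changed; mirrors the early exit in onUpdate
	}

	_ = rollingUpdate([]string{"hippo", "hippo-lgxz"}, func(d string) error {
		for _, fn := range funcs {
			if err := fn(d); err != nil {
				return err
			}
		}
		return nil
	})
}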
+func updateServices(clientset kubeapi.Interface, cluster *crv1.Pgcluster) { + ctx := context.TODO() + + // handle the primary instance + if err := clusteroperator.UpdateClusterService(clientset, cluster); err != nil { + log.Error(err) + } + + // if there is a pgBouncer and the pgBouncer service type value is empty, + // update the pgBouncer Service + if cluster.Spec.PgBouncer.Enabled() && cluster.Spec.PgBouncer.ServiceType == "" { + if err := clusteroperator.UpdatePgBouncerService(clientset, cluster); err != nil { + log.Error(err) + } + } + + // handle the replica instances. Ish. This is kind of "broken" due to the + // fact that we have a single service for all of the replicas. So, we'll + // loop through all of the replicas and try to see if any of them have + // any specialized service types. If so, we'll pluck that one out and use + // it to apply to the replica Service + options := metav1.ListOptions{ + LabelSelector: fields.OneTermEqualSelector(config.LABEL_PG_CLUSTER, cluster.Name).String(), + } + replicas, err := clientset.CrunchydataV1().Pgreplicas(cluster.Namespace).List(ctx, options) + + // well, if there is an error here, log it and abort + if err != nil { + log.Error(err) + return + } + + // if there are no replicas, also return + if len(replicas.Items) == 0 { + return + } + + // ok, we're guaranteed at least one replica, so there should be a Service + var replica *crv1.Pgreplica + for i := range replicas.Items { + // store the replica no matter what, for later comparison + replica = &replicas.Items[i] + // however, if the servicetype is customized, break out. Yup. + if replica.Spec.ServiceType != "" { + break + } + } + + if err := clusteroperator.UpdateReplicaService(clientset, cluster, replica); err != nil { + log.Error(err) + } +} + // updateTablespaces updates the PostgreSQL instance Deployments to reflect the // new PostgreSQL tablespaces that should be added func updateTablespaces(c *Controller, oldCluster *crv1.Pgcluster, newCluster *crv1.Pgcluster) error { - // to help the Operator function do less work, we will get a list of new - // tablespaces. Though these are already present in the CRD, this will isolate - // exactly which PVCs need to be created - // - // To do this, iterate through the the tablespace mount map that is present in - // the new cluster. - newTablespaces := map[string]crv1.PgStorageSpec{} + // first, get a list of all of the instance deployments for the cluster + deployments, err := operator.GetInstanceDeployments(c.Client, newCluster) + if err != nil { + return err + } + // iterate through the tablespace mount map that is present in the new + // cluster and create any new PVCs for tablespaceName, storageSpec := range newCluster.Spec.TablespaceMounts { // if the tablespace does not exist in the old version of the cluster, // then add it in! - if _, ok := oldCluster.Spec.TablespaceMounts[tablespaceName]; !ok { - log.Debugf("new tablespace found: [%s]", tablespaceName) - - newTablespaces[tablespaceName] = storageSpec + if _, ok := oldCluster.Spec.TablespaceMounts[tablespaceName]; ok { + continue } - } - // alright, update the tablespace entries for this cluster! - // if it returns an error, pass the error back up to the caller - if err := clusteroperator.UpdateTablespaces(c.Client, c.Client.Config, newCluster, newTablespaces); err != nil { - return err + log.Debugf("new tablespace found: [%s]", tablespaceName) + + // This is a new tablespace, great. Create the new PVCs.
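// For illustration: the replica-service selection in updateServices above
// keeps the last replica seen but breaks early on the first one with a
// customized ServiceType. A standalone sketch of that selection, with
// illustrative types:
package main

import "fmt"

type pgreplica struct {
	Name        string
	ServiceType string
}

func pickServiceReplica(replicas []pgreplica) *pgreplica {
	var picked *pgreplica
	for i := range replicas {
		picked = &replicas[i] // store the replica no matter what
		if picked.ServiceType != "" {
			break // the first customized service type wins
		}
	}
	return picked
}

func main() {
	replicas := []pgreplica{
		{Name: "hippo-abcd"},
		{Name: "hippo-efgh", ServiceType: "NodePort"},
		{Name: "hippo-ijkl"},
	}
	fmt.Println(pickServiceReplica(replicas).Name) // hippo-efgh
}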
+ // The PVCs are created for each **instance** in the cluster, as every + // instance needs to have a distinct PVC for each tablespace + // get the name of the tablespace PVC for that instance. + for _, deployment := range deployments.Items { + tablespacePVCName := operator.GetTablespacePVCName(deployment.Name, tablespaceName) + + log.Debugf("creating tablespace PVC [%s] for [%s]", tablespacePVCName, deployment.Name) + + // Now create it! If it errors, we just need to return, which + // potentially leaves things in an inconsistent state, but at this point + // only PVC objects have been created + if _, err := pvc.CreateIfNotExists(c.Client, storageSpec, tablespacePVCName, + newCluster.Name, newCluster.Namespace); err != nil { + return err + } + } } return nil diff --git a/internal/controller/pgpolicy/pgpolicycontroller.go b/internal/controller/pgpolicy/pgpolicycontroller.go index a9eef2e3ec..53ffd4fa78 100644 --- a/internal/controller/pgpolicy/pgpolicycontroller.go +++ b/internal/controller/pgpolicy/pgpolicycontroller.go @@ -1,7 +1,7 @@ package pgpolicy /* -Copyright 2017 - 2020 Crunchy Data Solutions, Inc. +Copyright 2017 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -44,8 +44,8 @@ func (c *Controller) onAdd(obj interface{}) { policy := obj.(*crv1.Pgpolicy) log.Debugf("[pgpolicy Controller] onAdd ns=%s %s", policy.ObjectMeta.Namespace, policy.ObjectMeta.SelfLink) - //handle the case of when a pgpolicy is already processed, which - //is the case when the operator restarts + // handle the case of when a pgpolicy is already processed, which + // is the case when the operator restarts if policy.Status.State == crv1.PgpolicyStateProcessed { log.Debug("pgpolicy " + policy.ObjectMeta.Name + " already processed") return @@ -65,7 +65,7 @@ func (c *Controller) onAdd(obj interface{}) { log.Errorf("ERROR updating pgpolicy status: %s", err.Error()) } - //publish event + // publish event topics := make([]string, 1) topics[0] = events.EventTopicPolicy @@ -84,7 +84,6 @@ func (c *Controller) onAdd(obj interface{}) { if err != nil { log.Error(err.Error()) } - } // onUpdate is called when a pgpolicy is updated @@ -98,7 +97,7 @@ func (c *Controller) onDelete(obj interface{}) { log.Debugf("DELETED pgpolicy %s", policy.ObjectMeta.Name) - //publish event + // publish event topics := make([]string, 1) topics[0] = events.EventTopicPolicy @@ -117,12 +116,10 @@ func (c *Controller) onDelete(obj interface{}) { if err != nil { log.Error(err.Error()) } - } // AddPGPolicyEventHandler adds the pgpolicy event handler to the pgpolicy informer func (c *Controller) AddPGPolicyEventHandler() { - c.Informer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: c.onAdd, UpdateFunc: c.onUpdate, diff --git a/internal/controller/pgreplica/pgreplicacontroller.go b/internal/controller/pgreplica/pgreplicacontroller.go index e3d10128c9..2468ad0bc5 100644 --- a/internal/controller/pgreplica/pgreplicacontroller.go +++ b/internal/controller/pgreplica/pgreplicacontroller.go @@ -1,7 +1,7 @@ package pgreplica /* -Copyright 2017 - 2020 Crunchy Data Solutions, Inc. +Copyright 2017 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -18,15 +18,20 @@ limitations under the License. 
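// For illustration: the tablespace handling above fans out one PVC per
// instance for each newly added mount. A sketch of that loop; the PVC naming
// helper and its format here are assumptions for illustration (the real one
// is operator.GetTablespacePVCName):
package main

import "fmt"

func tablespacePVCName(deployment, tablespace string) string {
	// assumed format, for illustration only
	return fmt.Sprintf("%s-tablespace-%s", deployment, tablespace)
}

func main() {
	oldMounts := map[string]bool{"fast": true}
	newMounts := map[string]bool{"fast": true, "archive": true}
	instances := []string{"hippo", "hippo-lgxz"}

	for name := range newMounts {
		if oldMounts[name] {
			continue // tablespace already existed; nothing to create
		}
		// every instance needs its own distinct PVC for the new tablespace
		for _, instance := range instances {
			fmt.Printf("create PVC %s for instance %s\n", tablespacePVCName(instance, name), instance)
		}
	}
}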
import ( "context" "encoding/json" + "reflect" "strings" "github.com/crunchydata/postgres-operator/internal/config" "github.com/crunchydata/postgres-operator/internal/kubeapi" clusteroperator "github.com/crunchydata/postgres-operator/internal/operator/cluster" + "github.com/crunchydata/postgres-operator/internal/util" crv1 "github.com/crunchydata/postgres-operator/pkg/apis/crunchydata.com/v1" informers "github.com/crunchydata/postgres-operator/pkg/generated/informers/externalversions/crunchydata.com/v1" + log "github.com/sirupsen/logrus" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/tools/cache" "k8s.io/client-go/util/workqueue" @@ -34,7 +39,7 @@ import ( // Controller holds the connections for the controller type Controller struct { - Clientset kubeapi.Interface + Client *kubeapi.Client Queue workqueue.RateLimitingInterface Informer informers.PgreplicaInformer PgreplicaWorkerCount int @@ -44,7 +49,6 @@ type Controller struct { // processNextWorkItem function in order to read and process a message on the // workqueue. func (c *Controller) RunWorker(stopCh <-chan struct{}, doneCh chan<- struct{}) { - go c.waitForShutdown(stopCh) for c.processNextItem() { @@ -85,7 +89,7 @@ func (c *Controller) processNextItem() bool { // in this case, the de-dupe logic is to test whether a replica // deployment exists already , if so, then we don't create another // backup job - _, err := c.Clientset. + _, err := c.Client. AppsV1().Deployments(keyNamespace). Get(ctx, keyResourceName, metav1.GetOptions{}) @@ -96,9 +100,9 @@ func (c *Controller) processNextItem() bool { } else { log.Debugf("working...no replica found, means we process") - //handle the case of when a pgreplica is added which is - //scaling up a cluster - replica, err := c.Clientset.CrunchydataV1().Pgreplicas(keyNamespace).Get(ctx, keyResourceName, metav1.GetOptions{}) + // handle the case of when a pgreplica is added which is + // scaling up a cluster + replica, err := c.Client.CrunchydataV1().Pgreplicas(keyNamespace).Get(ctx, keyResourceName, metav1.GetOptions{}) if err != nil { log.Error(err) c.Queue.Forget(key) // NB(cbandy): This should probably be a retry. @@ -106,7 +110,7 @@ func (c *Controller) processNextItem() bool { } // get the pgcluster resource for the cluster the replica is a part of - cluster, err := c.Clientset.CrunchydataV1().Pgclusters(keyNamespace).Get(ctx, replica.Spec.ClusterName, metav1.GetOptions{}) + cluster, err := c.Client.CrunchydataV1().Pgclusters(keyNamespace).Get(ctx, replica.Spec.ClusterName, metav1.GetOptions{}) if err != nil { log.Error(err) c.Queue.Forget(key) // NB(cbandy): This should probably be a retry. @@ -115,7 +119,7 @@ func (c *Controller) processNextItem() bool { // only process pgreplica if cluster has been initialized if cluster.Status.State == crv1.PgclusterStateInitialized { - clusteroperator.ScaleBase(c.Clientset, replica, replica.ObjectMeta.Namespace) + clusteroperator.ScaleBase(c.Client, replica, replica.ObjectMeta.Namespace) patch, err := json.Marshal(map[string]interface{}{ "status": crv1.PgreplicaStatus{ @@ -124,7 +128,7 @@ func (c *Controller) processNextItem() bool { }, }) if err == nil { - _, err = c.Clientset.CrunchydataV1().Pgreplicas(replica.Namespace). + _, err = c.Client.CrunchydataV1().Pgreplicas(replica.Namespace). 
Patch(ctx, replica.Name, types.MergePatchType, patch, metav1.PatchOptions{}) } if err != nil { @@ -138,7 +142,7 @@ func (c *Controller) processNextItem() bool { }, }) if err == nil { - _, err = c.Clientset.CrunchydataV1().Pgreplicas(replica.Namespace). + _, err = c.Client.CrunchydataV1().Pgreplicas(replica.Namespace). Patch(ctx, replica.Name, types.MergePatchType, patch, metav1.PatchOptions{}) } if err != nil { @@ -155,8 +159,8 @@ func (c *Controller) processNextItem() bool { func (c *Controller) onAdd(obj interface{}) { replica := obj.(*crv1.Pgreplica) - //handle the case of pgreplicas being processed already and - //when the operator restarts + // handle the case of pgreplicas being processed already and + // when the operator restarts if replica.Status.State == crv1.PgreplicaStateProcessed { log.Debug("pgreplica " + replica.ObjectMeta.Name + " already processed") return @@ -167,20 +171,20 @@ func (c *Controller) onAdd(obj interface{}) { log.Debugf("onAdd putting key in queue %s", key) c.Queue.Add(key) } - } // onUpdate is called when a pgreplica is updated func (c *Controller) onUpdate(oldObj, newObj interface{}) { ctx := context.TODO() + oldPgreplica := oldObj.(*crv1.Pgreplica) newPgreplica := newObj.(*crv1.Pgreplica) log.Debugf("[pgreplica Controller] onUpdate ns=%s %s", newPgreplica.ObjectMeta.Namespace, newPgreplica.ObjectMeta.SelfLink) // get the pgcluster resource for the cluster the replica is a part of - cluster, err := c.Clientset. + cluster, err := c.Client. CrunchydataV1().Pgclusters(newPgreplica.Namespace). Get(ctx, newPgreplica.Spec.ClusterName, metav1.GetOptions{}) if err != nil { @@ -190,7 +194,7 @@ func (c *Controller) onUpdate(oldObj, newObj interface{}) { // only process pgreplica if cluster has been initialized if cluster.Status.State == crv1.PgclusterStateInitialized && newPgreplica.Spec.Status != "complete" { - clusteroperator.ScaleBase(c.Clientset, newPgreplica, + clusteroperator.ScaleBase(c.Client, newPgreplica, newPgreplica.ObjectMeta.Namespace) patch, err := json.Marshal(map[string]interface{}{ @@ -200,13 +204,63 @@ func (c *Controller) onUpdate(oldObj, newObj interface{}) { }, }) if err == nil { - _, err = c.Clientset.CrunchydataV1().Pgreplicas(newPgreplica.Namespace). + _, err = c.Client.CrunchydataV1().Pgreplicas(newPgreplica.Namespace). 
Patch(ctx, newPgreplica.Name, types.MergePatchType, patch, metav1.PatchOptions{}) } if err != nil { log.Errorf("ERROR updating pgreplica status: %s", err.Error()) } } + + // if the service type changed, update it on the instance + // if there is an error, log but continue + if oldPgreplica.Spec.ServiceType != newPgreplica.Spec.ServiceType { + if err := clusteroperator.UpdateReplicaService(c.Client, cluster, newPgreplica); err != nil { + log.Error(err) + } + } + + // if the tolerations array changed, update the tolerations on the instance + if !reflect.DeepEqual(oldPgreplica.Spec.Tolerations, newPgreplica.Spec.Tolerations) { + // get the Deployment object associated with this instance + deployment, err := c.Client.AppsV1().Deployments(newPgreplica.Namespace).Get(ctx, + newPgreplica.Name, metav1.GetOptions{}) + + if err != nil { + log.Errorf("could not find instance for pgreplica: %q", err.Error()) + return + } + + // determine the current Pod -- this is required to stop the instance + pods, err := c.Client.CoreV1().Pods(deployment.Namespace).List(ctx, metav1.ListOptions{ + FieldSelector: fields.OneTermEqualSelector("status.phase", string(v1.PodRunning)).String(), + LabelSelector: fields.OneTermEqualSelector(config.LABEL_DEPLOYMENT_NAME, deployment.Name).String(), + }) + + // Even if there are errors with the Pods, we will continue on updating the + // Deployment + if err != nil { + log.Warn(err) + } else if len(pods.Items) == 0 { + log.Infof("not shutting down PostgreSQL instance [%s] as the Pod cannot be found", deployment.Name) + } else { + // get the first pod off the items list + pod := pods.Items[0] + + // we want to stop PostgreSQL on this instance to ensure all transactions + // are safely flushed before we restart + if err := util.StopPostgreSQLInstance(c.Client, c.Client.Config, &pod, deployment.Name); err != nil { + log.Warn(err) + } + } + + // apply the tolerations and update the Deployment + deployment.Spec.Template.Spec.Tolerations = newPgreplica.Spec.Tolerations + + if _, err := c.Client.AppsV1().Deployments(deployment.Namespace).Update(ctx, deployment, metav1.UpdateOptions{}); err != nil { + log.Errorf("could not update deployment for pgreplica update: %q", err.Error()) + } + } } // onDelete is called when a pgreplica is deleted @@ -215,26 +269,24 @@ func (c *Controller) onDelete(obj interface{}) { replica := obj.(*crv1.Pgreplica) log.Debugf("[pgreplica Controller] OnDelete ns=%s %s", replica.ObjectMeta.Namespace, replica.ObjectMeta.SelfLink) - //make sure we are not removing a replica deployment - //that is now the primary after a failover - dep, err := c.Clientset. + // make sure we are not removing a replica deployment + // that is now the primary after a failover + dep, err := c.Client. AppsV1().Deployments(replica.ObjectMeta.Namespace).
Get(ctx, replica.Spec.Name, metav1.GetOptions{}) if err == nil { if dep.ObjectMeta.Labels[config.LABEL_SERVICE_NAME] == dep.ObjectMeta.Labels[config.LABEL_PG_CLUSTER] { - //the replica was made a primary at some point - //we will not scale down the deployment + // the replica was made a primary at some point + // we will not scale down the deployment log.Debugf("[pgreplica Controller] OnDelete not scaling down the replica since it is acting as a primary") } else { - clusteroperator.ScaleDownBase(c.Clientset, replica, replica.ObjectMeta.Namespace) + clusteroperator.ScaleDownBase(c.Client, replica, replica.ObjectMeta.Namespace) } } - } // AddPGReplicaEventHandler adds the pgreplica event handler to the pgreplica informer func (c *Controller) AddPGReplicaEventHandler() { - // Your custom resource event handlers. c.Informer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: c.onAdd, diff --git a/internal/controller/pgtask/backresthandler.go b/internal/controller/pgtask/backresthandler.go index e8f0534e6b..eba65e67b6 100644 --- a/internal/controller/pgtask/backresthandler.go +++ b/internal/controller/pgtask/backresthandler.go @@ -1,7 +1,7 @@ package pgtask /* -Copyright 2020 Crunchy Data Solutions, Inc. +Copyright 2020 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -56,8 +56,7 @@ func (c *Controller) handleBackrestRestore(task *crv1.Pgtask) { } log.Debugf("pgtask Controller: added restore job for cluster %s", clusterName) - backrestoperator.PublishRestore(cluster.ObjectMeta.Labels[config.LABEL_PG_CLUSTER_IDENTIFIER], - clusterName, task.ObjectMeta.Labels[config.LABEL_PGOUSER], namespace) + backrestoperator.PublishRestore(clusterName, task.ObjectMeta.Labels[config.LABEL_PGOUSER], namespace) err = backrestoperator.UpdateWorkflow(c.Client, task.Spec.Parameters[crv1.PgtaskWorkflowID], namespace, crv1.PgtaskWorkflowBackrestRestoreJobCreatedStatus) diff --git a/internal/controller/pgtask/pgtaskcontroller.go b/internal/controller/pgtask/pgtaskcontroller.go index 3d3706d5fa..88b59fb37d 100644 --- a/internal/controller/pgtask/pgtaskcontroller.go +++ b/internal/controller/pgtask/pgtaskcontroller.go @@ -1,7 +1,7 @@ package pgtask /* -Copyright 2017 - 2020 Crunchy Data Solutions, Inc. +Copyright 2017 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -29,7 +29,9 @@ import ( crv1 "github.com/crunchydata/postgres-operator/pkg/apis/crunchydata.com/v1" pgo "github.com/crunchydata/postgres-operator/pkg/generated/clientset/versioned" informers "github.com/crunchydata/postgres-operator/pkg/generated/informers/externalversions/crunchydata.com/v1" + log "github.com/sirupsen/logrus" + appsv1 "k8s.io/api/apps/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/tools/cache" @@ -48,7 +50,6 @@ type Controller struct { // processNextWorkItem function in order to read and process a message on the // workqueue. 
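// For illustration: the pgreplica onDelete guard above compares two labels on
// the deployment; if the service-name label matches the cluster label, the
// "replica" is currently acting as the primary and must not be scaled down.
// A minimal sketch with plain maps; the label keys are assumed stand-ins for
// config.LABEL_SERVICE_NAME and config.LABEL_PG_CLUSTER:
package main

import "fmt"

func safeToScaleDown(labels map[string]string) bool {
	return labels["service-name"] != labels["pg-cluster"]
}

func main() {
	promoted := map[string]string{"service-name": "hippo", "pg-cluster": "hippo"}
	replica := map[string]string{"service-name": "hippo-replica", "pg-cluster": "hippo"}

	fmt.Println(safeToScaleDown(promoted)) // false: acting as primary, leave it alone
	fmt.Println(safeToScaleDown(replica))  // true: safe to scale down
}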
func (c *Controller) RunWorker(stopCh <-chan struct{}, doneCh chan<- struct{}) { - go c.waitForShutdown(stopCh) for c.processNextItem() { @@ -93,7 +94,7 @@ func (c *Controller) processNextItem() bool { return true } - //update pgtask + // update pgtask patch, err := json.Marshal(map[string]interface{}{ "status": crv1.PgtaskStatus{ State: crv1.PgtaskStateProcessed, @@ -110,7 +111,7 @@ func (c *Controller) processNextItem() bool { return true } - //process the incoming task + // process the incoming task switch tmpTask.Spec.TaskType { case crv1.PgtaskPgAdminAdd: log.Debug("add pgadmin task added") @@ -121,12 +122,19 @@ func (c *Controller) processNextItem() bool { case crv1.PgtaskUpgrade: log.Debug("upgrade task added") clusteroperator.AddUpgrade(c.Client, tmpTask, keyNamespace) - case crv1.PgtaskFailover: - log.Debug("failover task added") - if !dupeFailover(c.Client, tmpTask, keyNamespace) { - clusteroperator.FailoverBase(keyNamespace, c.Client, tmpTask, c.Client.Config) + case crv1.PgtaskRollingUpdate: + log.Debug("rolling update task added") + // first, attempt to get the pgcluster object + clusterName := tmpTask.Spec.Parameters[config.LABEL_PG_CLUSTER] + + if cluster, err := c.Client.CrunchydataV1().Pgclusters(tmpTask.Namespace). + Get(ctx, clusterName, metav1.GetOptions{}); err == nil { + if err := clusteroperator.RollingUpdate(c.Client, c.Client.Config, cluster, + func(kubeapi.Interface, *crv1.Pgcluster, *appsv1.Deployment) error { return nil }); err != nil { + log.Errorf("rolling update failed: %q", err.Error()) + } } else { - log.Debugf("skipping duplicate onAdd failover task %s/%s", keyNamespace, keyResourceName) + log.Debugf("rolling update failed: could not find cluster %q", clusterName) } case crv1.PgtaskDeleteData: @@ -136,9 +144,6 @@ func (c *Controller) processNextItem() bool { } else { log.Debugf("skipping duplicate onAdd delete data task %s/%s", keyNamespace, keyResourceName) } - case crv1.PgtaskDeleteBackups: - log.Debug("delete backups task added") - taskoperator.RemoveBackups(keyNamespace, c.Client, tmpTask) case crv1.PgtaskBackrest: log.Debug("backrest task added") backrestoperator.Backrest(keyNamespace, c.Client, tmpTask) @@ -152,9 +157,6 @@ func (c *Controller) processNextItem() bool { case crv1.PgtaskpgRestore: log.Debug("pgDump restore task added") pgdumpoperator.Restore(keyNamespace, c.Client, tmpTask) - - case crv1.PgtaskAutoFailover: - log.Debugf("autofailover task added %s", keyResourceName) case crv1.PgtaskWorkflow: log.Debugf("workflow task added [%s] ID [%s]", keyResourceName, tmpTask.Spec.Parameters[crv1.PgtaskWorkflowID]) @@ -164,15 +166,14 @@ func (c *Controller) processNextItem() bool { c.Queue.Forget(key) return true - } // onAdd is called when a pgtask is added func (c *Controller) onAdd(obj interface{}) { task := obj.(*crv1.Pgtask) - //handle the case of when the operator restarts, we do not want - //to process pgtasks already processed + // handle the case of when the operator restarts, we do not want + // to process pgtasks already processed if task.Status.State == crv1.PgtaskStateProcessed { log.Debug("pgtask " + task.ObjectMeta.Name + " already processed") return @@ -183,12 +184,11 @@ func (c *Controller) onAdd(obj interface{}) { log.Debugf("task putting key in queue %s", key) c.Queue.Add(key) } - } // onUpdate is called when a pgtask is updated func (c *Controller) onUpdate(oldObj, newObj interface{}) { - //task := newObj.(*crv1.Pgtask) + // task := newObj.(*crv1.Pgtask) // log.Debugf("[Controller] onUpdate ns=%s %s", task.ObjectMeta.Namespace, 
task.ObjectMeta.SelfLink) } @@ -198,7 +198,6 @@ func (c *Controller) onDelete(obj interface{}) { // AddPGTaskEventHandler adds the pgtask event handler to the pgtask informer func (c *Controller) AddPGTaskEventHandler() { - c.Informer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: c.onAdd, UpdateFunc: c.onUpdate, @@ -208,32 +207,14 @@ func (c *Controller) AddPGTaskEventHandler() { log.Debugf("pgtask Controller: added event handler to informer") } -//de-dupe logic for a failover, if the failover started -//parameter is set, it means a failover has already been -//started on this -func dupeFailover(clientset pgo.Interface, task *crv1.Pgtask, ns string) bool { - ctx := context.TODO() - tmp, err := clientset.CrunchydataV1().Pgtasks(ns).Get(ctx, task.Spec.Name, metav1.GetOptions{}) - if err != nil { - //a big time error if this occurs - return false - } - - if tmp.Spec.Parameters[config.LABEL_FAILOVER_STARTED] == "" { - return false - } - - return true -} - -//de-dupe logic for a delete data, if the delete data job started -//parameter is set, it means a delete data job has already been -//started on this +// de-dupe logic for a delete data, if the delete data job started +// parameter is set, it means a delete data job has already been +// started on this func dupeDeleteData(clientset pgo.Interface, task *crv1.Pgtask, ns string) bool { ctx := context.TODO() tmp, err := clientset.CrunchydataV1().Pgtasks(ns).Get(ctx, task.Spec.Name, metav1.GetOptions{}) if err != nil { - //a big time error if this occurs + // a big time error if this occurs return false } diff --git a/internal/controller/pod/inithandler.go b/internal/controller/pod/inithandler.go index 64b3134e8c..d57fc72a64 100644 --- a/internal/controller/pod/inithandler.go +++ b/internal/controller/pod/inithandler.go @@ -1,7 +1,7 @@ package pod /* -Copyright 2017 - 2020 Crunchy Data Solutions, Inc. +Copyright 2017 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -18,7 +18,7 @@ limitations under the License. import ( "context" "fmt" - "strconv" + "time" "github.com/crunchydata/postgres-operator/internal/config" "github.com/crunchydata/postgres-operator/internal/controller" @@ -29,18 +29,19 @@ import ( taskoperator "github.com/crunchydata/postgres-operator/internal/operator/task" "github.com/crunchydata/postgres-operator/internal/util" crv1 "github.com/crunchydata/postgres-operator/pkg/apis/crunchydata.com/v1" + + log "github.com/sirupsen/logrus" apiv1 "k8s.io/api/core/v1" kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/types" - - log "github.com/sirupsen/logrus" + "k8s.io/apimachinery/pkg/util/wait" ) // handleClusterInit is responsible for proceeding with initialization of the PG cluster once the // primary PG pod for a new or restored PG cluster reaches a ready status func (c *Controller) handleClusterInit(newPod *apiv1.Pod, cluster *crv1.Pgcluster) error { - clusterName := cluster.GetName() // first check to see if the update is a repo pod. 
If so, then call repo init handler and @@ -76,7 +77,6 @@ func (c *Controller) handleClusterInit(newPod *apiv1.Pod, cluster *crv1.Pgcluste // handleBackRestRepoInit handles cluster initialization tasks that must be executed once // as a result of an update to a cluster's pgBackRest repository pod func (c *Controller) handleBackRestRepoInit(newPod *apiv1.Pod, cluster *crv1.Pgcluster) error { - // if the repo pod is for a cluster bootstrap, then kick off the bootstrap job and return if _, ok := newPod.GetLabels()[config.LABEL_PGHA_BOOTSTRAP]; ok { if err := clusteroperator.AddClusterBootstrap(c.Client, cluster); err != nil { @@ -86,6 +86,16 @@ func (c *Controller) handleBackRestRepoInit(newPod *apiv1.Pod, cluster *crv1.Pgc return nil } + // first: a sanity check that there exists a primary deployment to scale. this + // is to attempt to avoid any silent failures in the deployment scaling + // function. + // + // If we do encounter an error, we will proceed in case the deployment becomes + // available after. + if err := c.waitForPrimaryDeployment(cluster); err != nil { + log.Warn(err) + } + clusterInfo, err := clusteroperator.ScaleClusterDeployments(c.Client, *cluster, 1, true, false, false, false) if err != nil { @@ -103,21 +113,21 @@ // regardless of the specific type of cluster (e.g. regular or standby) or the reason the // cluster is being initialized (initial bootstrap or restore) func (c *Controller) handleCommonInit(cluster *crv1.Pgcluster) error { + // Disable autofailover in the cluster that is now "Ready" if autofailover + // is disabled for the cluster + if cluster.Spec.DisableAutofail { + // accepts the inverse + if err := util.ToggleAutoFailover(c.Client, !cluster.Spec.DisableAutofail, + cluster.ObjectMeta.Labels[config.LABEL_PGHA_SCOPE], cluster.Namespace); err != nil { + log.Error(err) + } + } - // Disable autofailover in the cluster that is now "Ready" if the autofail label is set - // to "false" on the pgcluster (i.e. label "autofail=true") - autofailEnabled, err := strconv.ParseBool(cluster.ObjectMeta.Labels[config.LABEL_AUTOFAIL]) - if err != nil { + if err := operator.UpdatePGHAConfigInitFlag(c.Client, false, cluster.Name, + cluster.Namespace); err != nil { log.Error(err) - return err - } else if !autofailEnabled { - util.ToggleAutoFailover(c.Client, false, - cluster.ObjectMeta.Labels[config.LABEL_PGHA_SCOPE], cluster.Namespace) } - operator.UpdatePGHAConfigInitFlag(c.Client, false, cluster.Name, - cluster.Namespace) - return nil } @@ -149,9 +159,8 @@ func (c *Controller) handleBootstrapInit(newPod *apiv1.Pod, cluster *crv1.Pgclus taskoperator.CompleteCreateClusterWorkflow(clusterName, c.Client, namespace) - //publish event for cluster complete - publishClusterComplete(clusterName, namespace, cluster) - // + // publish event for cluster complete + _ = publishClusterComplete(clusterName, namespace, cluster) // first clean any stanza create resources from a previous stanza-create, e.g.
during a // restore when these resources may already exist from initial creation of the cluster @@ -185,18 +194,18 @@ func (c *Controller) handleStandbyInit(cluster *crv1.Pgcluster) error { taskoperator.CompleteCreateClusterWorkflow(clusterName, c.Client, namespace) - //publish event for cluster complete - publishClusterComplete(clusterName, namespace, cluster) - // + // publish event for cluster complete + _ = publishClusterComplete(clusterName, namespace, cluster) // now scale any replica deployments to 1 - clusteroperator.ScaleClusterDeployments(c.Client, *cluster, 1, false, true, false, false) + _, _ = clusteroperator.ScaleClusterDeployments(c.Client, *cluster, 1, false, true, false, false) // Proceed with stanza-creation if this is not a standby cluster, or if it's // a standby cluster that does not have "s3" storage only enabled. // If this is a standby cluster and the pgBackRest storage type is set // to "s3" for S3 storage only, set the cluster to an initialized status. - if cluster.Spec.UserLabels[config.LABEL_BACKREST_STORAGE_TYPE] != "s3" { + if !(len(cluster.Spec.BackrestStorageTypes) == 1 && + cluster.Spec.BackrestStorageTypes[0] == crv1.BackrestStorageTypeS3) { // first try to delete any existing stanza create task and/or job if err := c.Client.CrunchydataV1().Pgtasks(namespace). Delete(ctx, fmt.Sprintf("%s-%s", clusterName, crv1.PgtaskBackrestStanzaCreate), @@ -212,15 +221,21 @@ } backrestoperator.StanzaCreate(namespace, clusterName, c.Client) } else { - controller.SetClusterInitializedStatus(c.Client, clusterName, namespace) - } + if err := controller.SetClusterInitializedStatus(c.Client, clusterName, + namespace); err != nil { + log.Error(err) + } - // If a standby cluster initialize the creation of any replicas. Replicas - // can be initialized right away, i.e. there is no dependency on - // stanza-creation and/or the creation of any backups, since the replicas - // will be generated from the pgBackRest repository of an external PostgreSQL - // database (which should already exist). - controller.InitializeReplicaCreation(c.Client, clusterName, namespace) + // If a standby cluster with s3 only, initialize the creation of any replicas. Replicas + // can be initialized right away, i.e. there is no dependency on + // stanza-creation and/or the creation of any backups, since the replicas + // will be generated from the pgBackRest repository of an external PostgreSQL + // database (which should already exist).
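// For illustration: the condition above replaces a comma-separated
// storage-type label with a typed slice; "s3 only" now means the slice has
// exactly one element and it is the S3 type. A standalone sketch of that
// predicate; the type and constant values here are assumptions for
// illustration, not the crv1 definitions:
package main

import "fmt"

type backrestStorageType string

const (
	storagePosix backrestStorageType = "posix"
	storageS3    backrestStorageType = "s3"
)

func s3Only(types []backrestStorageType) bool {
	return len(types) == 1 && types[0] == storageS3
}

func main() {
	fmt.Println(s3Only([]backrestStorageType{storageS3}))               // true: skip stanza-create
	fmt.Println(s3Only([]backrestStorageType{storagePosix, storageS3})) // false: stanza-create runs
}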
+ if err := controller.InitializeReplicaCreation(c.Client, clusterName, + namespace); err != nil { + log.Error(err) + } + } // if this is a pgbouncer enabled cluster, add a pgbouncer // Note: we only warn if we cannot create the pgBouncer, so execution can @@ -260,13 +275,13 @@ func (c *Controller) labelPostgresPodAndDeployment(newpod *apiv1.Pod) { log.Debug("which means its pod was restarted for some reason") log.Debug("we will use the service name on the deployment") serviceName = dep.ObjectMeta.Labels[config.LABEL_SERVICE_NAME] - } else if replica == false { + } else if !replica { log.Debugf("primary pod ADDED %s service-name=%s", newpod.Name, newpod.ObjectMeta.Labels[config.LABEL_PG_CLUSTER]) - //add label onto pod "service-name=clustername" + // add label onto pod "service-name=clustername" serviceName = newpod.ObjectMeta.Labels[config.LABEL_PG_CLUSTER] - } else if replica == true { + } else if replica { log.Debugf("replica pod ADDED %s service-name=%s", newpod.Name, newpod.ObjectMeta.Labels[config.LABEL_PG_CLUSTER]+"-replica") - //add label onto pod "service-name=clustername-replica" + // add label onto pod "service-name=clustername-replica" serviceName = newpod.ObjectMeta.Labels[config.LABEL_PG_CLUSTER] + "-replica" } @@ -281,12 +296,42 @@ ... return } - //add the service name label to the Deployment + // add the service name label to the Deployment log.Debugf("patching deployment %s: %s", dep.Name, patch) _, err = c.Client.AppsV1().Deployments(ns).Patch(ctx, dep.Name, types.MergePatchType, patch, metav1.PatchOptions{}) if err != nil { log.Error("could not add label to deployment on pod add") return } +} +// waitForPrimaryDeployment checks to see that a primary deployment is +// available. It does not check readiness, only that the deployment exists. This is +// used before scaling to ensure scaling does not fail silently +func (c *Controller) waitForPrimaryDeployment(cluster *crv1.Pgcluster) error { + ctx := context.TODO() + primaryDeploymentName := cluster.Annotations[config.ANNOTATION_CURRENT_PRIMARY] + options := metav1.ListOptions{ + LabelSelector: fields.AndSelectors( + fields.OneTermEqualSelector(config.LABEL_PG_CLUSTER, cluster.Name), + fields.OneTermEqualSelector(config.LABEL_PG_DATABASE, config.LABEL_TRUE), + fields.OneTermEqualSelector(config.LABEL_DEPLOYMENT_NAME, primaryDeploymentName), + ).String(), + } + + // start polling to see if the primary deployment is created + if err := wait.PollImmediate(5*time.Second, 60*time.Second, func() (bool, error) { + // check to see if the deployment exists + d, err := c.Client.AppsV1().Deployments(cluster.Namespace).List(ctx, options) + + if err != nil { + log.Warnf("could not find primary deployment for scaling: %s", err) + } + + return err == nil && len(d.Items) > 0, nil + }); err != nil { + return fmt.Errorf("primary deployment lookup timeout reached for %q", primaryDeploymentName) + } + + return nil }
You may obtain a copy of the License at @@ -40,17 +40,16 @@ type Controller struct { // onAdd is called when a pod is added func (c *Controller) onAdd(obj interface{}) { - newPod := obj.(*apiv1.Pod) newPodLabels := newPod.GetObjectMeta().GetLabels() - //only process pods with with vendor=crunchydata label + // only process pods with vendor=crunchydata label if newPodLabels[config.LABEL_VENDOR] == "crunchydata" { log.Debugf("Pod Controller: onAdd processing the addition of pod %s in namespace %s", newPod.Name, newPod.Namespace) } - //handle the case when a pg database pod is added + // handle the case when a pg database pod is added if isPostgresPod(newPod) { c.labelPostgresPodAndDeployment(newPod) return @@ -65,7 +64,7 @@ func (c *Controller) onUpdate(oldObj, newObj interface{}) { newPodLabels := newPod.GetObjectMeta().GetLabels() - //only process pods with with vendor=crunchydata label + // only process pods with vendor=crunchydata label if newPodLabels[config.LABEL_VENDOR] != "crunchydata" { return } @@ -153,7 +152,6 @@ func setCurrentPrimary(clientset pgo.Interface, newPod *apiv1.Pod, cluster *crv1 // onDelete is called when a pod is deleted func (c *Controller) onDelete(obj interface{}) { - pod := obj.(*apiv1.Pod) labels := pod.GetObjectMeta().GetLabels() @@ -165,7 +163,6 @@ func (c *Controller) onDelete(obj interface{}) { // AddPodEventHandler adds the pod event handler to the pod informer func (c *Controller) AddPodEventHandler() { - c.Informer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: c.onAdd, UpdateFunc: c.onUpdate, @@ -190,7 +187,6 @@ func isBackRestRepoBecomingReady(oldPod, newPod *apiv1.Pod) bool { // assumed to be present), specifically because this label will only be included on pgBackRest // repository Pods. func isBackRestRepoPod(newpod *apiv1.Pod) bool { - _, backrestRepoLabelExists := newpod.ObjectMeta.Labels[config.LABEL_PGO_BACKREST_REPO] return backrestRepoLabelExists @@ -237,7 +233,6 @@ func isDBContainerBecomingReady(oldPod, newPod *apiv1.Pod) bool { // this label will only be included on primary and replica PostgreSQL database pods (and will be // present as soon as the deployment and pod is created). func isPostgresPod(newpod *apiv1.Pod) bool { - _, pgDatabaseLabelExists := newpod.ObjectMeta.Labels[config.LABEL_PG_DATABASE] return pgDatabaseLabelExists diff --git a/internal/controller/pod/podevents.go b/internal/controller/pod/podevents.go index 100175baed..00e901458f 100644 --- a/internal/controller/pod/podevents.go +++ b/internal/controller/pod/podevents.go @@ -1,7 +1,7 @@ package pod /* -Copyright 2017 - 2020 Crunchy Data Solutions, Inc. +Copyright 2017 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at @@ -25,7 +25,7 @@ import ( ) func publishClusterComplete(clusterName, namespace string, cluster *crv1.Pgcluster) error { - //capture the cluster creation event + // capture the cluster creation event topics := make([]string, 1) topics[0] = events.EventTopicCluster @@ -47,5 +47,4 @@ func publishClusterComplete(clusterName, namespace string, cluster *crv1.Pgclust return err } return err - } diff --git a/internal/controller/pod/promotionhandler.go b/internal/controller/pod/promotionhandler.go index dcdcf48590..b7de3fc2df 100644 --- a/internal/controller/pod/promotionhandler.go +++ b/internal/controller/pod/promotionhandler.go @@ -1,7 +1,7 @@ package pod /* -Copyright 2020 Crunchy Data Solutions, Inc. +Copyright 2020 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -31,13 +31,19 @@ import ( log "github.com/sirupsen/logrus" apiv1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" ) +const ( + // recoverySQL is just the SQL to figure out if Postgres is in recovery mode + recoverySQL = "SELECT pg_is_in_recovery();" +) + var ( // isInRecoveryCommand is the command run to determine if postgres is in recovery - isInRecoveryCMD []string = []string{"psql", "-t", "-c", "'SELECT pg_is_in_recovery();'", "-p"} + isInRecoveryCMD []string = []string{"psql", "-t", "-c", recoverySQL, "-p"} // leaderStatusCMD is the command run to get the Patroni status for the primary leaderStatusCMD []string = []string{"curl", fmt.Sprintf("localhost:%s/master", @@ -56,14 +62,14 @@ var ( // of a failover. Specifically, this handler is triggered when a replica has been promoted, and // it now has either the "promoted" or "primary" role label. func (c *Controller) handlePostgresPodPromotion(newPod *apiv1.Pod, cluster crv1.Pgcluster) error { - if cluster.Status.State == crv1.PgclusterStateShutdown { if err := c.handleStartupInit(cluster); err != nil { return err } } - if cluster.Status.State == crv1.PgclusterStateInitialized { + // create a post-failover backup if not a standby cluster + if !cluster.Spec.Standby && cluster.Status.State == crv1.PgclusterStateInitialized { if err := cleanAndCreatePostFailoverBackup(c.Client, cluster.Name, newPod.Namespace); err != nil { log.Error(err) @@ -77,7 +83,6 @@ func (c *Controller) handlePostgresPodPromotion(newPod *apiv1.Pod, cluster crv1. // handleStartupInit is responsible for handling cluster initialization for a cluster that has been // restarted (after it was previously shutdown) func (c *Controller) handleStartupInit(cluster crv1.Pgcluster) error { - // since the cluster is just being restarted, it can just be set to initialized once the // primary is ready if err := controller.SetClusterInitializedStatus(c.Client, cluster.Name, @@ -87,7 +92,7 @@ func (c *Controller) handleStartupInit(cluster crv1.Pgcluster) error { } // now scale any replica deployments to 1 - clusteroperator.ScaleClusterDeployments(c.Client, cluster, 1, false, true, false, false) + _, _ = clusteroperator.ScaleClusterDeployments(c.Client, cluster, 1, false, true, false, false) return nil } @@ -96,7 +101,6 @@ func (c *Controller) handleStartupInit(cluster crv1.Pgcluster) error { // of disabling standby mode. Specifically, this handler is triggered when a standby leader // is turned into a regular leader.
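// For illustration: both waitForPrimaryDeployment above and
// waitForStandbyPromotion below replace hand-rolled timer/ticker loops with
// the apimachinery wait helpers. A compact, runnable sketch of the wait.Poll
// pattern, assuming only k8s.io/apimachinery/pkg/util/wait; the condition here
// is a stand-in for checks like pg_is_in_recovery():
package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	attempts := 0

	// poll every 10ms, give up after 1s; the condition returns true to stop,
	// or (false, nil) to try again on the next tick
	err := wait.Poll(10*time.Millisecond, time.Second, func() (bool, error) {
		attempts++
		return attempts >= 3, nil
	})
	if err != nil {
		fmt.Println("timed out waiting for the condition")
		return
	}
	fmt.Printf("condition met after %d attempts\n", attempts)
}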
func (c *Controller) handleStandbyPromotion(newPod *apiv1.Pod, cluster crv1.Pgcluster) error { - clusterName := cluster.Name namespace := cluster.Namespace @@ -104,6 +108,14 @@ func (c *Controller) handleStandbyPromotion(newPod *apiv1.Pod, cluster crv1.Pgcl return err } + // rotate the exporter password if the metrics sidecar is enabled + if cluster.Spec.Exporter { + if err := clusteroperator.RotateExporterPassword(c.Client, c.Client.Config, &cluster); err != nil { + log.Error(err) + return err + } + } + // rotate the pgBouncer passwords if pgbouncer is enabled within the cluster if cluster.Spec.PgBouncer.Enabled() { if err := clusteroperator.RotatePgBouncerPassword(c.Client, c.Client.Config, &cluster); err != nil { @@ -126,44 +138,40 @@ func (c *Controller) handleStandbyPromotion(newPod *apiv1.Pod, cluster crv1.Pgcl // done by confirming func waitForStandbyPromotion(restConfig *rest.Config, clientset kubernetes.Interface, newPod apiv1.Pod, cluster crv1.Pgcluster) error { - var recoveryDisabled bool // wait for the server to accept writes to ensure standby has truly been disabled before // proceeding - duration := time.After(isStandbyDisabledTimeout) - tick := time.NewTicker(isStandbyDisabledTick) - defer tick.Stop() - for { - select { - case <-duration: - return fmt.Errorf("timed out waiting for cluster %s to accept writes after disabling "+ - "standby mode", cluster.Name) - case <-tick.C: + if err := wait.Poll(isStandbyDisabledTick, isStandbyDisabledTimeout, func() (bool, error) { + if !recoveryDisabled { + cmd := isInRecoveryCMD + cmd = append(cmd, cluster.Spec.Port) + + isInRecoveryStr, _, _ := kubeapi.ExecToPodThroughAPI(restConfig, clientset, + cmd, "database", newPod.Name, newPod.Namespace, nil) + + recoveryDisabled = strings.Contains(isInRecoveryStr, "f") + if !recoveryDisabled { - cmd := isInRecoveryCMD - cmd = append(cmd, cluster.Spec.Port) - - isInRecoveryStr, _, _ := kubeapi.ExecToPodThroughAPI(restConfig, clientset, - cmd, newPod.Spec.Containers[0].Name, newPod.Name, - newPod.Namespace, nil) - if strings.Contains(isInRecoveryStr, "f") { - recoveryDisabled = true - } - } - if recoveryDisabled { - primaryJSONStr, _, _ := kubeapi.ExecToPodThroughAPI(restConfig, clientset, - leaderStatusCMD, newPod.Spec.Containers[0].Name, newPod.Name, - newPod.Namespace, nil) - var primaryJSON map[string]interface{} - json.Unmarshal([]byte(primaryJSONStr), &primaryJSON) - if primaryJSON["state"] == "running" && (primaryJSON["pending_restart"] == nil || - !primaryJSON["pending_restart"].(bool)) { - return nil - } + return false, nil } } + + primaryJSONStr, _, _ := kubeapi.ExecToPodThroughAPI(restConfig, clientset, + leaderStatusCMD, newPod.Spec.Containers[0].Name, newPod.Name, + newPod.Namespace, nil) + + primaryJSON := map[string]interface{}{} + _ = json.Unmarshal([]byte(primaryJSONStr), &primaryJSON) + + return (primaryJSON["state"] == "running" && (primaryJSON["pending_restart"] == nil || + !primaryJSON["pending_restart"].(bool))), nil + }); err != nil { + return fmt.Errorf("timed out waiting for cluster %s to accept writes after disabling "+ + "standby mode", cluster.Name) } + + return nil } // cleanAndCreatePostFailoverBackup cleans up any existing backup resources and then creates @@ -171,7 +179,7 @@ func waitForStandbyPromotion(restConfig *rest.Config, clientset kubernetes.Inter func cleanAndCreatePostFailoverBackup(clientset kubeapi.Interface, clusterName, namespace string) error { ctx := context.TODO() - //look up the backrest-repo pod name + // look up the backrest-repo pod name selector 
:= fmt.Sprintf("%s=%s,%s=true", config.LABEL_PG_CLUSTER, clusterName, config.LABEL_PGO_BACKREST_REPO) pods, err := clientset.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{LabelSelector: selector}) diff --git a/internal/kubeapi/client_config.go b/internal/kubeapi/client_config.go index 5d070fc25e..883b8d12fa 100644 --- a/internal/kubeapi/client_config.go +++ b/internal/kubeapi/client_config.go @@ -1,7 +1,7 @@ package kubeapi /* - Copyright 2020 Crunchy Data Solutions, Inc. + Copyright 2020 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -37,8 +37,10 @@ type Interface interface { } // Interface should satisfy both our typed Interface and the standard one. -var _ crunchydata.Interface = Interface(nil) -var _ kubernetes.Interface = Interface(nil) +var ( + _ crunchydata.Interface = Interface(nil) + _ kubernetes.Interface = Interface(nil) +) // Client provides methods for interacting with Kubernetes resources. // It implements both kubernetes and crunchydata clientset Interfaces. @@ -55,7 +57,9 @@ var _ Interface = &Client{} // CrunchydataV1 retrieves the CrunchydataV1Client func (c *Client) CrunchydataV1() crunchydatav1.CrunchydataV1Interface { return c.crunchydataV1 } -func loadClientConfig() (*rest.Config, error) { +// LoadClientConfig prepares a configuration from the environment or home directory, +// falling back to in-cluster when applicable. +func LoadClientConfig() (*rest.Config, error) { // The default loading rules try to read from the files specified in the // environment or from the home directory. loader := clientcmd.NewDefaultClientConfigLoadingRules() @@ -69,11 +73,18 @@ func loadClientConfig() (*rest.Config, error) { // NewClient returns a kubernetes.Clientset and its underlying configuration. func NewClient() (*Client, error) { - config, err := loadClientConfig() + config, err := LoadClientConfig() if err != nil { return nil, err } + return NewClientForConfig(config) +} + +// NewClientForConfig returns a kubernetes.Clientset using config. +func NewClientForConfig(config *rest.Config) (*Client, error) { + var err error + // Match the settings applied by sigs.k8s.io/controller-runtime@v0.6.0; // see https://github.com/kubernetes-sigs/controller-runtime/issues/365. if config.QPS == 0.0 { diff --git a/internal/kubeapi/endpoints.go b/internal/kubeapi/endpoints.go index 232469fe10..bb8517439c 100644 --- a/internal/kubeapi/endpoints.go +++ b/internal/kubeapi/endpoints.go @@ -1,7 +1,7 @@ package kubeapi /* - Copyright 2019 - 2020 Crunchy Data Solutions, Inc. + Copyright 2019 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at diff --git a/internal/kubeapi/errors.go b/internal/kubeapi/errors.go index 829ca9f097..783ed065ee 100644 --- a/internal/kubeapi/errors.go +++ b/internal/kubeapi/errors.go @@ -1,7 +1,7 @@ package kubeapi /* - Copyright 2020 Crunchy Data Solutions, Inc. + Copyright 2020 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at diff --git a/internal/kubeapi/exec.go b/internal/kubeapi/exec.go index b2e994d84d..29811e0f89 100644 --- a/internal/kubeapi/exec.go +++ b/internal/kubeapi/exec.go @@ -1,7 +1,7 @@ package kubeapi /* - Copyright 2018 - 2020 Crunchy Data Solutions, Inc. + Copyright 2018 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at diff --git a/internal/kubeapi/fake/clientset.go b/internal/kubeapi/fake/clientset.go index 7fbd74b802..3265e42280 100644 --- a/internal/kubeapi/fake/clientset.go +++ b/internal/kubeapi/fake/clientset.go @@ -1,7 +1,7 @@ package fake /* - Copyright 2020 Crunchy Data Solutions, Inc. + Copyright 2020 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at diff --git a/internal/kubeapi/fake/fakeclients.go b/internal/kubeapi/fake/fakeclients.go index 6a263818d4..04e950e622 100644 --- a/internal/kubeapi/fake/fakeclients.go +++ b/internal/kubeapi/fake/fakeclients.go @@ -1,7 +1,7 @@ package fake /* - Copyright 2020 Crunchy Data Solutions, Inc. + Copyright 2020 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -55,7 +55,6 @@ var ( // initialization of the Operator in various unit tests where the various resources loaded // during initialization (e.g. templates, config and/or global variables) are required. func NewFakePGOClient() (kubeapi.Interface, error) { - if pgoRoot == "" { return nil, errors.New("Environment variable PGOROOT must be set to the root directory " + "of the PostgreSQL Operator project repository in order to create a fake client") @@ -84,7 +83,6 @@ func NewFakePGOClient() (kubeapi.Interface, error) { // utilized when testing to simulate an environment containing the various PostgreSQL Operator // configuration files (e.g. templates) required to run the Operator. func createMockPGOConfigMap(pgoNamespace string) (*v1.ConfigMap, error) { - // create a configMap that will hold the default configs pgoConfigMap := &v1.ConfigMap{ Data: make(map[string]string), diff --git a/internal/kubeapi/patch.go b/internal/kubeapi/patch.go index fcaf83a432..5ce73077ab 100644 --- a/internal/kubeapi/patch.go +++ b/internal/kubeapi/patch.go @@ -1,7 +1,7 @@ package kubeapi /* - Copyright 2020 Crunchy Data Solutions, Inc. + Copyright 2020 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at diff --git a/internal/kubeapi/patch_test.go b/internal/kubeapi/patch_test.go index fa270e340c..ddb41a4552 100644 --- a/internal/kubeapi/patch_test.go +++ b/internal/kubeapi/patch_test.go @@ -1,7 +1,7 @@ package kubeapi /* - Copyright 2020 Crunchy Data Solutions, Inc. + Copyright 2020 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at diff --git a/internal/kubeapi/volumes.go b/internal/kubeapi/volumes.go index 05412672ac..6014ee39f4 100644 --- a/internal/kubeapi/volumes.go +++ b/internal/kubeapi/volumes.go @@ -1,7 +1,7 @@ package kubeapi /* - Copyright 2020 Crunchy Data Solutions, Inc. + Copyright 2020 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at diff --git a/internal/kubeapi/volumes_test.go b/internal/kubeapi/volumes_test.go index b793ac5269..6e43b3e5ca 100644 --- a/internal/kubeapi/volumes_test.go +++ b/internal/kubeapi/volumes_test.go @@ -1,7 +1,7 @@ package kubeapi /* - Copyright 2020 Crunchy Data Solutions, Inc. + Copyright 2020 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -26,7 +26,7 @@ func TestFindOrAppendVolume(t *testing.T) { t.Run("empty", func(t *testing.T) { var volumes []v1.Volume - var volume = FindOrAppendVolume(&volumes, "v1") + volume := FindOrAppendVolume(&volumes, "v1") if expected, actual := 1, len(volumes); expected != actual { t.Fatalf("expected appended volume, got %v", actual) } @@ -69,7 +69,7 @@ func TestFindOrAppendVolumeMount(t *testing.T) { t.Run("empty", func(t *testing.T) { var mounts []v1.VolumeMount - var mount = FindOrAppendVolumeMount(&mounts, "v1") + mount := FindOrAppendVolumeMount(&mounts, "v1") if expected, actual := 1, len(mounts); expected != actual { t.Fatalf("expected appended mount, got %v", actual) } diff --git a/internal/logging/loglib.go b/internal/logging/loglib.go index b443e47b4d..846544c9f5 100644 --- a/internal/logging/loglib.go +++ b/internal/logging/loglib.go @@ -1,8 +1,8 @@ -//Package logging Functions to set unique configuration for use with the logrus logger +// Package logging Functions to set unique configuration for use with the logrus logger package logging /* - Copyright 2019 - 2020 Crunchy Data Solutions, Inc. + Copyright 2019 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -34,7 +34,7 @@ func SetParameters() LogValues { return logval } -//LogValues holds the standard log value types +// LogValues holds the standard log value types type LogValues struct { version string } @@ -53,9 +53,9 @@ func (f *formatter) Format(e *log.Entry) ([]byte, error) { return f.lf.Format(e) } -//CrunchyLogger adds the customized logging fields to the logrus instance context +// CrunchyLogger adds the customized logging fields to the logrus instance context func CrunchyLogger(logDetails LogValues) { - //Sets calling method as a field + // Sets calling method as a field log.SetReportCaller(true) crunchyTextFormatter := &log.TextFormatter{ diff --git a/internal/ns/nslogic.go b/internal/ns/nslogic.go index 21f0499b7f..6148f33de7 100644 --- a/internal/ns/nslogic.go +++ b/internal/ns/nslogic.go @@ -1,7 +1,7 @@ package ns /* -Copyright 2019 - 2020 Crunchy Data Solutions, Inc. +Copyright 2019 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at @@ -43,20 +43,28 @@ import ( "k8s.io/client-go/kubernetes/fake" ) -const OPERATOR_SERVICE_ACCOUNT = "postgres-operator" -const PGO_DEFAULT_SERVICE_ACCOUNT = "pgo-default" +const ( + OPERATOR_SERVICE_ACCOUNT = "postgres-operator" + PGO_DEFAULT_SERVICE_ACCOUNT = "pgo-default" +) -const PGO_TARGET_ROLE = "pgo-target-role" -const PGO_TARGET_ROLE_BINDING = "pgo-target-role-binding" -const PGO_TARGET_SERVICE_ACCOUNT = "pgo-target" +const ( + PGO_TARGET_ROLE = "pgo-target-role" + PGO_TARGET_ROLE_BINDING = "pgo-target-role-binding" + PGO_TARGET_SERVICE_ACCOUNT = "pgo-target" +) -const PGO_BACKREST_ROLE = "pgo-backrest-role" -const PGO_BACKREST_SERVICE_ACCOUNT = "pgo-backrest" -const PGO_BACKREST_ROLE_BINDING = "pgo-backrest-role-binding" +const ( + PGO_BACKREST_ROLE = "pgo-backrest-role" + PGO_BACKREST_SERVICE_ACCOUNT = "pgo-backrest" + PGO_BACKREST_ROLE_BINDING = "pgo-backrest-role-binding" +) -const PGO_PG_ROLE = "pgo-pg-role" -const PGO_PG_ROLE_BINDING = "pgo-pg-role-binding" -const PGO_PG_SERVICE_ACCOUNT = "pgo-pg" +const ( + PGO_PG_ROLE = "pgo-pg-role" + PGO_PG_ROLE_BINDING = "pgo-pg-role-binding" + PGO_PG_SERVICE_ACCOUNT = "pgo-pg" +) // PgoServiceAccount is used to populate the following ServiceAccount templates: // pgo-default-sa.json @@ -135,7 +143,6 @@ var ( // CreateFakeNamespaceClient creates a fake namespace client for use with the "disabled" namespace // operating mode func CreateFakeNamespaceClient(installationName string) (kubernetes.Interface, error) { - var namespaces []runtime.Object for _, namespace := range getNamespacesFromEnv() { namespaces = append(namespaces, &v1.Namespace{ @@ -161,7 +168,7 @@ func CreateNamespace(clientset kubernetes.Interface, installationName, pgoNamesp log.Debugf("CreateNamespace %s %s %s", pgoNamespace, createdBy, newNs) - //define the new namespace + // define the new namespace n := v1.Namespace{} n.ObjectMeta.Labels = make(map[string]string) n.ObjectMeta.Labels[config.LABEL_VENDOR] = config.LABEL_CRUNCHY @@ -177,7 +184,7 @@ func CreateNamespace(clientset kubernetes.Interface, installationName, pgoNamesp log.Debugf("CreateNamespace %s created by %s", newNs, createdBy) - //publish event + // publish event topics := make([]string, 1) topics[0] = events.EventTopicPGO @@ -206,7 +213,7 @@ func DeleteNamespace(clientset kubernetes.Interface, installationName, pgoNamesp log.Debugf("DeleteNamespace %s deleted by %s", ns, deletedBy) - //publish the namespace delete event + // publish the namespace delete event topics := make([]string, 1) topics[0] = events.EventTopicPGO @@ -441,7 +448,7 @@ func UpdateNamespace(clientset kubernetes.Interface, installationName, pgoNamesp return err } - //publish event + // publish event topics := make([]string, 1) topics[0] = events.EventTopicPGO @@ -567,7 +574,6 @@ func GetCurrentNamespaceList(clientset kubernetes.Interface, func ValidateNamespacesWatched(clientset kubernetes.Interface, namespaceOperatingMode NamespaceOperatingMode, installationName string, namespaces ...string) error { - var err error var currNSList []string if namespaceOperatingMode != NamespaceOperatingModeDisabled { @@ -640,7 +646,6 @@ func ValidateNamespaceNames(namespace ...string) error { // (please see the various NamespaceOperatingMode types for a detailed explanation of each // operating mode). 
func GetNamespaceOperatingMode(clientset kubernetes.Interface) (NamespaceOperatingMode, error) { - // first check to see if dynamic namespace capabilities can be enabled isDynamic, err := CheckAccessPrivs(clientset, namespacePrivsCoreDynamic, "", "") if err != nil { @@ -710,7 +715,6 @@ func CheckAccessPrivs(clientset kubernetes.Interface, func GetInitialNamespaceList(clientset kubernetes.Interface, namespaceOperatingMode NamespaceOperatingMode, installationName, pgoNamespace string) ([]string, error) { - // next grab the namespaces provided using the NAMESPACE env var namespaceList := getNamespacesFromEnv() diff --git a/internal/operator/backrest/backup.go b/internal/operator/backrest/backup.go index 2351fe0b2b..c624ed6f25 100644 --- a/internal/operator/backrest/backup.go +++ b/internal/operator/backrest/backup.go @@ -1,7 +1,7 @@ package backrest /* - Copyright 2018 - 2020 Crunchy Data Solutions, Inc. + Copyright 2018 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -19,6 +19,7 @@ import ( "bytes" "context" "encoding/json" + "errors" "fmt" "os" "regexp" @@ -37,6 +38,7 @@ import ( v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/kubernetes" ) @@ -48,48 +50,76 @@ type backrestJobTemplateFields struct { CommandOpts string PITRTarget string PodName string - PGOImagePrefix string - PGOImageTag string + CCPImagePrefix string + CCPImageTag string SecurityContext string PgbackrestStanza string PgbackrestDBPath string - PgbackrestRepoPath string - PgbackrestRepoType string + PgbackrestRepo1Path string + PgbackrestRepo1Type crv1.BackrestStorageType BackrestLocalAndS3Storage bool PgbackrestS3VerifyTLS string PgbackrestRestoreVolumes string PgbackrestRestoreVolumeMounts string + Tolerations string } -var backrestPgHostRegex = regexp.MustCompile("--db-host|--pg1-host") -var backrestPgPathRegex = regexp.MustCompile("--db-path|--pg1-path") +var ( + backrestPgHostRegex = regexp.MustCompile("--db-host|--pg1-host") + backrestPgPathRegex = regexp.MustCompile("--db-path|--pg1-path") +) // Backrest ... -func Backrest(namespace string, clientset kubernetes.Interface, task *crv1.Pgtask) { +func Backrest(namespace string, clientset kubeapi.Interface, task *crv1.Pgtask) { ctx := context.TODO() - //create the Job to run the backrest command + // get the cluster that is requesting the backup. if we cannot get the cluster + // do not take the backup + cluster, err := clientset.CrunchydataV1().Pgclusters(task.Namespace).Get(ctx, + task.Spec.Parameters[config.LABEL_PG_CLUSTER], metav1.GetOptions{}) + + if err != nil { + log.Error(err) + return + } cmd := task.Spec.Parameters[config.LABEL_BACKREST_COMMAND] + // determine the repo type. we need to make a special check for a standby + // cluster (see below) + repoType := operator.GetRepoType(cluster) + + // If this is a standby cluster and this is the stanza creation task, and if + // posix storage is specified, ensure that the stanza is created on the local + // repository only. + // + // The stanza for the S3 repo will have already been created by the cluster + // the standby is replicating from, and therefore does not need to be + // attempted again.
+ if cluster.Spec.Standby && cmd == crv1.PgtaskBackrestStanzaCreate { + repoType = crv1.BackrestStorageTypePosix + } + // create the Job to run the backrest command jobFields := backrestJobTemplateFields{ - JobName: task.Spec.Parameters[config.LABEL_JOB_NAME], - ClusterName: task.Spec.Parameters[config.LABEL_PG_CLUSTER], - PodName: task.Spec.Parameters[config.LABEL_POD_NAME], - SecurityContext: "{}", - Command: cmd, - CommandOpts: task.Spec.Parameters[config.LABEL_BACKREST_OPTS], - PITRTarget: "", - PGOImagePrefix: util.GetValueOrDefault(task.Spec.Parameters[config.LABEL_IMAGE_PREFIX], operator.Pgo.Pgo.PGOImagePrefix), - PGOImageTag: operator.Pgo.Pgo.PGOImageTag, + JobName: task.Spec.Parameters[config.LABEL_JOB_NAME], + ClusterName: task.Spec.Parameters[config.LABEL_PG_CLUSTER], + PodName: task.Spec.Parameters[config.LABEL_POD_NAME], + SecurityContext: `{"runAsNonRoot": true}`, + Command: cmd, + CommandOpts: task.Spec.Parameters[config.LABEL_BACKREST_OPTS], + PITRTarget: "", + CCPImagePrefix: util.GetValueOrDefault(task.Spec.Parameters[config.LABEL_IMAGE_PREFIX], operator.Pgo.Cluster.CCPImagePrefix), + CCPImageTag: util.GetValueOrDefault(util.GetStandardImageTag(cluster.Spec.CCPImage, cluster.Spec.CCPImageTag), + operator.Pgo.Cluster.CCPImageTag), PgbackrestStanza: task.Spec.Parameters[config.LABEL_PGBACKREST_STANZA], PgbackrestDBPath: task.Spec.Parameters[config.LABEL_PGBACKREST_DB_PATH], - PgbackrestRepoPath: task.Spec.Parameters[config.LABEL_PGBACKREST_REPO_PATH], + PgbackrestRepo1Path: task.Spec.Parameters[config.LABEL_PGBACKREST_REPO_PATH], PgbackrestRestoreVolumes: "", PgbackrestRestoreVolumeMounts: "", - PgbackrestRepoType: operator.GetRepoType(task.Spec.Parameters[config.LABEL_BACKREST_STORAGE_TYPE]), - BackrestLocalAndS3Storage: operator.IsLocalAndS3Storage(task.Spec.Parameters[config.LABEL_BACKREST_STORAGE_TYPE]), + PgbackrestRepo1Type: repoType, + BackrestLocalAndS3Storage: operator.IsLocalAndS3Storage(cluster), PgbackrestS3VerifyTLS: task.Spec.Parameters[config.LABEL_BACKREST_S3_VERIFY_TLS], + Tolerations: util.GetTolerations(cluster.Spec.Tolerations), } podCommandOpts, err := getCommandOptsFromPod(clientset, task, namespace) @@ -106,7 +136,7 @@ func Backrest(namespace string, clientset kubernetes.Interface, task *crv1.Pgtas } if operator.CRUNCHY_DEBUG { - config.BackrestjobTemplate.Execute(os.Stdout, jobFields) + _ = config.BackrestjobTemplate.Execute(os.Stdout, jobFields) } newjob := v1batch.Job{} @@ -121,15 +151,14 @@ func Backrest(namespace string, clientset kubernetes.Interface, task *crv1.Pgtas &newjob.Spec.Template.Spec.Containers[0]) newjob.ObjectMeta.Labels[config.LABEL_PGOUSER] = task.ObjectMeta.Labels[config.LABEL_PGOUSER] - newjob.ObjectMeta.Labels[config.LABEL_PG_CLUSTER_IDENTIFIER] = task.ObjectMeta.Labels[config.LABEL_PG_CLUSTER_IDENTIFIER] backupType := task.Spec.Parameters[config.LABEL_PGHA_BACKUP_TYPE] if backupType != "" { newjob.ObjectMeta.Labels[config.LABEL_PGHA_BACKUP_TYPE] = backupType } - clientset.BatchV1().Jobs(namespace).Create(ctx, &newjob, metav1.CreateOptions{}) + _, _ = clientset.BatchV1().Jobs(namespace).Create(ctx, &newjob, metav1.CreateOptions{}) - //publish backrest backup event + // publish backrest backup event if cmd == "backup" { topics := make([]string, 1) topics[0] = events.EventTopicBackup @@ -151,14 +180,12 @@ func Backrest(namespace string, clientset kubernetes.Interface, task *crv1.Pgtas log.Error(err.Error()) } } - } // CreateInitialBackup creates a Pgtask in order to initiate the initial pgBackRest backup for a cluster // as 
needed to support replica creation func CreateInitialBackup(clientset pgo.Interface, namespace, clusterName, podName string) (*crv1.Pgtask, error) { - var params map[string]string - params = make(map[string]string) + params := make(map[string]string) params[config.LABEL_PGHA_BACKUP_TYPE] = crv1.BackupTypeBootstrap return CreateBackup(clientset, namespace, clusterName, podName, params, "--type=full") } @@ -166,8 +193,7 @@ func CreateInitialBackup(clientset pgo.Interface, namespace, clusterName, podNam // CreatePostFailoverBackup creates a Pgtask in order to initiate a pgBackRest backup following a failure // event to ensure proper replica creation and/or reinitialization func CreatePostFailoverBackup(clientset pgo.Interface, namespace, clusterName, podName string) (*crv1.Pgtask, error) { - var params map[string]string - params = make(map[string]string) + params := make(map[string]string) params[config.LABEL_PGHA_BACKUP_TYPE] = crv1.BackupTypeFailover return CreateBackup(clientset, namespace, clusterName, podName, params, "") } @@ -200,10 +226,9 @@ func CreateBackup(clientset pgo.Interface, namespace, clusterName, podName strin spec.Parameters[config.LABEL_CONTAINER_NAME] = "database" // pass along the appropriate image prefix for the backup task // this will be used by the associated backrest job - spec.Parameters[config.LABEL_IMAGE_PREFIX] = util.GetValueOrDefault(cluster.Spec.PGOImagePrefix, operator.Pgo.Pgo.PGOImagePrefix) + spec.Parameters[config.LABEL_IMAGE_PREFIX] = util.GetValueOrDefault(cluster.Spec.CCPImagePrefix, operator.Pgo.Cluster.CCPImagePrefix) spec.Parameters[config.LABEL_BACKREST_COMMAND] = crv1.PgtaskBackrestBackup spec.Parameters[config.LABEL_BACKREST_OPTS] = backupOpts - spec.Parameters[config.LABEL_BACKREST_STORAGE_TYPE] = cluster.Spec.UserLabels[config.LABEL_BACKREST_STORAGE_TYPE] // Get 'true' or 'false' for setting the pgBackRest S3 verify TLS value spec.Parameters[config.LABEL_BACKREST_S3_VERIFY_TLS] = operator.GetS3VerifyTLSSetting(cluster) @@ -219,7 +244,6 @@ func CreateBackup(clientset pgo.Interface, namespace, clusterName, podName strin } newInstance.ObjectMeta.Labels = make(map[string]string) newInstance.ObjectMeta.Labels[config.LABEL_PG_CLUSTER] = cluster.Name - newInstance.ObjectMeta.Labels[config.LABEL_PG_CLUSTER_IDENTIFIER] = cluster.ObjectMeta.Labels[config.LABEL_PG_CLUSTER_IDENTIFIER] newInstance.ObjectMeta.Labels[config.LABEL_PGOUSER] = cluster.ObjectMeta.Labels[config.LABEL_PGOUSER] _, err = clientset.CrunchydataV1().Pgtasks(cluster.Namespace).Create(ctx, newInstance, metav1.CreateOptions{}) @@ -244,7 +268,7 @@ func CreateBackup(clientset pgo.Interface, namespace, clusterName, podName strin return err } - //remove previous backup job + // remove previous backup job selector := config.LABEL_BACKREST_COMMAND + "=" + crv1.PgtaskBackrestBackup + "," + config.LABEL_PG_CLUSTER + "=" + clusterName + "," + config.LABEL_BACKREST + "=true" deletePropagation := metav1.DeletePropagationForeground @@ -257,27 +281,26 @@ func CleanBackupResources(clientset kubeapi.Interface, namespace, clusterName st log.Error(err) } - timeout := time.After(30 * time.Second) - tick := time.NewTicker(1 * time.Second) - defer tick.Stop() - for { - select { - case <-timeout: + if err := wait.Poll(1*time.Second, 30*time.Second, func() (bool, error) { + jobList, err := clientset. + BatchV1().Jobs(namespace).
+ List(ctx, metav1.ListOptions{LabelSelector: selector}) + if err != nil { + log.Error(err) + return false, err + } + + return len(jobList.Items) == 0, nil + }); err != nil { + if errors.Is(err, wait.ErrWaitTimeout) { return fmt.Errorf("Timed out waiting for deletion of pgBackRest backup job for "+ "cluster %s", clusterName) - case <-tick.C: - jobList, err := clientset. - BatchV1().Jobs(namespace). - List(ctx, metav1.ListOptions{LabelSelector: selector}) - if err != nil { - log.Error(err) - return err - } - if len(jobList.Items) == 0 { - return nil - } } + + return err } + + return nil } // getCommandOptsFromPod adds command line options from the primary pod to a backrest job. diff --git a/internal/operator/backrest/repo.go b/internal/operator/backrest/repo.go index 68c1152056..bed14f834b 100644 --- a/internal/operator/backrest/repo.go +++ b/internal/operator/backrest/repo.go @@ -1,7 +1,7 @@ package backrest /* - Copyright 2017 - 2020 Crunchy Data Solutions, Inc. + Copyright 2017 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -44,18 +44,18 @@ var s3RepoTypeRegex = regexp.MustCompile(`--repo-type=["']?s3["']?`) type RepoDeploymentTemplateFields struct { SecurityContext string - PGOImagePrefix string - PGOImageTag string + CCPImagePrefix string + CCPImageTag string ContainerResources string BackrestRepoClaimName string SshdSecretsName string PGbackrestDBHost string - PgbackrestRepoPath string + PgbackrestRepo1Path string PgbackrestDBPath string PgbackrestPGPort string SshdPort int PgbackrestStanza string - PgbackrestRepoType string + PgbackrestRepo1Type crv1.BackrestStorageType PgbackrestS3EnvVars string Name string ClusterName string @@ -65,6 +65,7 @@ type RepoDeploymentTemplateFields struct { PodAntiAffinityLabelValue string Replicas int BootstrapCluster string + Tolerations string } type RepoServiceTemplateFields struct { @@ -97,7 +98,7 @@ func CreateRepoDeployment(clientset kubernetes.Interface, cluster *crv1.Pgcluste serviceName = fmt.Sprintf(util.BackrestRepoServiceName, cluster.Name) } - //create backrest repo service + // create backrest repo service serviceFields := RepoServiceTemplateFields{ Name: serviceName, ClusterName: cluster.Name, @@ -135,7 +136,7 @@ func CreateRepoDeployment(clientset kubernetes.Interface, cluster *crv1.Pgcluste } if operator.CRUNCHY_DEBUG { - config.PgoBackrestRepoTemplate.Execute(os.Stdout, repoFields) + _ = config.PgoBackrestRepoTemplate.Execute(os.Stdout, repoFields) } deployment := appsv1.Deployment{} @@ -159,6 +160,20 @@ func CreateRepoDeployment(clientset kubernetes.Interface, cluster *crv1.Pgcluste return nil } +// CreateRepoSecret allows for the creation of the Secret used to populate +// some (mostly) sensitive fields for managing the pgBackRest repository. +// +// If the Secret already exists, then missing fields will be overwritten. +func CreateRepoSecret(clientset kubernetes.Interface, cluster *crv1.Pgcluster) error { + _, err := util.CreateBackrestRepoSecrets(clientset, + util.BackrestRepoConfig{ + ClusterName: cluster.Name, + ClusterNamespace: cluster.Namespace, + OperatorNamespace: operator.PgoNamespace, + }) + return err +} + // setBootstrapRepoOverrides overrides certain fields used to populate the pgBackRest repository template // as needed to support the creation of a bootstrap repository needed to bootstrap a new cluster from an // existing data source.
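The CleanBackupResources change above is one of several places in this patch where a hand-rolled time.After/time.Ticker select loop is swapped for wait.Poll from k8s.io/apimachinery. A minimal, self-contained sketch of that pattern follows; the waitForJobsDeleted name and the selector wiring are illustrative assumptions, not part of the patch:

package example

import (
	"context"
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
)

// waitForJobsDeleted polls once per second, for up to 30 seconds, until no
// Jobs match the label selector. The condition function ends the poll early
// by returning true (done) or a non-nil error; if the timeout elapses first,
// wait.Poll returns wait.ErrWaitTimeout, which callers can distinguish from
// API errors, as CleanBackupResources does with errors.Is.
func waitForJobsDeleted(clientset kubernetes.Interface, namespace, selector string) error {
	err := wait.Poll(time.Second, 30*time.Second, func() (bool, error) {
		jobList, err := clientset.BatchV1().Jobs(namespace).
			List(context.TODO(), metav1.ListOptions{LabelSelector: selector})
		if err != nil {
			// a non-nil error aborts the poll immediately
			return false, err
		}
		return len(jobList.Items) == 0, nil
	})
	if err != nil {
		return fmt.Errorf("waiting for job deletion in %s: %w", namespace, err)
	}
	return nil
}

Unlike the old ticker loops, wait.Poll owns the timeout bookkeeping and runs the condition at most once per interval, so the calling code only has to translate the final error.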
@@ -184,7 +199,7 @@ func setBootstrapRepoOverrides(clientset kubernetes.Interface, cluster *crv1.Pgc return err } - repoFields.PgbackrestRepoPath = restoreFromSecret.Annotations[config.ANNOTATION_REPO_PATH] + repoFields.PgbackrestRepo1Path = restoreFromSecret.Annotations[config.ANNOTATION_REPO_PATH] repoFields.PgbackrestPGPort = restoreFromSecret.Annotations[config.ANNOTATION_PG_PORT] sshdPort, err := strconv.Atoi(restoreFromSecret.Annotations[config.ANNOTATION_SSHD_PORT]) @@ -213,22 +228,21 @@ func setBootstrapRepoOverrides(clientset kubernetes.Interface, cluster *crv1.Pgc func getRepoDeploymentFields(clientset kubernetes.Interface, cluster *crv1.Pgcluster, replicas int) *RepoDeploymentTemplateFields { - namespace := cluster.GetNamespace() - repoFields := RepoDeploymentTemplateFields{ - PGOImagePrefix: util.GetValueOrDefault(cluster.Spec.PGOImagePrefix, operator.Pgo.Pgo.PGOImagePrefix), - PGOImageTag: operator.Pgo.Pgo.PGOImageTag, + CCPImagePrefix: util.GetValueOrDefault(cluster.Spec.CCPImagePrefix, operator.Pgo.Cluster.CCPImagePrefix), + CCPImageTag: util.GetValueOrDefault(util.GetStandardImageTag(cluster.Spec.CCPImage, cluster.Spec.CCPImageTag), + operator.Pgo.Cluster.CCPImageTag), ContainerResources: operator.GetResourcesJSON(cluster.Spec.BackrestResources, cluster.Spec.BackrestLimits), BackrestRepoClaimName: fmt.Sprintf(util.BackrestRepoPVCName, cluster.Name), SshdSecretsName: fmt.Sprintf(util.BackrestRepoSecretName, cluster.Name), PGbackrestDBHost: cluster.Name, - PgbackrestRepoPath: util.GetPGBackRestRepoPath(*cluster), + PgbackrestRepo1Path: operator.GetPGBackRestRepoPath(cluster), PgbackrestDBPath: "/pgdata/" + cluster.Name, PgbackrestPGPort: cluster.Spec.Port, SshdPort: operator.Pgo.Cluster.BackrestPort, PgbackrestStanza: "db", - PgbackrestRepoType: operator.GetRepoType(cluster.Spec.UserLabels[config.LABEL_BACKREST_STORAGE_TYPE]), - PgbackrestS3EnvVars: operator.GetPgbackrestS3EnvVars(*cluster, clientset, namespace), + PgbackrestRepo1Type: operator.GetRepoType(cluster), + PgbackrestS3EnvVars: operator.GetPgbackrestS3EnvVars(clientset, *cluster), Name: fmt.Sprintf(util.BackrestRepoServiceName, cluster.Name), ClusterName: cluster.Name, SecurityContext: operator.GetPodSecurityContext(cluster.Spec.BackrestStorage.GetSupplementalGroups()), @@ -239,6 +253,7 @@ func getRepoDeploymentFields(clientset kubernetes.Interface, cluster *crv1.Pgclu PodAntiAffinityLabelName: config.LABEL_POD_ANTI_AFFINITY, PodAntiAffinityLabelValue: string(operator.GetPodAntiAffinityType(cluster, crv1.PodAntiAffinityDeploymentPgBackRest, cluster.Spec.PodAntiAffinity.PgBackRest)), + Tolerations: util.GetTolerations(cluster.Spec.Tolerations), } return &repoFields @@ -252,7 +267,6 @@ func UpdateAnnotations(clientset kubernetes.Interface, cluster *crv1.Pgcluster, // get a list of all of the instance deployments for the cluster deployment, err := operator.GetBackrestDeployment(clientset, cluster) - if err != nil { return err } @@ -278,7 +292,6 @@ func UpdateResources(clientset kubernetes.Interface, cluster *crv1.Pgcluster) er // get a list of all of the instance deployments for the cluster deployment, err := operator.GetBackrestDeployment(clientset, cluster) - if err != nil { return err } @@ -320,7 +333,7 @@ func createService(clientset kubernetes.Interface, fields *RepoServiceTemplateFi } if operator.CRUNCHY_DEBUG { - config.PgoBackrestRepoServiceTemplate.Execute(os.Stdout, fields) + _ = config.PgoBackrestRepoServiceTemplate.Execute(os.Stdout, fields) } s := v1.Service{} diff --git 
a/internal/operator/backrest/restore.go b/internal/operator/backrest/restore.go index 1b37817137..eafcdcbf44 100644 --- a/internal/operator/backrest/restore.go +++ b/internal/operator/backrest/restore.go @@ -1,7 +1,7 @@ package backrest /* - Copyright 2018 - 2020 Crunchy Data Solutions, Inc. + Copyright 2018 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -26,6 +26,7 @@ import ( "github.com/crunchydata/postgres-operator/internal/config" "github.com/crunchydata/postgres-operator/internal/kubeapi" + "github.com/crunchydata/postgres-operator/internal/util" crv1 "github.com/crunchydata/postgres-operator/pkg/apis/crunchydata.com/v1" "github.com/crunchydata/postgres-operator/pkg/events" pgo "github.com/crunchydata/postgres-operator/pkg/generated/clientset/versioned" @@ -48,32 +49,10 @@ const ( // for pgBackRest using the '--target' option var restoreTargetRegex = regexp.MustCompile("--target(=| +)") -type BackrestRestoreJobTemplateFields struct { - JobName string - ClusterName string - WorkflowID string - ToClusterPVCName string - SecurityContext string - PGOImagePrefix string - PGOImageTag string - CommandOpts string - PITRTarget string - PgbackrestStanza string - PgbackrestDBPath string - PgbackrestRepo1Path string - PgbackrestRepo1Host string - PgbackrestS3EnvVars string - NodeSelector string - Tablespaces string - TablespaceVolumes string - TablespaceVolumeMounts string -} - // UpdatePGClusterSpecForRestore updates the spec for pgcluster resource provided as needed to // perform a restore func UpdatePGClusterSpecForRestore(clientset kubeapi.Interface, cluster *crv1.Pgcluster, task *crv1.Pgtask) { - cluster.Spec.PGDataSource.RestoreFrom = cluster.GetName() restoreOpts := task.Spec.Parameters[config.LABEL_BACKREST_RESTORE_OPTS] @@ -93,10 +72,20 @@ func UpdatePGClusterSpecForRestore(clientset kubeapi.Interface, cluster *crv1.Pg cluster.Spec.PGDataSource.RestoreOpts = restoreOpts // set the proper node affinity for the restore job - cluster.Spec.UserLabels[config.LABEL_NODE_LABEL_KEY] = - task.Spec.Parameters[config.LABEL_NODE_LABEL_KEY] - cluster.Spec.UserLabels[config.LABEL_NODE_LABEL_VALUE] = - task.Spec.Parameters[config.LABEL_NODE_LABEL_VALUE] + if task.Spec.Parameters[config.LABEL_NODE_LABEL_KEY] != "" && task.Spec.Parameters[config.LABEL_NODE_LABEL_VALUE] != "" { + affinityType := crv1.NodeAffinityTypePreferred + if task.Spec.Parameters[config.LABEL_NODE_AFFINITY_TYPE] == "required" { + affinityType = crv1.NodeAffinityTypeRequired + } + + cluster.Spec.NodeAffinity = crv1.NodeAffinitySpec{ + Default: util.GenerateNodeAffinity( + affinityType, + task.Spec.Parameters[config.LABEL_NODE_LABEL_KEY], + []string{task.Spec.Parameters[config.LABEL_NODE_LABEL_VALUE]}, + ), + } + } } // PrepareClusterForRestore prepares a PostgreSQL cluster for a restore. This includes deleting @@ -116,7 +105,6 @@ func PrepareClusterForRestore(clientset kubeapi.Interface, cluster *crv1.Pgclust patch, err := kubeapi.NewMergePatch(). Add("metadata", "annotations")(map[string]string{ config.ANNOTATION_BACKREST_RESTORE: "", - config.ANNOTATION_CURRENT_PRIMARY: clusterName, }).
Add("metadata", "labels")(map[string]string{ config.LABEL_DEPLOYMENT_NAME: clusterName, @@ -185,7 +173,24 @@ func PrepareClusterForRestore(clientset kubeapi.Interface, cluster *crv1.Pgclust }); err != nil { return nil, err } - log.Debugf("restore workflow: deleted primary and replicas %v", pgInstances) + log.Debugf("restore workflow: deleted primary and replica deployments for cluster %s", + clusterName) + + // Wait for all primary and replica deployments to be removed. If unable to verify that all + // deployments have been removed, then the restore cannot proceed and the function returns. + if err := wait.Poll(time.Second/2, time.Minute*3, func() (bool, error) { + for _, deployment := range pgInstances.Items { + if _, err := clientset.AppsV1().Deployments(namespace). + Get(ctx, deployment.GetName(), metav1.GetOptions{}); err == nil || !kerrors.IsNotFound(err) { + return false, nil + } + } + return true, nil + }); err != nil { + return nil, err + } + log.Debugf("restore workflow: finished waiting for primary and replica deployments for "+ + "cluster %s to be removed", clusterName) // delete all existing jobs deletePropagation := metav1.DeletePropagationBackground @@ -200,7 +205,7 @@ func PrepareClusterForRestore(clientset kubeapi.Interface, cluster *crv1.Pgclust // find all database PVCs for the entire PostgreSQL cluster. Includes the PVCs for all PGDATA // volumes, as well as the PVCs for any WAL and/or tablespace volumes - databasePVCList, err := getPGDatabasePVCNames(clientset, replicas, clusterName, namespace) + databasePVCList, err := getPGDatabasePVCNames(clientset, replicas, cluster) if err != nil { return nil, err } @@ -234,8 +239,10 @@ func PrepareClusterForRestore(clientset kubeapi.Interface, cluster *crv1.Pgclust clusterName) // Delete the DCS and leader ConfigMaps. These will be recreated during the restore. - configMaps := []string{fmt.Sprintf("%s-config", clusterName), - fmt.Sprintf("%s-leader", clusterName)} + configMaps := []string{ + fmt.Sprintf("%s-config", clusterName), + fmt.Sprintf("%s-leader", clusterName), + } for _, c := range configMaps { if err := clientset.CoreV1().ConfigMaps(namespace). Delete(ctx, c, metav1.DeleteOptions{}); err != nil && !kerrors.IsNotFound(err) { @@ -265,7 +272,7 @@ func PrepareClusterForRestore(clientset kubeapi.Interface, cluster *crv1.Pgclust func UpdateWorkflow(clientset pgo.Interface, workflowID, namespace, status string) error { ctx := context.TODO() - //update workflow + // update workflow log.Debugf("restore workflow: update workflow %s", workflowID) selector := crv1.PgtaskWorkflowID + "=" + workflowID taskList, err := clientset.CrunchydataV1().Pgtasks(namespace).List(ctx, metav1.ListOptions{LabelSelector: selector}) @@ -289,7 +296,7 @@ func UpdateWorkflow(clientset pgo.Interface, workflowID, namespace, status strin } // PublishRestore is responsible for publishing the 'RestoreCluster' event for a restore -func PublishRestore(id, clusterName, username, namespace string) { +func PublishRestore(clusterName, username, namespace string) { topics := make([]string, 1) topics[0] = events.EventTopicCluster @@ -308,7 +315,6 @@ func PublishRestore(id, clusterName, username, namespace string) { if err != nil { log.Error(err.Error()) } - } // getPGDatabasePVCNames returns the names of all PostgreSQL database PVCs for a specific @@ -316,9 +322,12 @@ func PublishRestore(id, clusterName, username, namespace string) { // instances comprising the cluster, in addition to any additional volumes used by those // instances, e.g. 
PVCs for external WAL and/or tablespace volumes. func getPGDatabasePVCNames(clientset kubeapi.Interface, replicas *crv1.PgreplicaList, - clusterName, namespace string) ([]string, error) { + cluster *crv1.Pgcluster) ([]string, error) { ctx := context.TODO() + namespace := cluster.Namespace + clusterName := cluster.Name + // create a slice with the names of all database instances in the cluster. Even though the // original primary database (with a name matching the cluster name) might no longer exist, // add the cluster name to this list in the event that it does, along with the names of any @@ -338,9 +347,20 @@ func getPGDatabasePVCNames(clientset kubeapi.Interface, replicas *crv1.Pgreplica } var databasePVCList []string + primary := cluster.Annotations[config.ANNOTATION_CURRENT_PRIMARY] + for _, instance := range instances { for _, clusterPVC := range clusterPVCList.Items { + pvcName := clusterPVC.GetName() + + // Keep the current primary PVCs in order to attempt a pgBackRest delta restore. + // Includes the PGDATA PVC, as well as any WAL and/or tablespace PVCs if present. + if pvcName == primary || pvcName == fmt.Sprintf(walPVCPattern, primary) || + strings.HasPrefix(pvcName, fmt.Sprintf(tablespacePVCSuffixPattern, primary)) { + continue + } + if pvcName == instance || pvcName == fmt.Sprintf(walPVCPattern, instance) || strings.HasPrefix(pvcName, fmt.Sprintf(tablespacePVCSuffixPattern, instance)) { databasePVCList = append(databasePVCList, pvcName) diff --git a/internal/operator/backrest/stanza.go b/internal/operator/backrest/stanza.go index 186996abd0..ae4a2e29b8 100644 --- a/internal/operator/backrest/stanza.go +++ b/internal/operator/backrest/stanza.go @@ -1,7 +1,7 @@ package backrest /* - Copyright 2019 - 2020 Crunchy Data Solutions, Inc. + Copyright 2019 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at @@ -17,7 +17,6 @@ package backrest import ( "context" - "strings" "github.com/crunchydata/postgres-operator/internal/config" "github.com/crunchydata/postgres-operator/internal/kubeapi" @@ -58,7 +57,7 @@ func StanzaCreate(namespace, clusterName string, clientset kubeapi.Interface) { ctx := context.TODO() taskName := clusterName + "-" + crv1.PgtaskBackrestStanzaCreate - //look up the backrest-repo pod name + // look up the backrest-repo pod name selector := config.LABEL_PG_CLUSTER + "=" + clusterName + "," + config.LABEL_PGO_BACKREST_REPO + "=true" pods, err := clientset.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{LabelSelector: selector}) if len(pods.Items) != 1 { @@ -78,7 +77,7 @@ func StanzaCreate(namespace, clusterName string, clientset kubeapi.Interface) { return } - //create the stanza-create task + // create the stanza-create task spec := crv1.PgtaskSpec{} spec.Name = taskName @@ -89,36 +88,22 @@ func StanzaCreate(namespace, clusterName string, clientset kubeapi.Interface) { spec.Parameters[config.LABEL_JOB_NAME] = jobName spec.Parameters[config.LABEL_PG_CLUSTER] = clusterName spec.Parameters[config.LABEL_POD_NAME] = podName - spec.Parameters[config.LABEL_CONTAINER_NAME] = "pgo-backrest-repo" + spec.Parameters[config.LABEL_CONTAINER_NAME] = "crunchy-pgbackrest-repo" // pass along the appropriate image prefix for the backup task // this will be used by the associated backrest job - spec.Parameters[config.LABEL_IMAGE_PREFIX] = util.GetValueOrDefault(cluster.Spec.PGOImagePrefix, operator.Pgo.Pgo.PGOImagePrefix) + spec.Parameters[config.LABEL_IMAGE_PREFIX] = util.GetValueOrDefault(cluster.Spec.CCPImagePrefix, operator.Pgo.Cluster.CCPImagePrefix) spec.Parameters[config.LABEL_BACKREST_COMMAND] = crv1.PgtaskBackrestStanzaCreate + // Get 'true' or 'false' for setting the pgBackRest S3 verify TLS value + spec.Parameters[config.LABEL_BACKREST_S3_VERIFY_TLS] = operator.GetS3VerifyTLSSetting(cluster) - // Handle stanza creation for a standby cluster, which requires some additional consideration. - // This includes setting the pgBackRest storage type and command options as needed to support - // stanza creation for a standby cluster. If not a standby cluster then simply set the - // storage type and options as usual. + // Handle stanza creation for a standby cluster, which requires some + // additional consideration. + // Since the primary will not be directly accessible to the standby cluster, + // ensure the stanza is created in offline mode if cluster.Spec.Standby { - // Since this is a standby cluster, if local storage is specified then ensure stanza - // creation is for the local repo only. The stanza for the S3 repo will have already been - // created by the cluster the standby is replicating from, and therefore does not need to
- if strings.Contains(cluster.Spec.UserLabels[config.LABEL_BACKREST_STORAGE_TYPE], "local") { - spec.Parameters[config.LABEL_BACKREST_STORAGE_TYPE] = "local" - } - // Since the primary will not be directly accessible to the standby cluster, create the - // stanza in offline mode spec.Parameters[config.LABEL_BACKREST_OPTS] = "--no-online" - } else { - spec.Parameters[config.LABEL_BACKREST_STORAGE_TYPE] = - cluster.Spec.UserLabels[config.LABEL_BACKREST_STORAGE_TYPE] - spec.Parameters[config.LABEL_BACKREST_OPTS] = "" } - // Get 'true' or 'false' for setting the pgBackRest S3 verify TLS value - spec.Parameters[config.LABEL_BACKREST_S3_VERIFY_TLS] = operator.GetS3VerifyTLSSetting(cluster) - newInstance := &crv1.Pgtask{ ObjectMeta: metav1.ObjectMeta{ Name: taskName, @@ -133,5 +118,4 @@ func StanzaCreate(namespace, clusterName string, clientset kubeapi.Interface) { if err != nil { log.Error(err) } - } diff --git a/internal/operator/cluster/cluster.go b/internal/operator/cluster/cluster.go index 474e92a52a..8fe574021c 100644 --- a/internal/operator/cluster/cluster.go +++ b/internal/operator/cluster/cluster.go @@ -4,7 +4,7 @@ package cluster /* - Copyright 2017 - 2020 Crunchy Data Solutions, Inc. + Copyright 2017 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -39,6 +39,7 @@ import ( v1 "k8s.io/api/core/v1" kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" @@ -52,12 +53,20 @@ type ServiceTemplateFields struct { Port string PGBadgerPort string ExporterPort string - ServiceType string + ServiceType v1.ServiceType } -// ReplicaSuffix ... +// ReplicaSuffix is the suffix of the replica Service name const ReplicaSuffix = "-replica" +const ( + // exporterContainerName is the name of the exporter container + exporterContainerName = "exporter" + + // pgBadgerContainerName is the name of the pgBadger container + pgBadgerContainerName = "pgbadger" +) + func AddClusterBase(clientset kubeapi.Interface, cl *crv1.Pgcluster, namespace string) { ctx := context.TODO() var err error @@ -70,6 +79,14 @@ func AddClusterBase(clientset kubeapi.Interface, cl *crv1.Pgcluster, namespace s return } + // create any missing user secrets that are required to be part of the + // bootstrap + if err := createMissingUserSecrets(clientset, cl); err != nil { + log.Errorf("error creating missing user secrets: %q", err.Error()) + publishClusterCreateFailure(cl, err.Error()) + return + } + if err = addClusterCreateMissingService(clientset, cl, namespace); err != nil { log.Error("error in creating primary service " + err.Error()) publishClusterCreateFailure(cl, err.Error()) @@ -82,8 +99,28 @@ func AddClusterBase(clientset kubeapi.Interface, cl *crv1.Pgcluster, namespace s // logic following a restart of the container. // If the configmap already exists, the cluster creation will continue as this is required // for certain pgcluster upgrades. - if err = operator.CreatePGHAConfigMap(clientset, cl, namespace); err != nil && - !kerrors.IsAlreadyExists(err) { + if err := operator.CreatePGHAConfigMap(clientset, cl, + namespace); kerrors.IsAlreadyExists(err) { + if !pghaConigMapHasInitFlag(clientset, cl) { + log.Infof("found existing pgha ConfigMap for cluster %s without init flag set. 
"+ + "setting init flag to 'true'", cl.GetName()) + + // if the value is not present, update the config map + if err := operator.UpdatePGHAConfigInitFlag(clientset, true, cl.Name, cl.Namespace); err != nil { + log.Error(err) + publishClusterCreateFailure(cl, err.Error()) + return + } + } + } else if err != nil { + log.Error(err) + publishClusterCreateFailure(cl, err.Error()) + return + } + + // ensure the the pgBackRest Secret is created. If this fails, we have to + // abort + if err := backrest.CreateRepoSecret(clientset, cl); err != nil { log.Error(err) publishClusterCreateFailure(cl, err.Error()) return @@ -140,8 +177,8 @@ func AddClusterBase(clientset kubeapi.Interface, cl *crv1.Pgcluster, namespace s log.Error("error in pvcname patch " + err.Error()) } - //publish create cluster event - //capture the cluster creation event + // publish create cluster event + // capture the cluster creation event pgouser := cl.ObjectMeta.Labels[config.LABEL_PGOUSER] topics := make([]string, 1) topics[0] = events.EventTopicCluster @@ -174,17 +211,20 @@ func AddClusterBase(clientset kubeapi.Interface, cl *crv1.Pgcluster, namespace s publishClusterCreateFailure(cl, err.Error()) return } - //create a CRD for each replica + // create a CRD for each replica for i := 0; i < replicaCount; i++ { spec := crv1.PgreplicaSpec{} - //get the storage config + // get the storage config spec.ReplicaStorage = cl.Spec.ReplicaStorage spec.UserLabels = cl.Spec.UserLabels - //the replica should not use the same node labels as the primary - spec.UserLabels[config.LABEL_NODE_LABEL_KEY] = "" - spec.UserLabels[config.LABEL_NODE_LABEL_VALUE] = "" + // if the primary cluster has default node affinity rules set, we need + // to honor them in the spec. if a different affinity is desired, the + // replica needs to set its own rules + if cl.Spec.NodeAffinity.Default != nil { + spec.NodeAffinity = cl.Spec.NodeAffinity.Default + } labels := make(map[string]string) labels[config.LABEL_PG_CLUSTER] = cl.Spec.Name @@ -223,8 +263,16 @@ func AddClusterBootstrap(clientset kubeapi.Interface, cluster *crv1.Pgcluster) e ctx := context.TODO() namespace := cluster.GetNamespace() - if err := operator.CreatePGHAConfigMap(clientset, cluster, namespace); err != nil && - !kerrors.IsAlreadyExists(err) { + var err error + + if err = operator.CreatePGHAConfigMap(clientset, cluster, + namespace); kerrors.IsAlreadyExists(err) { + log.Infof("found existing pgha ConfigMap for cluster %s, setting init flag to 'true'", + cluster.GetName()) + err = operator.UpdatePGHAConfigInitFlag(clientset, true, cluster.Name, cluster.Namespace) + } + if err != nil { + log.Error(err) publishClusterCreateFailure(cluster, err.Error()) return err } @@ -298,38 +346,6 @@ func AddBootstrapRepo(clientset kubernetes.Interface, cluster *crv1.Pgcluster) ( return } -// DeleteClusterBase ... -func DeleteClusterBase(clientset kubernetes.Interface, cl *crv1.Pgcluster, namespace string) { - - DeleteCluster(clientset, cl, namespace) - - //delete any existing configmaps - if err := deleteConfigMaps(clientset, cl.Spec.Name, namespace); err != nil { - log.Error(err) - } - - //delete any existing pgtasks ??? 
- //publish delete cluster event - topics := make([]string, 1) - topics[0] = events.EventTopicCluster - - f := events.EventDeleteClusterFormat{ - EventHeader: events.EventHeader{ - Namespace: namespace, - Username: cl.ObjectMeta.Labels[config.LABEL_PGOUSER], - Topic: topics, - Timestamp: time.Now(), - EventType: events.EventDeleteCluster, - }, - Clustername: cl.Spec.Name, - } - - if err := events.Publish(f); err != nil { - log.Error(err) - } -} - // ScaleBase ... func ScaleBase(clientset kubeapi.Interface, replica *crv1.Pgreplica, namespace string) { ctx := context.TODO() @@ -339,7 +355,7 @@ func ScaleBase(clientset kubeapi.Interface, replica *crv1.Pgreplica, namespace s return } - //get the pgcluster CRD to base the replica off of + // get the pgcluster CRD to base the replica off of cluster, err := clientset.CrunchydataV1().Pgclusters(namespace). Get(ctx, replica.Spec.ClusterName, metav1.GetOptions{}) if err != nil { @@ -354,7 +370,7 @@ func ScaleBase(clientset kubeapi.Interface, replica *crv1.Pgreplica, namespace s return } - //update the replica CRD pvcname + // update the replica CRD pvcname patch, err := kubeapi.NewJSONPatch().Add("spec", "replicastorage", "name")(dataVolume.PersistentVolumeClaimName).Bytes() if err == nil { log.Debugf("patching replica %s: %s", replica.Spec.Name, patch) @@ -365,20 +381,20 @@ func ScaleBase(clientset kubeapi.Interface, replica *crv1.Pgreplica, namespace s log.Error("error in pvcname patch " + err.Error()) } - //create the replica service if it doesnt exist + // create the replica service if it doesn't exist if err = scaleReplicaCreateMissingService(clientset, replica, cluster, namespace); err != nil { log.Error(err) publishScaleError(namespace, replica.ObjectMeta.Labels[config.LABEL_PGOUSER], cluster) return } - //instantiate the replica + // instantiate the replica if err = scaleReplicaCreateDeployment(clientset, replica, cluster, namespace, dataVolume, walVolume, tablespaceVolumes); err != nil { publishScaleError(namespace, replica.ObjectMeta.Labels[config.LABEL_PGOUSER], cluster) return } - //update the replica CRD status + // update the replica CRD status patch, err = kubeapi.NewJSONPatch().Add("spec", "status")(crv1.CompletedStatus).Bytes() if err == nil { log.Debugf("patching replica %s: %s", replica.Spec.Name, patch) @@ -389,7 +405,7 @@ func ScaleBase(clientset kubeapi.Interface, replica *crv1.Pgreplica, namespace s log.Error("error in status patch " + err.Error()) } - //publish event for replica creation + // publish event for replica creation topics := make([]string, 1) topics[0] = events.EventTopicCluster @@ -414,16 +430,16 @@ func ScaleBase(clientset kubeapi.Interface, replica *crv1.Pgreplica, namespace s func ScaleDownBase(clientset kubeapi.Interface, replica *crv1.Pgreplica, namespace string) { ctx := context.TODO() - //get the pgcluster CRD for this replica + // get the pgcluster CRD for this replica _, err := clientset.CrunchydataV1().Pgclusters(namespace).
Get(ctx, replica.Spec.ClusterName, metav1.GetOptions{}) if err != nil { return } - DeleteReplica(clientset, replica, namespace) + _ = DeleteReplica(clientset, replica, namespace) - //publish event for scale down + // publish event for scale down topics := make([]string, 1) topics[0] = events.EventTopicCluster @@ -443,115 +459,66 @@ func ScaleDownBase(clientset kubeapi.Interface, replica *crv1.Pgreplica, namespa log.Error(err.Error()) return } - } // UpdateAnnotations updates the annotations in the "template" portion of a // PostgreSQL deployment -func UpdateAnnotations(clientset kubernetes.Interface, restConfig *rest.Config, - cluster *crv1.Pgcluster, annotations map[string]string) error { - ctx := context.TODO() - var updateError error - - // first, get a list of all of the instance deployments for the cluster - deployments, err := operator.GetInstanceDeployments(clientset, cluster) +func UpdateAnnotations(clientset kubeapi.Interface, cluster *crv1.Pgcluster, deployment *apps_v1.Deployment) error { + log.Debugf("update annotations on [%s]", deployment.Name) + annotations := map[string]string{} - if err != nil { - return err + // store the global annotations first + for k, v := range cluster.Spec.Annotations.Global { + annotations[k] = v } - // now update each deployment with the new annotations - for _, deployment := range deployments.Items { - log.Debugf("update annotations on [%s]", deployment.Name) - log.Debugf("new annotations: %v", annotations) + // then store the postgres specific annotations + for k, v := range cluster.Spec.Annotations.Postgres { + annotations[k] = v + } - deployment.Spec.Template.ObjectMeta.SetAnnotations(annotations) + log.Debugf("new annotations: %v", annotations) - // Before applying the update, we want to explicitly stop PostgreSQL on each - // instance. This prevents PostgreSQL from having to boot up in crash - // recovery mode. - // - // If an error is returned, we only issue a warning - if err := stopPostgreSQLInstance(clientset, restConfig, deployment); err != nil { - log.Warn(err) - } + // set the annotations on the deployment object + deployment.Spec.Template.ObjectMeta.SetAnnotations(annotations) - // finally, update the Deployment. If something errors, we'll log that there - // was an error, but continue with processing the other deployments - if _, err := clientset.AppsV1().Deployments(deployment.Namespace). - Update(ctx, &deployment, metav1.UpdateOptions{}); err != nil { - log.Error(err) - updateError = err - } - } - - return updateError + return nil } // UpdateResources updates the PostgreSQL instance Deployments to reflect the // update resources (i.e. 
CPU, memory) -func UpdateResources(clientset kubernetes.Interface, restConfig *rest.Config, cluster *crv1.Pgcluster) error { - ctx := context.TODO() - - // get a list of all of the instance deployments for the cluster - deployments, err := operator.GetInstanceDeployments(clientset, cluster) - - if err != nil { - return err - } - +func UpdateResources(clientset kubeapi.Interface, cluster *crv1.Pgcluster, deployment *apps_v1.Deployment) error { // iterate through each PostgreSQL instance deployment and update the // resource values for the database or exporter containers - // - // NOTE: a future version (near future) will first try to detect the primary - // so that all the replicas are updated first, and then the primary gets the - // update - for _, deployment := range deployments.Items { - // now, iterate through each container within that deployment - for index, container := range deployment.Spec.Template.Spec.Containers { - // first check for the database container - if container.Name == "database" { - // first, initialize the requests/limits resource to empty Resource Lists - deployment.Spec.Template.Spec.Containers[index].Resources.Requests = v1.ResourceList{} - deployment.Spec.Template.Spec.Containers[index].Resources.Limits = v1.ResourceList{} - - // now, simply deep copy the values from the CRD - if cluster.Spec.Resources != nil { - deployment.Spec.Template.Spec.Containers[index].Resources.Requests = cluster.Spec.Resources.DeepCopy() - } - - if cluster.Spec.Limits != nil { - deployment.Spec.Template.Spec.Containers[index].Resources.Limits = cluster.Spec.Limits.DeepCopy() - } - // next, check for the exporter container - } else if container.Name == "exporter" { - // first, initialize the requests/limits resource to empty Resource Lists - deployment.Spec.Template.Spec.Containers[index].Resources.Requests = v1.ResourceList{} - deployment.Spec.Template.Spec.Containers[index].Resources.Limits = v1.ResourceList{} - - // now, simply deep copy the values from the CRD - if cluster.Spec.ExporterResources != nil { - deployment.Spec.Template.Spec.Containers[index].Resources.Requests = cluster.Spec.ExporterResources.DeepCopy() - } - - if cluster.Spec.ExporterLimits != nil { - deployment.Spec.Template.Spec.Containers[index].Resources.Limits = cluster.Spec.ExporterLimits.DeepCopy() - } + for index, container := range deployment.Spec.Template.Spec.Containers { + // first check for the database container + if container.Name == "database" { + // first, initialize the requests/limits resource to empty Resource Lists + deployment.Spec.Template.Spec.Containers[index].Resources.Requests = v1.ResourceList{} + deployment.Spec.Template.Spec.Containers[index].Resources.Limits = v1.ResourceList{} + + // now, simply deep copy the values from the CRD + if cluster.Spec.Resources != nil { + deployment.Spec.Template.Spec.Containers[index].Resources.Requests = cluster.Spec.Resources.DeepCopy() + } + if cluster.Spec.Limits != nil { + deployment.Spec.Template.Spec.Containers[index].Resources.Limits = cluster.Spec.Limits.DeepCopy() + } + // next, check for the exporter container + } else if container.Name == "exporter" { + // first, initialize the requests/limits resource to empty Resource Lists + deployment.Spec.Template.Spec.Containers[index].Resources.Requests = v1.ResourceList{} + deployment.Spec.Template.Spec.Containers[index].Resources.Limits = v1.ResourceList{} + + // now, simply deep copy the values from the CRD + if cluster.Spec.ExporterResources != nil { + 
deployment.Spec.Template.Spec.Containers[index].Resources.Requests = cluster.Spec.ExporterResources.DeepCopy() + } + + if cluster.Spec.ExporterLimits != nil { + deployment.Spec.Template.Spec.Containers[index].Resources.Limits = cluster.Spec.ExporterLimits.DeepCopy() } - } - // Before applying the update, we want to explicitly stop PostgreSQL on each - // instance. This prevents PostgreSQL from having to boot up in crash - // recovery mode. - // - // If an error is returned, we only issue a warning - if err := stopPostgreSQLInstance(clientset, restConfig, deployment); err != nil { - log.Warn(err) - } - // update the deployment with the new values - if _, err := clientset.AppsV1().Deployments(deployment.Namespace). - Update(ctx, &deployment, metav1.UpdateOptions{}); err != nil { - return err } } @@ -560,120 +527,117 @@ func UpdateResources(clientset kubernetes.Interface, restConfig *rest.Config, cl // UpdateTablespaces updates the PostgreSQL instance Deployments to update // what tablespaces are mounted. -// Though any new tablespaces are present in the CRD, to attempt to do less work -// this function takes a map of the new tablespaces that are being added, so we -// only have to check and create the PVCs that are being mounted at this time -// -// To do this, iterate through the tablespace mount map that is present in the -// new cluster. -func UpdateTablespaces(clientset kubernetes.Interface, restConfig *rest.Config, - cluster *crv1.Pgcluster, newTablespaces map[string]crv1.PgStorageSpec) error { - ctx := context.TODO() - - // first, get a list of all of the instance deployments for the cluster - deployments, err := operator.GetInstanceDeployments(clientset, cluster) +func UpdateTablespaces(clientset kubeapi.Interface, cluster *crv1.Pgcluster, deployment *apps_v1.Deployment) error { + // update the volume portion of the Deployment spec to reflect all of the + // available tablespaces + for tablespaceName, storageSpec := range cluster.Spec.TablespaceMounts { + // go through the volume list and see if there is already a volume for this + // if there is, skip + found := false + volumeName := operator.GetTablespaceVolumeName(tablespaceName) + + for _, volume := range deployment.Spec.Template.Spec.Volumes { + if volume.Name == volumeName { + found = true + break + } + } - if err != nil { - return err - } + if found { + continue + } - tablespaceVolumes := make([]map[string]operator.StorageResult, len(deployments.Items)) + // create the volume definition for the tablespace + storageResult := operator.StorageResult{ + PersistentVolumeClaimName: operator.GetTablespacePVCName(deployment.Name, tablespaceName), + SupplementalGroups: storageSpec.GetSupplementalGroups(), + } - // now we can start creating the new tablespaces! First, create the new - // PVCs. 
The PVCs are created for each **instance** in the cluster, as every - // instance needs to have a distinct PVC for each tablespace - for i, deployment := range deployments.Items { - tablespaceVolumes[i] = make(map[string]operator.StorageResult) + volume := v1.Volume{ + Name: volumeName, + VolumeSource: storageResult.VolumeSource(), + } - for tablespaceName, storageSpec := range newTablespaces { - // get the name of the tablespace PVC for that instance - tablespacePVCName := operator.GetTablespacePVCName(deployment.Name, tablespaceName) + // add the volume to the list of volumes + deployment.Spec.Template.Spec.Volumes = append(deployment.Spec.Template.Spec.Volumes, volume) - log.Debugf("creating tablespace PVC [%s] for [%s]", tablespacePVCName, deployment.Name) + // now add the volume mount point to that of the database container + volumeMount := v1.VolumeMount{ + MountPath: fmt.Sprintf("%s%s", config.VOLUME_TABLESPACE_PATH_PREFIX, tablespaceName), + Name: volumeName, + } - // and now create it! If it errors, we just need to return, which - // potentially leaves things in an inconsistent state, but at this point - // only PVC objects have been created - tablespaceVolumes[i][tablespaceName], err = pvc.CreateIfNotExists(clientset, - storageSpec, tablespacePVCName, cluster.Name, cluster.Namespace) - if err != nil { - return err - } + // we can do this as we always know that the "database" container is the + // first container in the list + deployment.Spec.Template.Spec.Containers[0].VolumeMounts = append( + deployment.Spec.Template.Spec.Containers[0].VolumeMounts, volumeMount) + + // add any supplemental groups specified in storage configuration. + // SecurityContext is always initialized because we use fsGroup. + deployment.Spec.Template.Spec.SecurityContext.SupplementalGroups = append( + deployment.Spec.Template.Spec.SecurityContext.SupplementalGroups, + storageResult.SupplementalGroups...) 
+ } + + // find the "PGHA_TABLESPACES" value and update it with the new tablespace + // name list + ok := false + for i, envVar := range deployment.Spec.Template.Spec.Containers[0].Env { + // yup, it's an old fashioned linear time lookup + if envVar.Name == "PGHA_TABLESPACES" { + deployment.Spec.Template.Spec.Containers[0].Env[i].Value = operator.GetTablespaceNames( + cluster.Spec.TablespaceMounts) + ok = true } } - // now the fun step: update each deployment with the new volumes - for i, deployment := range deployments.Items { - log.Debugf("attach tablespace volumes to [%s]", deployment.Name) - - // iterate through each table space and prepare the Volume and - // VolumeMount clause for each instance - for tablespaceName := range newTablespaces { - // this is the volume to be added for the tablespace - volume := v1.Volume{ - Name: operator.GetTablespaceVolumeName(tablespaceName), - VolumeSource: tablespaceVolumes[i][tablespaceName].VolumeSource(), - } - - // add the volume to the list of volumes - deployment.Spec.Template.Spec.Volumes = append(deployment.Spec.Template.Spec.Volumes, volume) - - // now add the volume mount point to that of the database container - volumeMount := v1.VolumeMount{ - MountPath: fmt.Sprintf("%s%s", config.VOLUME_TABLESPACE_PATH_PREFIX, tablespaceName), - Name: operator.GetTablespaceVolumeName(tablespaceName), - } + // if its not found, we need to add it to the env + if !ok { + envVar := v1.EnvVar{ + Name: "PGHA_TABLESPACES", + Value: operator.GetTablespaceNames(cluster.Spec.TablespaceMounts), + } + deployment.Spec.Template.Spec.Containers[0].Env = append(deployment.Spec.Template.Spec.Containers[0].Env, envVar) + } - // we can do this as we always know that the "database" container is the - // first container in the list - deployment.Spec.Template.Spec.Containers[0].VolumeMounts = append( - deployment.Spec.Template.Spec.Containers[0].VolumeMounts, volumeMount) + return nil +} - // add any supplemental groups specified in storage configuration. - // SecurityContext is always initialized because we use fsGroup. - deployment.Spec.Template.Spec.SecurityContext.SupplementalGroups = append( - deployment.Spec.Template.Spec.SecurityContext.SupplementalGroups, - tablespaceVolumes[i][tablespaceName].SupplementalGroups...) - } +// UpdateTolerations updates the Toleration definition for a Deployment. +// However, we have to check if the Deployment is based on a pgreplica Spec -- +// if it is, we need to determine if there are any instance specific tolerations +// defined on that +func UpdateTolerations(clientset kubeapi.Interface, cluster *crv1.Pgcluster, deployment *apps_v1.Deployment) error { + ctx := context.TODO() - // find the "PGHA_TABLESPACES" value and update it with the new tablespace - // name list - ok := false - for i, envVar := range deployment.Spec.Template.Spec.Containers[0].Env { - // yup, it's an old fashioned linear time lookup - if envVar.Name == "PGHA_TABLESPACES" { - deployment.Spec.Template.Spec.Containers[0].Env[i].Value = operator.GetTablespaceNames( - cluster.Spec.TablespaceMounts) - ok = true - } - } + // determine if this instance is based on the pgcluster or a pgreplica. 
if + // it is based on the pgcluster, we can apply the tolerations and exit early + if deployment.Name == cluster.Name { + deployment.Spec.Template.Spec.Tolerations = cluster.Spec.Tolerations + return nil + } - // if its not found, we need to add it to the env - if !ok { - envVar := v1.EnvVar{ - Name: "PGHA_TABLESPACES", - Value: operator.GetTablespaceNames(cluster.Spec.TablespaceMounts), - } - deployment.Spec.Template.Spec.Containers[0].Env = append(deployment.Spec.Template.Spec.Containers[0].Env, envVar) - } + // ok, so this is based on a pgreplica. Let's try to find it. + instance, err := clientset.CrunchydataV1().Pgreplicas(cluster.Namespace).Get(ctx, deployment.Name, metav1.GetOptions{}) - // Before applying the update, we want to explicitly stop PostgreSQL on each - // instance. This prevents PostgreSQL from having to boot up in crash - // recovery mode. - // - // If an error is returned, we only issue a warning - if err := stopPostgreSQLInstance(clientset, restConfig, deployment); err != nil { - log.Warn(err) - } + // if we error, log it and return, as this error will interrupt a rolling update + if err != nil { + log.Error(err) + return err + } - // finally, update the Deployment. Potential to put things into an - // inconsistent state if any of these updates fail - if _, err := clientset.AppsV1().Deployments(deployment.Namespace). - Update(ctx, &deployment, metav1.UpdateOptions{}); err != nil { - return err - } + // "replica" instances can have toleration overrides. these get managed as + // part of the pgreplicas controller, not here. as such, if this "replica" + // instance has specific toleration overrides, we will exit here so we do not + // apply the cluster-wide tolerations + if len(instance.Spec.Tolerations) != 0 { + return nil } + // otherwise, the tolerations set on the cluster instance are available to + // all instances, so set the value and return + deployment.Spec.Template.Spec.Tolerations = cluster.Spec.Tolerations + return nil } @@ -698,7 +662,7 @@ func annotateBackrestSecret(clientset kubernetes.Interface, cluster *crv1.Pgclus secretName := fmt.Sprintf(util.BackrestRepoSecretName, clusterName) patch, err := kubeapi.NewMergePatch().Add("metadata", "annotations")(map[string]string{ config.ANNOTATION_PG_PORT: cluster.Spec.Port, - config.ANNOTATION_REPO_PATH: util.GetPGBackRestRepoPath(*cluster), + config.ANNOTATION_REPO_PATH: operator.GetPGBackRestRepoPath(cluster), config.ANNOTATION_S3_BUCKET: cfg(cl.BackrestS3Bucket, op.BackrestS3Bucket), config.ANNOTATION_S3_ENDPOINT: cfg(cl.BackrestS3Endpoint, op.BackrestS3Endpoint), config.ANNOTATION_S3_REGION: cfg(cl.BackrestS3Region, op.BackrestS3Region), @@ -716,21 +680,82 @@ func annotateBackrestSecret(clientset kubernetes.Interface, cluster *crv1.Pgclus return err } -func deleteConfigMaps(clientset kubernetes.Interface, clusterName, ns string) error { +// createMissingUserSecret is the heart of trying to determine if a user secret +// is missing, and if it is, creating it. 
Requires the name of the
+// user whose Secret should be checked; the Secret name itself is derived from
+// the cluster and username via crv1.UserSecretName, e.g.
+//
+//	createMissingUserSecret(clientset, cluster, crv1.PGUserSuperuser)
+func createMissingUserSecret(clientset kubernetes.Interface, cluster *crv1.Pgcluster, username string) error {
 	ctx := context.TODO()
-	label := fmt.Sprintf("pg-cluster=%s", clusterName)
-	list, err := clientset.CoreV1().ConfigMaps(ns).List(ctx, metav1.ListOptions{LabelSelector: label})
+
+	// derive the secret name
+	secretName := crv1.UserSecretName(cluster, username)
+
+	// if the secret already exists, skip it;
+	// if it returns an error other than "not found", return an error
+	if _, err := clientset.CoreV1().Secrets(cluster.Spec.Namespace).Get(
+		ctx, secretName, metav1.GetOptions{}); err == nil {
+		log.Infof("user secret %q exists for user %q for cluster %q",
+			secretName, username, cluster.Spec.Name)
+		return nil
+	} else if !kerrors.IsNotFound(err) {
+		return err
+	}
+
+	// alright, so we have to create the secret
+	// if the password fails to generate, return an error
+	passwordLength := util.GeneratedPasswordLength(operator.Pgo.Cluster.PasswordLength)
+	password, err := util.GeneratePassword(passwordLength)
 	if err != nil {
-		return fmt.Errorf("No configMaps found for selector: %s", label)
+		return err
 	}
 
-	for _, configmap := range list.Items {
-		err := clientset.CoreV1().ConfigMaps(ns).Delete(ctx, configmap.Name, metav1.DeleteOptions{})
-		if err != nil {
-			return err
-		}
+	// great, now we can create the secret! if we can't, return an error
+	return util.CreateSecret(clientset, cluster.Spec.Name, secretName,
+		username, password, cluster.Spec.Namespace)
+}
+
+// createMissingUserSecrets checks to see if there are secrets for the
+// superuser (postgres), replication user (primaryuser), and a standard postgres
+// user for the given cluster. Each of these is created if it does not
+// currently exist
+func createMissingUserSecrets(clientset kubernetes.Interface, cluster *crv1.Pgcluster) error {
+	// first, determine if we need to create a user secret for the postgres
+	// superuser
+	if err := createMissingUserSecret(clientset, cluster, crv1.PGUserSuperuser); err != nil {
+		return err
 	}
-	return nil
+
+	// next, determine if we need to create a user secret for the replication user
+	if err := createMissingUserSecret(clientset, cluster, crv1.PGUserReplication); err != nil {
+		return err
+	}
+
+	// finally, determine if we need to create a user secret for the regular user
+	return createMissingUserSecret(clientset, cluster, cluster.Spec.User)
+}
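+
+// For illustration: given a pgcluster named "hippo" whose Spec.User is also
+// "hippo" (names here are hypothetical), the three checks above amount to
+//
+//	createMissingUserSecret(clientset, cluster, crv1.PGUserSuperuser)
+//	createMissingUserSecret(clientset, cluster, crv1.PGUserReplication)
+//	createMissingUserSecret(clientset, cluster, "hippo")
+//
+// and because each call is a no-op when the Secret already exists,
+// createMissingUserSecrets is safe to run on every cluster sync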
+
+// pghaConigMapHasInitFlag checks to see if the PostgreSQL ConfigMap has the
+// PGHA init flag. Returns true if the flag is present, false otherwise.
+// If any function calls have an error, we will log that error and return false
+func pghaConigMapHasInitFlag(clientset kubernetes.Interface, cluster *crv1.Pgcluster) bool {
+	ctx := context.TODO()
+
+	// load the PGHA config map for this cluster. This more or less assumes that
+	// it exists
+	configMapName := fmt.Sprintf("%s-%s", cluster.Name, operator.PGHAConfigMapSuffix)
+	configMap, err := clientset.CoreV1().ConfigMaps(cluster.Namespace).Get(ctx, configMapName, metav1.GetOptions{})
+
+	// if there is an error getting the ConfigMap, log the error and return
+	if err != nil {
+		log.Error(err)
+		return false
+	}
+
+	// determine if the init flag is set, regardless of whether it is true or false
+	_, ok := configMap.Data[operator.PGHAConfigInitSetting]
+
+	return ok
 }
 
 func publishClusterCreateFailure(cl *crv1.Pgcluster, errorMsg string) {
@@ -758,10 +783,9 @@ func publishClusterCreateFailure(cl *crv1.Pgcluster, errorMsg string) {
 }
 
 func publishClusterShutdown(cluster crv1.Pgcluster) error {
-
 	clusterName := cluster.Name
 
-	//capture the cluster creation event
+	// capture the cluster shutdown event
 	topics := make([]string, 1)
 	topics[0] = events.EventTopicCluster
 
@@ -794,7 +818,10 @@ func stopPostgreSQLInstance(clientset kubernetes.Interface, restConfig *rest.Con
 	// First, attempt to get the PostgreSQL instance Pod attached to this
 	// particular deployment
 	selector := fmt.Sprintf("%s=%s", config.LABEL_DEPLOYMENT_NAME, deployment.Name)
-	pods, err := clientset.CoreV1().Pods(deployment.Namespace).List(ctx, metav1.ListOptions{LabelSelector: selector})
+	pods, err := clientset.CoreV1().Pods(deployment.Namespace).List(ctx, metav1.ListOptions{
+		FieldSelector: fields.OneTermEqualSelector("status.phase", string(v1.PodRunning)).String(),
+		LabelSelector: selector,
+	})
 
 	// if there is a bona fide error, return.
 	// However, if no Pods are found, issue a warning, but do not return an error
diff --git a/internal/operator/cluster/clusterlogic.go b/internal/operator/cluster/clusterlogic.go
index dd8a9e6827..70f40c4711 100644
--- a/internal/operator/cluster/clusterlogic.go
+++ b/internal/operator/cluster/clusterlogic.go
@@ -4,7 +4,7 @@ package cluster
 
 /*
- Copyright 2019 - 2020 Crunchy Data Solutions, Inc.
+ Copyright 2019 - 2023 Crunchy Data Solutions, Inc.
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
 You may obtain a copy of the License at
@@ -43,44 +43,43 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/fields"
 	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/client-go/kubernetes"
 )
 
 // addClusterCreateMissingService creates a service for the cluster primary if
 // it does not yet exist.
-func addClusterCreateMissingService(clientset kubernetes.Interface, cl *crv1.Pgcluster, namespace string) error { - st := operator.Pgo.Cluster.ServiceType - if cl.Spec.UserLabels[config.LABEL_SERVICE_TYPE] != "" { - st = cl.Spec.UserLabels[config.LABEL_SERVICE_TYPE] - } +func addClusterCreateMissingService(clientset kubernetes.Interface, cluster *crv1.Pgcluster, namespace string) error { + // start with the default value for ServiceType + serviceType := config.DefaultServiceType - // create the primary service - serviceFields := ServiceTemplateFields{ - Name: cl.Spec.Name, - ServiceName: cl.Spec.Name, - ClusterName: cl.Spec.Name, - Port: cl.Spec.Port, - ServiceType: st, + // then see if there is a configuration provided value + if operator.Pgo.Cluster.ServiceType != "" { + serviceType = operator.Pgo.Cluster.ServiceType } - // only add references to the exporter / pgBadger ports - clusterLabels := cl.ObjectMeta.GetLabels() + // then see if there is an override on the custom resource definition + if cluster.Spec.ServiceType != "" { + serviceType = cluster.Spec.ServiceType + } - if val, ok := clusterLabels[config.LABEL_BADGER]; ok && val == config.LABEL_TRUE { - serviceFields.PGBadgerPort = cl.Spec.PGBadgerPort + // create the primary service + serviceFields := ServiceTemplateFields{ + Name: cluster.Spec.Name, + ServiceName: cluster.Spec.Name, + ClusterName: cluster.Spec.Name, + Port: cluster.Spec.Port, + ServiceType: serviceType, } - // ...due to legacy reasons, the exporter label may not be available yet in the - // main labels. so we will check here first, and then check the user labels - if val, ok := clusterLabels[config.LABEL_EXPORTER]; ok && val == config.LABEL_TRUE { - serviceFields.ExporterPort = cl.Spec.ExporterPort + // set the pgBadger port if pgBadger is enabled + if cluster.Spec.PGBadger { + serviceFields.PGBadgerPort = cluster.Spec.PGBadgerPort } - // ...this condition should be targeted for removal in the future - if cl.Spec.UserLabels != nil { - if val, ok := cl.Spec.UserLabels[config.LABEL_EXPORTER]; ok && val == config.LABEL_TRUE { - serviceFields.ExporterPort = cl.Spec.ExporterPort - } + // set the exporter port if exporter is enabled + if cluster.Spec.Exporter { + serviceFields.ExporterPort = cluster.Spec.ExporterPort } return CreateService(clientset, &serviceFields, namespace) @@ -93,21 +92,22 @@ func addClusterBootstrapJob(clientset kubeapi.Interface, tablespaceVolumes map[string]operator.StorageResult) error { ctx := context.TODO() - bootstrapFields, err := getBootstrapJobFields(clientset, cl, dataVolume, walVolume, + bootstrapFields, err := getBootstrapJobFields(clientset, cl, dataVolume, tablespaceVolumes) if err != nil { return err } + if operator.CRUNCHY_DEBUG { + _ = config.BootstrapTemplate.Execute(os.Stdout, bootstrapFields) + } + var bootstrapSpec bytes.Buffer + if err := config.BootstrapTemplate.Execute(&bootstrapSpec, bootstrapFields); err != nil { return err } - if operator.CRUNCHY_DEBUG { - config.DeploymentTemplate.Execute(os.Stdout, bootstrapFields) - } - job := &batchv1.Job{} if err := json.Unmarshal(bootstrapSpec.Bytes(), job); err != nil { return err @@ -141,17 +141,17 @@ func addClusterDeployments(clientset kubeapi.Interface, } deploymentFields := getClusterDeploymentFields(clientset, cl, - dataVolume, walVolume, tablespaceVolumes) + dataVolume, tablespaceVolumes) + + if operator.CRUNCHY_DEBUG { + _ = config.DeploymentTemplate.Execute(os.Stdout, deploymentFields) + } var primaryDoc bytes.Buffer if err := config.DeploymentTemplate.Execute(&primaryDoc, 
deploymentFields); err != nil { return err } - if operator.CRUNCHY_DEBUG { - config.DeploymentTemplate.Execute(os.Stdout, deploymentFields) - } - deployment := &appsv1.Deployment{} if err := json.Unmarshal(primaryDoc.Bytes(), deployment); err != nil { return err @@ -177,7 +177,7 @@ func addClusterDeployments(clientset kubeapi.Interface, // getBootstrapJobFields obtains the fields needed to populate the cluster bootstrap job template func getBootstrapJobFields(clientset kubeapi.Interface, - cluster *crv1.Pgcluster, dataVolume, walVolume operator.StorageResult, + cluster *crv1.Pgcluster, dataVolume operator.StorageResult, tablespaceVolumes map[string]operator.StorageResult) (operator.BootstrapJobTemplateFields, error) { ctx := context.TODO() @@ -186,7 +186,7 @@ func getBootstrapJobFields(clientset kubeapi.Interface, bootstrapFields := operator.BootstrapJobTemplateFields{ DeploymentTemplateFields: getClusterDeploymentFields(clientset, cluster, dataVolume, - walVolume, tablespaceVolumes), + tablespaceVolumes), RestoreFrom: cluster.Spec.PGDataSource.RestoreFrom, RestoreOpts: restoreOpts[1 : len(restoreOpts)-1], } @@ -235,7 +235,7 @@ func getBootstrapJobFields(clientset kubeapi.Interface, // Now override any backrest env vars for the bootstrap job bootstrapBackrestVars, err := operator.GetPgbackrestBootstrapEnvVars(restoreClusterName, - cluster.GetName(), restoreFromSecret) + cluster.GetAnnotations()[config.ANNOTATION_CURRENT_PRIMARY], restoreFromSecret) if err != nil { return bootstrapFields, err } @@ -257,9 +257,8 @@ func getBootstrapJobFields(clientset kubeapi.Interface, // getClusterDeploymentFields obtains the fields needed to populate the cluster deployment template func getClusterDeploymentFields(clientset kubernetes.Interface, - cl *crv1.Pgcluster, dataVolume, walVolume operator.StorageResult, + cl *crv1.Pgcluster, dataVolume operator.StorageResult, tablespaceVolumes map[string]operator.StorageResult) operator.DeploymentTemplateFields { - namespace := cl.GetNamespace() log.Infof("creating Pgcluster %s in namespace %s", cl.Name, namespace) @@ -277,12 +276,24 @@ func getClusterDeploymentFields(clientset kubernetes.Interface, } cl.Spec.UserLabels[config.LABEL_PGOUSER] = cl.ObjectMeta.Labels[config.LABEL_PGOUSER] - cl.Spec.UserLabels[config.LABEL_PG_CLUSTER_IDENTIFIER] = cl.ObjectMeta.Labels[config.LABEL_PG_CLUSTER_IDENTIFIER] // Set the Patroni scope to the name of the primary deployment. Replicas will get scope using the // 'crunchy-pgha-scope' label on the pgcluster cl.Spec.UserLabels[config.LABEL_PGHA_SCOPE] = cl.Spec.Name + // If applicable, set the exporter labels, used for the scrapers, and create + // the secret. We don't need to take any additional actions, as the cluster + // creation process will handle those. Magic! + if cl.Spec.Exporter { + cl.Spec.UserLabels[config.LABEL_EXPORTER] = config.LABEL_TRUE + + log.Debugf("creating exporter secret for cluster %s", cl.Spec.Name) + + if _, err := CreateExporterSecret(clientset, cl); err != nil { + log.Error(err) + } + } + // set up a map of the names of the tablespaces as well as the storage classes tablespaceStorageTypeMap := operator.GetTablespaceStorageTypeMap(cl.Spec.TablespaceMounts) @@ -293,86 +304,75 @@ func getClusterDeploymentFields(clientset kubernetes.Interface, supplementalGroups = append(supplementalGroups, v.SupplementalGroups...) 
} - //create the primary deployment + // create the primary deployment deploymentFields := operator.DeploymentTemplateFields{ - Name: cl.Annotations[config.ANNOTATION_CURRENT_PRIMARY], - IsInit: true, - Replicas: "0", - ClusterName: cl.Spec.Name, - Port: cl.Spec.Port, - CCPImagePrefix: util.GetValueOrDefault(cl.Spec.CCPImagePrefix, operator.Pgo.Cluster.CCPImagePrefix), - CCPImage: cl.Spec.CCPImage, - CCPImageTag: cl.Spec.CCPImageTag, - PVCName: dataVolume.InlineVolumeSource(), - DeploymentLabels: operator.GetLabelsFromMap(cl.Spec.UserLabels), - PodAnnotations: operator.GetAnnotations(cl, crv1.ClusterAnnotationPostgres), - PodLabels: operator.GetLabelsFromMap(cl.Spec.UserLabels), - DataPathOverride: cl.Annotations[config.ANNOTATION_CURRENT_PRIMARY], - Database: cl.Spec.Database, - SecurityContext: operator.GetPodSecurityContext(supplementalGroups), - RootSecretName: cl.Spec.RootSecretName, - PrimarySecretName: cl.Spec.PrimarySecretName, - UserSecretName: cl.Spec.UserSecretName, - NodeSelector: operator.GetAffinity(cl.Spec.UserLabels["NodeLabelKey"], cl.Spec.UserLabels["NodeLabelValue"], "In"), - PodAntiAffinity: operator.GetPodAntiAffinity(cl, crv1.PodAntiAffinityDeploymentDefault, cl.Spec.PodAntiAffinity.Default), - ContainerResources: operator.GetResourcesJSON(cl.Spec.Resources, cl.Spec.Limits), - ConfVolume: operator.GetConfVolume(clientset, cl, namespace), - ExporterAddon: operator.GetExporterAddon(clientset, namespace, &cl.Spec), - BadgerAddon: operator.GetBadgerAddon(clientset, namespace, cl, cl.Annotations[config.ANNOTATION_CURRENT_PRIMARY]), - PgmonitorEnvVars: operator.GetPgmonitorEnvVars(cl.Spec.UserLabels[config.LABEL_EXPORTER], cl.Spec.CollectSecretName), - ScopeLabel: config.LABEL_PGHA_SCOPE, - PgbackrestEnvVars: operator.GetPgbackrestEnvVars(cl, cl.Labels[config.LABEL_BACKREST], cl.Annotations[config.ANNOTATION_CURRENT_PRIMARY], - cl.Spec.Port, cl.Spec.UserLabels[config.LABEL_BACKREST_STORAGE_TYPE]), - PgbackrestS3EnvVars: operator.GetPgbackrestS3EnvVars(*cl, clientset, namespace), - EnableCrunchyadm: operator.Pgo.Cluster.EnableCrunchyadm, - ReplicaReinitOnStartFail: !operator.Pgo.Cluster.DisableReplicaStartFailReinit, - SyncReplication: operator.GetSyncReplication(cl.Spec.SyncReplication), - Tablespaces: operator.GetTablespaceNames(cl.Spec.TablespaceMounts), - TablespaceVolumes: operator.GetTablespaceVolumesJSON(cl.Annotations[config.ANNOTATION_CURRENT_PRIMARY], tablespaceStorageTypeMap), - TablespaceVolumeMounts: operator.GetTablespaceVolumeMountsJSON(tablespaceStorageTypeMap), - TLSEnabled: cl.Spec.TLS.IsTLSEnabled(), - TLSOnly: cl.Spec.TLSOnly, - TLSSecret: cl.Spec.TLS.TLSSecret, - ReplicationTLSSecret: cl.Spec.TLS.ReplicationTLSSecret, - CASecret: cl.Spec.TLS.CASecret, - Standby: cl.Spec.Standby, + Name: cl.Annotations[config.ANNOTATION_CURRENT_PRIMARY], + IsInit: true, + Replicas: "0", + ClusterName: cl.Spec.Name, + Port: cl.Spec.Port, + CCPImagePrefix: util.GetValueOrDefault(cl.Spec.CCPImagePrefix, operator.Pgo.Cluster.CCPImagePrefix), + CCPImage: cl.Spec.CCPImage, + CCPImageTag: cl.Spec.CCPImageTag, + PVCName: dataVolume.InlineVolumeSource(), + DeploymentLabels: operator.GetLabelsFromMap(cl.Spec.UserLabels), + PodAnnotations: operator.GetAnnotations(cl, crv1.ClusterAnnotationPostgres), + PodLabels: operator.GetLabelsFromMap(cl.Spec.UserLabels), + DataPathOverride: cl.Annotations[config.ANNOTATION_CURRENT_PRIMARY], + Database: cl.Spec.Database, + SecurityContext: operator.GetPodSecurityContext(supplementalGroups), + RootSecretName: crv1.UserSecretName(cl, 
crv1.PGUserSuperuser), + PrimarySecretName: crv1.UserSecretName(cl, crv1.PGUserReplication), + UserSecretName: crv1.UserSecretName(cl, cl.Spec.User), + NodeSelector: operator.GetNodeAffinity(cl.Spec.NodeAffinity.Default), + PodAntiAffinity: operator.GetPodAntiAffinity(cl, + crv1.PodAntiAffinityDeploymentDefault, cl.Spec.PodAntiAffinity.Default), + PodAntiAffinityLabelName: config.LABEL_POD_ANTI_AFFINITY, + PodAntiAffinityLabelValue: string(cl.Spec.PodAntiAffinity.Default), + ContainerResources: operator.GetResourcesJSON(cl.Spec.Resources, cl.Spec.Limits), + ConfVolume: operator.GetConfVolume(clientset, cl, namespace), + ExporterAddon: operator.GetExporterAddon(cl.Spec), + BadgerAddon: operator.GetBadgerAddon(cl, cl.Annotations[config.ANNOTATION_CURRENT_PRIMARY]), + PgmonitorEnvVars: operator.GetPgmonitorEnvVars(cl), + ScopeLabel: config.LABEL_PGHA_SCOPE, + PgbackrestEnvVars: operator.GetPgbackrestEnvVars(cl, cl.Annotations[config.ANNOTATION_CURRENT_PRIMARY], cl.Spec.Port), + PgbackrestS3EnvVars: operator.GetPgbackrestS3EnvVars(clientset, *cl), + ReplicaReinitOnStartFail: !operator.Pgo.Cluster.DisableReplicaStartFailReinit, + SyncReplication: operator.GetSyncReplication(cl.Spec.SyncReplication), + Tablespaces: operator.GetTablespaceNames(cl.Spec.TablespaceMounts), + TablespaceVolumes: operator.GetTablespaceVolumesJSON(cl.Annotations[config.ANNOTATION_CURRENT_PRIMARY], tablespaceStorageTypeMap), + TablespaceVolumeMounts: operator.GetTablespaceVolumeMountsJSON(tablespaceStorageTypeMap), + TLSEnabled: cl.Spec.TLS.IsTLSEnabled(), + TLSOnly: cl.Spec.TLSOnly, + TLSSecret: cl.Spec.TLS.TLSSecret, + ReplicationTLSSecret: cl.Spec.TLS.ReplicationTLSSecret, + CASecret: cl.Spec.TLS.CASecret, + Standby: cl.Spec.Standby, + Tolerations: util.GetTolerations(cl.Spec.Tolerations), } return deploymentFields } -// DeleteCluster ... -func DeleteCluster(clientset kubernetes.Interface, cl *crv1.Pgcluster, namespace string) error { - - var err error - log.Info("deleting Pgcluster object" + " in namespace " + namespace) - log.Info("deleting with Name=" + cl.Spec.Name + " in namespace " + namespace) +// scaleReplicaCreateMissingService creates a service for cluster replicas if +// it does not yet exist. +func scaleReplicaCreateMissingService(clientset kubernetes.Interface, replica *crv1.Pgreplica, cluster *crv1.Pgcluster, namespace string) error { + // start with the default value for ServiceType + serviceType := config.DefaultServiceType - //create rmdata job - isReplica := false - isBackup := false - removeData := true - removeBackup := false - err = CreateRmdataJob(clientset, cl, namespace, removeData, removeBackup, isReplica, isBackup) - if err != nil { - log.Error(err) - return err - } else { - publishDeleteCluster(namespace, cl.ObjectMeta.Labels[config.LABEL_PGOUSER], cl.Spec.Name, cl.ObjectMeta.Labels[config.LABEL_PG_CLUSTER_IDENTIFIER]) + // then see if there is a configuration provided value + if operator.Pgo.Cluster.ServiceType != "" { + serviceType = operator.Pgo.Cluster.ServiceType } - return err - -} + // then see if there is an override on the custom resource definition + if cluster.Spec.ServiceType != "" { + serviceType = cluster.Spec.ServiceType + } -// scaleReplicaCreateMissingService creates a service for cluster replicas if -// it does not yet exist. 
-func scaleReplicaCreateMissingService(clientset kubernetes.Interface, replica *crv1.Pgreplica, cluster *crv1.Pgcluster, namespace string) error { - st := operator.Pgo.Cluster.ServiceType - if replica.Spec.UserLabels[config.LABEL_SERVICE_TYPE] != "" { - st = replica.Spec.UserLabels[config.LABEL_SERVICE_TYPE] - } else if cluster.Spec.UserLabels[config.LABEL_SERVICE_TYPE] != "" { - st = cluster.Spec.UserLabels[config.LABEL_SERVICE_TYPE] + // and finally, see if there is an instance specific override. Yay. + if replica.Spec.ServiceType != "" { + serviceType = replica.Spec.ServiceType } serviceName := fmt.Sprintf("%s-replica", replica.Spec.ClusterName) @@ -381,17 +381,15 @@ func scaleReplicaCreateMissingService(clientset kubernetes.Interface, replica *c ServiceName: serviceName, ClusterName: replica.Spec.ClusterName, Port: cluster.Spec.Port, - ServiceType: st, + ServiceType: serviceType, } // only add references to the exporter / pgBadger ports - clusterLabels := cluster.ObjectMeta.GetLabels() - - if val, ok := clusterLabels[config.LABEL_EXPORTER]; ok && val == config.LABEL_TRUE { + if cluster.Spec.Exporter { serviceFields.ExporterPort = cluster.Spec.ExporterPort } - if val, ok := clusterLabels[config.LABEL_BADGER]; ok && val == config.LABEL_TRUE { + if cluster.Spec.PGBadger { serviceFields.PGBadgerPort = cluster.Spec.PGBadgerPort } @@ -412,25 +410,15 @@ func scaleReplicaCreateDeployment(clientset kubernetes.Interface, var replicaDoc bytes.Buffer serviceName := replica.Spec.ClusterName + "-replica" - //replicaFlag := true // replicaLabels := operator.GetPrimaryLabels(serviceName, replica.Spec.ClusterName, replicaFlag, cluster.Spec.UserLabels) cluster.Spec.UserLabels[config.LABEL_REPLICA_NAME] = replica.Spec.Name cluster.Spec.UserLabels["name"] = serviceName cluster.Spec.UserLabels[config.LABEL_PG_CLUSTER] = replica.Spec.ClusterName - archiveMode := "off" - if cluster.Spec.UserLabels[config.LABEL_ARCHIVE] == "true" { - archiveMode = "on" - } - if cluster.Labels[config.LABEL_BACKREST] == "true" { - //backrest requires archive mode be set to on - archiveMode = "on" - } - image := cluster.Spec.CCPImage - //check for --ccp-image-tag at the command line + // check for --ccp-image-tag at the command line imageTag := cluster.Spec.CCPImageTag if replica.Spec.UserLabels[config.LABEL_CCP_IMAGE_TAG_KEY] != "" { imageTag = replica.Spec.UserLabels[config.LABEL_CCP_IMAGE_TAG_KEY] @@ -438,6 +426,11 @@ func scaleReplicaCreateDeployment(clientset kubernetes.Interface, cluster.Spec.UserLabels[config.LABEL_DEPLOYMENT_NAME] = replica.Spec.Name + // Set the exporter labels, if applicable + if cluster.Spec.Exporter { + cluster.Spec.UserLabels[config.LABEL_EXPORTER] = config.LABEL_TRUE + } + // set up a map of the names of the tablespaces as well as the storage classes tablespaceStorageTypeMap := operator.GetTablespaceStorageTypeMap(cluster.Spec.TablespaceMounts) @@ -448,7 +441,14 @@ func scaleReplicaCreateDeployment(clientset kubernetes.Interface, supplementalGroups = append(supplementalGroups, v.SupplementalGroups...) } - //create the replica deployment + // check if there are any node affinity rules. 
rules on the replica supersede + // rules on the primary + nodeAffinity := cluster.Spec.NodeAffinity.Default + if replica.Spec.NodeAffinity != nil { + nodeAffinity = replica.Spec.NodeAffinity + } + + // create the replica deployment replicaDeploymentFields := operator.DeploymentTemplateFields{ Name: replica.Spec.Name, ClusterName: replica.Spec.ClusterName, @@ -459,36 +459,41 @@ func scaleReplicaCreateDeployment(clientset kubernetes.Interface, PVCName: dataVolume.InlineVolumeSource(), Database: cluster.Spec.Database, DataPathOverride: replica.Spec.Name, - ArchiveMode: archiveMode, Replicas: "1", ConfVolume: operator.GetConfVolume(clientset, cluster, namespace), DeploymentLabels: operator.GetLabelsFromMap(cluster.Spec.UserLabels), PodAnnotations: operator.GetAnnotations(cluster, crv1.ClusterAnnotationPostgres), PodLabels: operator.GetLabelsFromMap(cluster.Spec.UserLabels), SecurityContext: operator.GetPodSecurityContext(supplementalGroups), - RootSecretName: cluster.Spec.RootSecretName, - PrimarySecretName: cluster.Spec.PrimarySecretName, - UserSecretName: cluster.Spec.UserSecretName, + RootSecretName: crv1.UserSecretName(cluster, crv1.PGUserSuperuser), + PrimarySecretName: crv1.UserSecretName(cluster, crv1.PGUserReplication), + UserSecretName: crv1.UserSecretName(cluster, cluster.Spec.User), ContainerResources: operator.GetResourcesJSON(cluster.Spec.Resources, cluster.Spec.Limits), - NodeSelector: operator.GetAffinity(replica.Spec.UserLabels["NodeLabelKey"], replica.Spec.UserLabels["NodeLabelValue"], "In"), - PodAntiAffinity: operator.GetPodAntiAffinity(cluster, crv1.PodAntiAffinityDeploymentDefault, cluster.Spec.PodAntiAffinity.Default), - ExporterAddon: operator.GetExporterAddon(clientset, namespace, &cluster.Spec), - BadgerAddon: operator.GetBadgerAddon(clientset, namespace, cluster, replica.Spec.Name), - ScopeLabel: config.LABEL_PGHA_SCOPE, - PgbackrestEnvVars: operator.GetPgbackrestEnvVars(cluster, cluster.Labels[config.LABEL_BACKREST], replica.Spec.Name, - cluster.Spec.Port, cluster.Spec.UserLabels[config.LABEL_BACKREST_STORAGE_TYPE]), - PgbackrestS3EnvVars: operator.GetPgbackrestS3EnvVars(*cluster, clientset, namespace), - EnableCrunchyadm: operator.Pgo.Cluster.EnableCrunchyadm, - ReplicaReinitOnStartFail: !operator.Pgo.Cluster.DisableReplicaStartFailReinit, - SyncReplication: operator.GetSyncReplication(cluster.Spec.SyncReplication), - Tablespaces: operator.GetTablespaceNames(cluster.Spec.TablespaceMounts), - TablespaceVolumes: operator.GetTablespaceVolumesJSON(replica.Spec.Name, tablespaceStorageTypeMap), - TablespaceVolumeMounts: operator.GetTablespaceVolumeMountsJSON(tablespaceStorageTypeMap), - TLSEnabled: cluster.Spec.TLS.IsTLSEnabled(), - TLSOnly: cluster.Spec.TLSOnly, - TLSSecret: cluster.Spec.TLS.TLSSecret, - ReplicationTLSSecret: cluster.Spec.TLS.ReplicationTLSSecret, - CASecret: cluster.Spec.TLS.CASecret, + NodeSelector: operator.GetNodeAffinity(nodeAffinity), + PodAntiAffinity: operator.GetPodAntiAffinity(cluster, + crv1.PodAntiAffinityDeploymentDefault, cluster.Spec.PodAntiAffinity.Default), + PodAntiAffinityLabelName: config.LABEL_POD_ANTI_AFFINITY, + PodAntiAffinityLabelValue: string(cluster.Spec.PodAntiAffinity.Default), + ExporterAddon: operator.GetExporterAddon(cluster.Spec), + BadgerAddon: operator.GetBadgerAddon(cluster, replica.Spec.Name), + ScopeLabel: config.LABEL_PGHA_SCOPE, + PgbackrestEnvVars: operator.GetPgbackrestEnvVars(cluster, replica.Spec.Name, cluster.Spec.Port), + PgbackrestS3EnvVars: operator.GetPgbackrestS3EnvVars(clientset, *cluster), + 
ReplicaReinitOnStartFail: !operator.Pgo.Cluster.DisableReplicaStartFailReinit, + SyncReplication: operator.GetSyncReplication(cluster.Spec.SyncReplication), + Tablespaces: operator.GetTablespaceNames(cluster.Spec.TablespaceMounts), + TablespaceVolumes: operator.GetTablespaceVolumesJSON(replica.Spec.Name, tablespaceStorageTypeMap), + TablespaceVolumeMounts: operator.GetTablespaceVolumeMountsJSON(tablespaceStorageTypeMap), + TLSEnabled: cluster.Spec.TLS.IsTLSEnabled(), + TLSOnly: cluster.Spec.TLSOnly, + TLSSecret: cluster.Spec.TLS.TLSSecret, + ReplicationTLSSecret: cluster.Spec.TLS.ReplicationTLSSecret, + CASecret: cluster.Spec.TLS.CASecret, + // Give precedence to the tolerations defined on the replica spec, otherwise + // take any tolerations defined on the cluster spec + Tolerations: util.GetValueOrDefault( + util.GetTolerations(replica.Spec.Tolerations), + util.GetTolerations(cluster.Spec.Tolerations)), } switch replica.Spec.ReplicaStorage.StorageType { @@ -506,7 +511,7 @@ func scaleReplicaCreateDeployment(clientset kubernetes.Interface, } if operator.CRUNCHY_DEBUG { - config.DeploymentTemplate.Execute(os.Stdout, replicaDeploymentFields) + _ = config.DeploymentTemplate.Execute(os.Stdout, replicaDeploymentFields) } replicaDeployment := appsv1.Deployment{} @@ -550,7 +555,6 @@ func DeleteReplica(clientset kubernetes.Interface, cl *crv1.Pgreplica, namespace }) return err - } func publishScaleError(namespace string, username string, cluster *crv1.Pgcluster) { @@ -575,27 +579,6 @@ func publishScaleError(namespace string, username string, cluster *crv1.Pgcluste } } -func publishDeleteCluster(namespace, username, clusterName, identifier string) { - topics := make([]string, 1) - topics[0] = events.EventTopicCluster - - f := events.EventDeleteClusterFormat{ - EventHeader: events.EventHeader{ - Namespace: namespace, - Username: username, - Topic: topics, - Timestamp: time.Now(), - EventType: events.EventDeleteCluster, - }, - Clustername: clusterName, - } - - err := events.Publish(f) - if err != nil { - log.Error(err.Error()) - } -} - // ScaleClusterInfo contains information about a cluster obtained when scaling the various // deployments for a cluster. This includes the name of the primary deployment, all replica // deployments, along with the names of the services enabled for the cluster. @@ -625,12 +608,14 @@ func ShutdownCluster(clientset kubeapi.Interface, cluster crv1.Pgcluster) error // only consider pods that are running pods, err := clientset.CoreV1().Pods(cluster.Namespace).List(ctx, options) - if err != nil { return err } - if len(pods.Items) > 1 { + if len(pods.Items) == 0 { + return fmt.Errorf("Cluster Operator: Could not find primary pod for shutdown of "+ + "cluster %s", cluster.Name) + } else if len(pods.Items) > 1 { return fmt.Errorf("Cluster Operator: Invalid number of primary pods (%d) found when "+ "shutting down cluster %s", len(pods.Items), cluster.Name) } @@ -683,7 +668,7 @@ func ShutdownCluster(clientset kubeapi.Interface, cluster crv1.Pgcluster) error return err } - publishClusterShutdown(cluster) + _ = publishClusterShutdown(cluster) return nil } @@ -692,7 +677,6 @@ func ShutdownCluster(clientset kubeapi.Interface, cluster crv1.Pgcluster) error // includes changing the replica count for all clusters to 1, and then updating the pgcluster // with a shutdown status. 
func StartupCluster(clientset kubernetes.Interface, cluster crv1.Pgcluster) error {
-
 	log.Debugf("Cluster Operator: starting cluster %s", cluster.Name)
 
 	// ensure autofailover is enabled to ensure proper startup of the cluster
@@ -795,3 +779,23 @@ func ScaleClusterDeployments(clientset kubernetes.Interface, cluster crv1.Pgclus
 	}
 	return
 }
+
+// waitForDeploymentReady waits for a deployment to be ready, or times out
+func waitForDeploymentReady(clientset kubernetes.Interface, namespace, deploymentName string, periodSecs, timeoutSecs time.Duration) error {
+	ctx := context.TODO()
+
+	// set up the timer and timeout
+	if err := wait.Poll(periodSecs, timeoutSecs, func() (bool, error) {
+		// check to see if the deployment is ready
+		d, err := clientset.AppsV1().Deployments(namespace).Get(ctx, deploymentName, metav1.GetOptions{})
+		if err != nil {
+			log.Warn(err)
+		}
+
+		return err == nil && d.Status.Replicas == d.Status.ReadyReplicas, nil
+	}); err != nil {
+		return fmt.Errorf("readiness timeout reached for deployment %q", deploymentName)
+	}
+
+	return nil
+}
diff --git a/internal/operator/cluster/common.go b/internal/operator/cluster/common.go
new file mode 100644
index 0000000000..a17ccec7ef
--- /dev/null
+++ b/internal/operator/cluster/common.go
@@ -0,0 +1,157 @@
+package cluster
+
+/*
+ Copyright 2020 - 2023 Crunchy Data Solutions, Inc.
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+import (
+	"context"
+	"fmt"
+	"strings"
+
+	"github.com/crunchydata/postgres-operator/internal/config"
+	"github.com/crunchydata/postgres-operator/internal/kubeapi"
+	"github.com/crunchydata/postgres-operator/internal/operator"
+	pgpassword "github.com/crunchydata/postgres-operator/internal/postgres/password"
+	"github.com/crunchydata/postgres-operator/internal/util"
+	crv1 "github.com/crunchydata/postgres-operator/pkg/apis/crunchydata.com/v1"
+
+	log "github.com/sirupsen/logrus"
+	v1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/client-go/kubernetes"
+	"k8s.io/client-go/rest"
+)
+
+const (
+	// sqlDisableLogin disables a Postgres user from logging in. This is safe
+	// from SQL injection as the string that is being interpolated is escaped
+	//
+	// This previously used the "PASSWORD NULL" feature, but that is only found
+	// in PostgreSQL 11+, and given we don't want to check for the PG version
+	// before running the command, we will not use it
+	sqlDisableLogin = `ALTER ROLE %s NOLOGIN;`
+
+	// sqlEnableLogin is the SQL to update the password
+	// NOTE: this is safe from SQL injection as we explicitly add the interpolated
+	// string as an MD5 hash and we are using the username.
+	// However, the escaping is handled in the util.SetPostgreSQLPassword function
+	sqlEnableLogin = `ALTER ROLE %s PASSWORD %s LOGIN;`
+)
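+
+// For reference, once the format strings above are filled in for, say, the
+// monitoring user, the rendered SQL looks roughly like (the exact quoting is
+// performed by util.SQLQuoteIdentifier and util.SetPostgreSQLPassword):
+//
+//	ALTER ROLE "ccp_monitoring" NOLOGIN;
+//	ALTER ROLE "ccp_monitoring" PASSWORD 'md5...' LOGIN;
+//
+// so no plaintext password is ever interpolated into the statement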
+
+// disablePostgreSQLLogin disables the ability for a PostgreSQL user to log in
+func disablePostgreSQLLogin(clientset kubernetes.Interface, restconfig *rest.Config, cluster *crv1.Pgcluster, username string) error {
+	log.Debugf("disable user %q on cluster %q", username, cluster.Name)
+	// disable the user in the PostgreSQL cluster.
+	// first, get the primary pod. If we cannot do this, let's consider it an
+	// error and abort
+	pod, err := util.GetPrimaryPod(clientset, cluster)
+	if err != nil {
+		return err
+	}
+
+	// This is safe from SQL injection as we are escaping the username
+	sql := strings.NewReader(fmt.Sprintf(sqlDisableLogin, util.SQLQuoteIdentifier(username)))
+	cmd := []string{"psql", "-p", cluster.Spec.Port}
+
+	// exec into the pod to run the query
+	_, stderr, err := kubeapi.ExecToPodThroughAPI(restconfig, clientset,
+		cmd, "database", pod.Name, pod.ObjectMeta.Namespace, sql)
+	// if there is an error, log the error from the stderr and return the error
+	if err != nil {
+		return fmt.Errorf(stderr)
+	}
+
+	return nil
+}
+
+// generatePassword generates a password that is used for the PostgreSQL user
+// system accounts. This goes off of the configured value for password length
+func generatePassword() (string, error) {
+	// first, get the length of what the password should be
+	generatedPasswordLength := util.GeneratedPasswordLength(operator.Pgo.Cluster.PasswordLength)
+	// from there, the password can be generated!
+	return util.GeneratePassword(generatedPasswordLength)
+}
+
+// getClusterInstanceServices gets all of the services applicable to each
+// PostgreSQL instance
+func getClusterInstanceServices(clientset kubernetes.Interface, cluster *crv1.Pgcluster) (*v1.ServiceList, error) {
+	ctx := context.TODO()
+	options := metav1.ListOptions{
+		LabelSelector: fmt.Sprintf("%s=%s,!%s",
+			config.LABEL_PG_CLUSTER, cluster.Name, config.LABEL_PGO_BACKREST_REPO),
+	}
+
+	return clientset.CoreV1().Services(cluster.Namespace).List(ctx, options)
+}
+
+// makePostgreSQLPassword creates the expected hash for a password type for a
+// PostgreSQL password
+// nolint:unparam // this is set up to accept SCRAM in the not-too-distant future
+func makePostgreSQLPassword(passwordType pgpassword.PasswordType, username, password string) string {
+	// get the PostgreSQL password generator based on the password type;
+	// as all of these values are valid, this does not error
+	postgresPassword, _ := pgpassword.NewPostgresPassword(passwordType, username, password)
+
+	// create the PostgreSQL style hashed password and return
+	hashedPassword, _ := postgresPassword.Build()
+
+	return hashedPassword
+}
+
+// rotatePostgreSQLPassword generates a new password for the specified
+// username/Secret pair and saves it both in PostgreSQL and the Secret itself
+func rotatePostgreSQLPassword(clientset kubernetes.Interface, restconfig *rest.Config, cluster *crv1.Pgcluster,
+	username string) (string, error) {
+	// determine if we are able to access the primary Pod
+	pod, err := util.GetPrimaryPod(clientset, cluster)
+	if err != nil {
+		return "", err
+	}
+
+	// generate a new password
+	password, err := generatePassword()
+	if err != nil {
+		return "", err
+	}
+
+	// update the PostgreSQL instance with the new password.
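+	// (persisting the new password to the corresponding Kubernetes Secret is
+	// left to the caller; for example, RotateExporterPassword stores the value
+	// returned here in the exporter Secret)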
+ if err := setPostgreSQLPassword(clientset, restconfig, pod, cluster.Spec.Port, username, password); err != nil { + return "", err + } + + return password, err +} + +// setPostgreSQLPassword updates the password of a user in a PostgreSQL +// cluster by executing into the Pod provided (i.e. a primary) and changing it +func setPostgreSQLPassword(clientset kubernetes.Interface, restconfig *rest.Config, pod *v1.Pod, port, + username, password string) error { + log.Debugf("set %q password in PostgreSQL", username) + + // we use the PostgreSQL "md5" hashing mechanism here to pre-hash the + // password. This is semi-hard coded but is now prepped for SCRAM as a + // password type can be passed in. Almost to SCRAM! + passwordHash := makePostgreSQLPassword(pgpassword.MD5, username, password) + + if err := util.SetPostgreSQLPassword(clientset, restconfig, pod, + port, username, passwordHash, sqlEnableLogin); err != nil { + log.Error(err) + return err + } + + // and that's all! + return nil +} diff --git a/internal/operator/cluster/common_test.go b/internal/operator/cluster/common_test.go new file mode 100644 index 0000000000..aaad3a3292 --- /dev/null +++ b/internal/operator/cluster/common_test.go @@ -0,0 +1,39 @@ +package cluster + +/* + Copyright 2020 - 2023 Crunchy Data Solutions, Inc. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +import ( + "testing" + + pgpassword "github.com/crunchydata/postgres-operator/internal/postgres/password" +) + +func TestMakePostgresPassword(t *testing.T) { + t.Run("md5", func(t *testing.T) { + t.Run("valid", func(t *testing.T) { + passwordType := pgpassword.MD5 + username := "pgbouncer" + password := "datalake" + expected := "md56294153764d389dc6830b6ce4f923cdb" + + actual := makePostgreSQLPassword(passwordType, username, password) + + if actual != expected { + t.Errorf("expected: %q actual: %q", expected, actual) + } + }) + }) +} diff --git a/internal/operator/cluster/exporter.go b/internal/operator/cluster/exporter.go new file mode 100644 index 0000000000..9969d6f75a --- /dev/null +++ b/internal/operator/cluster/exporter.go @@ -0,0 +1,375 @@ +package cluster + +/* + Copyright 2020 - 2023 Crunchy Data Solutions, Inc. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +import ( + "context" + "encoding/json" + "fmt" + "strconv" + + "github.com/crunchydata/postgres-operator/internal/config" + "github.com/crunchydata/postgres-operator/internal/kubeapi" + "github.com/crunchydata/postgres-operator/internal/operator" + "github.com/crunchydata/postgres-operator/internal/util" + crv1 "github.com/crunchydata/postgres-operator/pkg/apis/crunchydata.com/v1" + + log "github.com/sirupsen/logrus" + appsv1 "k8s.io/api/apps/v1" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" +) + +const ( + // exporterInstallScript references the embedded script that installs all of + // the pgMonitor functions + exporterInstallScript = "/opt/crunchy/bin/exporter/install.sh" + + // exporterServicePortName is the name used to identify the exporter port in + // the service + exporterServicePortName = "postgres-exporter" +) + +// AddExporter ensures that a PostgreSQL cluster is able to undertake the +// actions required by the "crunchy-postgres-exporter", i.e. +// +// - enable a service port so scrapers can access the metrics +// - it can authenticate as the "ccp_monitoring" user; manages the Secret as +// well +// - all of the monitoring views and functions are available +func AddExporter(clientset kubernetes.Interface, restconfig *rest.Config, cluster *crv1.Pgcluster) error { + ctx := context.TODO() + + // even if this is a standby, we can still set up a Secret (though the password + // value of the Secret is of limited use when the standby is promoted, it can + // be rotated, similar to the pgBouncer password) + + // only create a password Secret if one does not already exist, which is + // handled in the delegated function + password, err := CreateExporterSecret(clientset, cluster) + if err != nil { + return err + } + + // set up the Services, which are still needed on a standby + services, err := getClusterInstanceServices(clientset, cluster) + if err != nil { + return err + } + + // loop over each service to perform the necessary modifications +svcLoop: + for i := range services.Items { + svc := &services.Items[i] + + // loop over the service ports to see if exporter port is already set up. if + // it is, we can continue and skip the outerloop + for _, svcPort := range svc.Spec.Ports { + if svcPort.Name == exporterServicePortName { + continue svcLoop + } + } + + // otherwise, we need to append a service port to the list + port, err := strconv.ParseInt( + util.GetValueOrDefault(cluster.Spec.ExporterPort, operator.Pgo.Cluster.ExporterPort), 10, 32) + // if we can't parse this for whatever reason, issue a warning and continue on + if err != nil { + log.Warn(err) + } + + svcPort := v1.ServicePort{ + Name: exporterServicePortName, + Protocol: v1.ProtocolTCP, + Port: int32(port), + } + + svc.Spec.Ports = append(svc.Spec.Ports, svcPort) + + // if we fail to update the service, warn, but continue on + if _, err := clientset.CoreV1().Services(svc.Namespace).Update(ctx, svc, metav1.UpdateOptions{}); err != nil { + log.Warn(err) + } + } + + // this can't be installed if this is a standby, so abort if that's the case + if cluster.Spec.Standby { + return ErrStandbyNotAllowed + } + + // get the primary pod, which is needed to update the password for the + // exporter user + pod, err := util.GetPrimaryPod(clientset, cluster) + if err != nil { + return err + } + + // add the monitoring user and all the views associated with this + // user. 
This can be done by executing a script on the container itself
+	cmd := []string{"/bin/bash", exporterInstallScript}
+
+	if _, stderr, err := kubeapi.ExecToPodThroughAPI(restconfig, clientset,
+		cmd, "database", pod.Name, pod.ObjectMeta.Namespace, nil); err != nil {
+		return fmt.Errorf(stderr)
+	}
+
+	// attempt to update the password in PostgreSQL, as this is how the exporter
+	// will properly interface with PostgreSQL
+	return setPostgreSQLPassword(clientset, restconfig, pod, cluster.Spec.Port, crv1.PGUserMonitor, password)
+}
+
+// CreateExporterSecret creates a secret used by the exporter containing the
+// user credentials. Sees if a Secret already exists and if it does, uses that.
+// Otherwise, it will generate the password. Returns an error if it fails.
+func CreateExporterSecret(clientset kubernetes.Interface, cluster *crv1.Pgcluster) (string, error) {
+	ctx := context.TODO()
+	secretName := util.GenerateExporterSecretName(cluster.Name)
+
+	// see if this secret already exists...if it does, then take an early exit
+	if password, err := util.GetPasswordFromSecret(clientset, cluster.Namespace, secretName); err == nil {
+		log.Infof("exporter secret %s already present, will reuse", secretName)
+		return password, nil
+	}
+
+	// well, we have to generate the password
+	password, err := generatePassword()
+	if err != nil {
+		return "", err
+	}
+
+	// now, we can do what we came here to do, which is create the secret that
+	// holds the exporter user's credentials
+	secret := v1.Secret{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: secretName,
+			Labels: map[string]string{
+				config.LABEL_EXPORTER:   config.LABEL_TRUE,
+				config.LABEL_PG_CLUSTER: cluster.Name,
+				config.LABEL_VENDOR:     config.LABEL_CRUNCHY,
+			},
+		},
+		Data: map[string][]byte{
+			"username": []byte(crv1.PGUserMonitor),
+			"password": []byte(password),
+		},
+	}
+
+	if _, err := clientset.CoreV1().Secrets(cluster.Namespace).
+		Create(ctx, &secret, metav1.CreateOptions{}); err != nil {
+		log.Error(err)
+		return "", err
+	}
+
+	return password, nil
+}
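+
+// Sketched out (cluster name hypothetical), the Secret created above for a
+// cluster named "hippo" would look along these lines, with the name produced
+// by util.GenerateExporterSecretName:
+//
+//	metadata:
+//	  name: <exporter secret name for "hippo">
+//	  labels: (the exporter, pg-cluster, and vendor labels set above)
+//	data:
+//	  username: <base64 of crv1.PGUserMonitor, i.e. the "ccp_monitoring" user>
+//	  password: <base64 of the generated password>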
+
+// RemoveExporter disables the ability for a PostgreSQL cluster to use the
+// exporter functionality. In particular, this function:
+//
+// - disallows the login of the monitoring user (ccp_monitoring)
+// - removes the Secret that contains the ccp_monitoring user credentials
+// - removes the port on the cluster Service
+//
+// This does not modify the Deployment that has the exporter sidecar. That is
+// handled by the "UpdateExporter" function, so it can be handled as part of a
+// rolling update
+func RemoveExporter(clientset kubernetes.Interface, restconfig *rest.Config, cluster *crv1.Pgcluster) error {
+	ctx := context.TODO()
+
+	// close the exporter port on each service
+	services, err := getClusterInstanceServices(clientset, cluster)
+	if err != nil {
+		return err
+	}
+
+	for i := range services.Items {
+		svc := &services.Items[i]
+		svcPorts := []v1.ServicePort{}
+
+		for _, svcPort := range svc.Spec.Ports {
+			// if we find the service port for the exporter, skip it, as we will
+			// not be including it in the update
+			if svcPort.Name == exporterServicePortName {
+				continue
+			}
+
+			svcPorts = append(svcPorts, svcPort)
+		}
+
+		svc.Spec.Ports = svcPorts
+
+		// if we fail to update the service, warn but continue
+		if _, err := clientset.CoreV1().Services(svc.Namespace).Update(ctx, svc, metav1.UpdateOptions{}); err != nil {
+			log.Warn(err)
+		}
+	}
+
+	// disable the user before clearing the Secret, so there does not end up being
+	// a race condition between the existence of the Secret and the Pod definition.
+	// if this is a standby cluster, skip this step, as we cannot execute any SQL
+	if !cluster.Spec.Standby {
+		// if this fails, warn and continue
+		if err := disablePostgreSQLLogin(clientset, restconfig, cluster, crv1.PGUserMonitor); err != nil {
+			log.Warn(err)
+		}
+	}
+
+	// delete the Secret. If there is an error deleting the Secret, log it as a
+	// warning and continue on
+	if err := clientset.CoreV1().Secrets(cluster.Namespace).Delete(ctx,
+		util.GenerateExporterSecretName(cluster.Name), metav1.DeleteOptions{}); err != nil {
+		log.Warnf("could not remove exporter secret: %q", err.Error())
+	}
+
+	return nil
+}
+
+// RotateExporterPassword rotates the password for the monitoring PostgreSQL
+// user
+func RotateExporterPassword(clientset kubernetes.Interface, restconfig *rest.Config, cluster *crv1.Pgcluster) error {
+	ctx := context.TODO()
+
+	// get the secret that contains the exporter user's credentials. If we
+	// can't find the secret, we're basically done here
+	secretName := util.GenerateExporterSecretName(cluster.Name)
+	secret, err := clientset.CoreV1().Secrets(cluster.Namespace).Get(ctx, secretName, metav1.GetOptions{})
+	if err != nil {
+		return err
+	}
+
+	// update the password on the PostgreSQL instance
+	password, err := rotatePostgreSQLPassword(clientset, restconfig, cluster, crv1.PGUserMonitor)
+	if err != nil {
+		return err
+	}
+
+	// next, update the password field of the secret.
+	secret.Data["password"] = []byte(password)
+
+	// update the secret
+	if _, err := clientset.CoreV1().Secrets(cluster.Namespace).
+		Update(ctx, secret, metav1.UpdateOptions{}); err != nil {
+		return err
+	}
+
+	// and that's it - the changes will be propagated to the exporter sidecars
+	return nil
+}
+
+// UpdateExporterSidecar either adds or removes the metrics sidecar from the
+// cluster. This is meant to be used as a rolling update callback function
+func UpdateExporterSidecar(clientset kubeapi.Interface, cluster *crv1.Pgcluster, deployment *appsv1.Deployment) error {
+	// need to determine if we are adding or removing
+	if cluster.Spec.Exporter {
+		return addExporterSidecar(cluster, deployment)
+	}
+
+	removeExporterSidecar(deployment)
+
+	return nil
+}
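+
+// Because UpdateExporterSidecar takes (clientset, cluster, deployment), it can
+// be handed to whatever helper walks a cluster's instance Deployments during a
+// rolling update; an illustrative (hypothetical) call for a single Deployment:
+//
+//	_ = UpdateExporterSidecar(clientset, cluster, deployment)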
+// addExporterSidecar adds the metrics collection exporter to a Deployment.
+// This does two things:
+// - adds the exporter container to the manifest. If the exporter manifest
+// already exists, this supersedes it.
+// - adds the exporter label to the label template, so it can be discovered that
+// this container has an exporter
+func addExporterSidecar(cluster *crv1.Pgcluster, deployment *appsv1.Deployment) error {
+	// use the legacy template generation to make the appropriate substitutions,
+	// and then get said generation to be placed into an actual Container object
+	template := operator.GetExporterAddon(cluster.Spec)
+
+	container := v1.Container{}
+
+	if err := json.Unmarshal([]byte(template), &container); err != nil {
+		return fmt.Errorf("error unmarshalling exporter json into Container: %w", err)
+	}
+
+	// append the container to the deployment container list. However, we are
+	// going to do this carefully, in case the exporter container already exists.
+	// this definition will supersede any exporter container already in the
+	// containers list
+	containers := []v1.Container{}
+	for _, c := range deployment.Spec.Template.Spec.Containers {
+		// skip if this is the exporter container
+		if c.Name == exporterContainerName {
+			continue
+		}
+
+		containers = append(containers, c)
+	}
+
+	// add the exporter container and override the containers list definition
+	containers = append(containers, container)
+	deployment.Spec.Template.Spec.Containers = containers
+
+	// add the label to the deployment template
+	deployment.Spec.Template.ObjectMeta.Labels[config.LABEL_EXPORTER] = config.LABEL_TRUE
+
+	return nil
+}
+
+// removeExporterSidecar removes the metrics collection exporter from a
+// Deployment.
+//
+// This involves:
+// - Removing the container entry for the exporter
+// - Removing the label from the deployment template
+func removeExporterSidecar(deployment *appsv1.Deployment) {
+	// first, find the container entry in the list of containers and remove it
+	containers := []v1.Container{}
+	for _, c := range deployment.Spec.Template.Spec.Containers {
+		// skip if this is the exporter container
+		if c.Name == exporterContainerName {
+			continue
+		}
+
+		containers = append(containers, c)
+	}
+
+	deployment.Spec.Template.Spec.Containers = containers
+
+	// alright, so this moves towards the mix of modern/legacy behavior, but we
+	// need to scan the environment variables on the "database" container and
+	// remove the one with the name "PGMONITOR_PASSWORD"
+	for i, c := range deployment.Spec.Template.Spec.Containers {
+		if c.Name == "database" {
+			env := []v1.EnvVar{}
+
+			for _, e := range c.Env {
+				if e.Name == "PGMONITOR_PASSWORD" {
+					continue
+				}
+
+				env = append(env, e)
+			}
+
+			deployment.Spec.Template.Spec.Containers[i].Env = env
+			break
+		}
+	}
+
+	// finally, remove the label
+	delete(deployment.Spec.Template.ObjectMeta.Labels, config.LABEL_EXPORTER)
+}
diff --git a/internal/operator/cluster/failover.go b/internal/operator/cluster/failover.go
deleted file mode 100644
index 5f64b86f08..0000000000
--- a/internal/operator/cluster/failover.go
+++ /dev/null
@@ -1,142 +0,0 @@
-// Package cluster holds the cluster CRD logic and definitions
-// A cluster is comprised of a primary service, replica service,
-// primary deployment, and replica deployment
-package cluster
-
-/*
- Copyright 2018 - 2020 Crunchy Data Solutions, Inc.
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -import ( - "context" - "encoding/json" - "time" - - "github.com/crunchydata/postgres-operator/internal/config" - "github.com/crunchydata/postgres-operator/internal/kubeapi" - crv1 "github.com/crunchydata/postgres-operator/pkg/apis/crunchydata.com/v1" - "github.com/crunchydata/postgres-operator/pkg/events" - pgo "github.com/crunchydata/postgres-operator/pkg/generated/clientset/versioned" - log "github.com/sirupsen/logrus" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/client-go/rest" -) - -// FailoverBase ... -// gets called first on a failover -func FailoverBase(namespace string, clientset kubeapi.Interface, task *crv1.Pgtask, restconfig *rest.Config) { - ctx := context.TODO() - var err error - - //look up the pgcluster for this task - //in the case, the clustername is passed as a key in the - //parameters map - var clusterName string - for k := range task.Spec.Parameters { - clusterName = k - } - - cluster, err := clientset.CrunchydataV1().Pgclusters(namespace).Get(ctx, clusterName, metav1.GetOptions{}) - if err != nil { - return - } - - //create marker (clustername, namespace) - err = PatchpgtaskFailoverStatus(clientset, task, namespace) - if err != nil { - log.Errorf("could not set failover started marker for task %s cluster %s", task.Spec.Name, clusterName) - return - } - - //get initial count of replicas --selector=pg-cluster=clusterName - selector := config.LABEL_PG_CLUSTER + "=" + clusterName - replicaList, err := clientset.CrunchydataV1().Pgreplicas(namespace).List(ctx, metav1.ListOptions{LabelSelector: selector}) - if err != nil { - log.Error(err) - return - } - log.Debugf("replica count before failover is %d", len(replicaList.Items)) - - //publish event for failover - topics := make([]string, 1) - topics[0] = events.EventTopicCluster - - f := events.EventFailoverClusterFormat{ - EventHeader: events.EventHeader{ - Namespace: namespace, - Username: task.ObjectMeta.Labels[config.LABEL_PGOUSER], - Topic: topics, - Timestamp: time.Now(), - EventType: events.EventFailoverCluster, - }, - Clustername: clusterName, - Target: task.ObjectMeta.Labels[config.LABEL_TARGET], - } - - err = events.Publish(f) - if err != nil { - log.Error(err) - } - - Failover(cluster.ObjectMeta.Labels[config.LABEL_PG_CLUSTER_IDENTIFIER], clientset, clusterName, task, namespace, restconfig) - - //publish event for failover completed - topics = make([]string, 1) - topics[0] = events.EventTopicCluster - - g := events.EventFailoverClusterCompletedFormat{ - EventHeader: events.EventHeader{ - Namespace: namespace, - Username: task.ObjectMeta.Labels[config.LABEL_PGOUSER], - Topic: topics, - Timestamp: time.Now(), - EventType: events.EventFailoverClusterCompleted, - }, - Clustername: clusterName, - Target: task.ObjectMeta.Labels[config.LABEL_TARGET], - } - - err = events.Publish(g) - if err != nil { - log.Error(err) - } - - //remove marker - -} - -func PatchpgtaskFailoverStatus(clientset pgo.Interface, oldCrd *crv1.Pgtask, namespace string) error { - ctx := context.TODO() - - //change it - oldCrd.Spec.Parameters[config.LABEL_FAILOVER_STARTED] = 
time.Now().Format(time.RFC3339) - - //create the patch - patchBytes, err := json.Marshal(map[string]interface{}{ - "spec": map[string]interface{}{ - "parameters": oldCrd.Spec.Parameters, - }, - }) - if err != nil { - return err - } - - //apply patch - _, err6 := clientset.CrunchydataV1().Pgtasks(namespace). - Patch(ctx, oldCrd.Name, types.MergePatchType, patchBytes, metav1.PatchOptions{}) - - return err6 - -} diff --git a/internal/operator/cluster/failoverlogic.go b/internal/operator/cluster/failoverlogic.go deleted file mode 100644 index 6462002ad7..0000000000 --- a/internal/operator/cluster/failoverlogic.go +++ /dev/null @@ -1,236 +0,0 @@ -// Package cluster holds the cluster CRD logic and definitions -// A cluster is comprised of a primary service, replica service, -// primary deployment, and replica deployment -package cluster - -/* - Copyright 2019 - 2020 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -import ( - "context" - "fmt" - "time" - - "github.com/crunchydata/postgres-operator/internal/config" - "github.com/crunchydata/postgres-operator/internal/kubeapi" - "github.com/crunchydata/postgres-operator/internal/util" - crv1 "github.com/crunchydata/postgres-operator/pkg/apis/crunchydata.com/v1" - "github.com/crunchydata/postgres-operator/pkg/events" - pgo "github.com/crunchydata/postgres-operator/pkg/generated/clientset/versioned" - log "github.com/sirupsen/logrus" - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/fields" - "k8s.io/apimachinery/pkg/types" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/rest" -) - -func Failover(identifier string, clientset kubeapi.Interface, clusterName string, task *crv1.Pgtask, namespace string, restconfig *rest.Config) error { - ctx := context.TODO() - - var pod *v1.Pod - var err error - target := task.ObjectMeta.Labels[config.LABEL_TARGET] - - log.Infof("Failover called on [%s] target [%s]", clusterName, target) - - pod, err = util.GetPod(clientset, target, namespace) - if err != nil { - log.Error(err) - return err - } - log.Debugf("pod selected to failover to is %s", pod.Name) - - updateFailoverStatus(clientset, task, namespace, clusterName, "deleted primary deployment "+clusterName) - - //trigger the failover to the selected replica - if err := promote(pod, clientset, namespace, restconfig); err != nil { - log.Warn(err) - } - - publishPromoteEvent(identifier, namespace, task.ObjectMeta.Labels[config.LABEL_PGOUSER], clusterName, target) - - updateFailoverStatus(clientset, task, namespace, clusterName, "promoting pod "+pod.Name+" target "+target) - - //relabel the deployment with primary labels - //by setting service-name=clustername - upod, err := clientset.CoreV1().Pods(namespace).Get(ctx, pod.Name, metav1.GetOptions{}) - if err != nil { - log.Error(err) - log.Error("error in getting pod during failover relabel") - return err - } - - //set the service-name label to the cluster name to match - //the primary service selector - log.Debugf("setting label on pod %s=%s", 
config.LABEL_SERVICE_NAME, clusterName) - - patch, err := kubeapi.NewMergePatch().Add("metadata", "labels", config.LABEL_SERVICE_NAME)(clusterName).Bytes() - if err == nil { - log.Debugf("patching pod %s: %s", upod.Name, patch) - _, err = clientset.CoreV1().Pods(namespace). - Patch(ctx, upod.Name, types.MergePatchType, patch, metav1.PatchOptions{}) - } - if err != nil { - log.Error(err) - log.Error("error in updating pod during failover relabel") - return err - } - - targetDepName := upod.ObjectMeta.Labels[config.LABEL_DEPLOYMENT_NAME] - log.Debugf("patching deployment %s: %s", targetDepName, patch) - _, err = clientset.AppsV1().Deployments(namespace). - Patch(ctx, targetDepName, types.MergePatchType, patch, metav1.PatchOptions{}) - if err != nil { - log.Error(err) - log.Error("error in updating deployment during failover relabel") - return err - } - - updateFailoverStatus(clientset, task, namespace, clusterName, "updating label deployment...pod "+pod.Name+"was the failover target...failover completed") - - //update the pgcluster current-primary to new deployment name - cluster, err := clientset.CrunchydataV1().Pgclusters(namespace).Get(ctx, clusterName, metav1.GetOptions{}) - if err != nil { - log.Errorf("could not find pgcluster %s with labels", clusterName) - return err - } - - // update the CRD with the new current primary. If there is an error, log it - // here, otherwise return - if err := util.CurrentPrimaryUpdate(clientset, cluster, target, namespace); err != nil { - log.Error(err) - return err - } - - return nil - -} - -func updateFailoverStatus(clientset pgo.Interface, task *crv1.Pgtask, namespace, clusterName, message string) { - ctx := context.TODO() - - log.Debugf("updateFailoverStatus namespace=[%s] taskName=[%s] message=[%s]", namespace, task.Name, message) - - //update the task - t, err := clientset.CrunchydataV1().Pgtasks(task.Namespace).Get(ctx, task.Name, metav1.GetOptions{}) - if err != nil { - return - } - *task = *t - - task.Status.Message = message - - t, err = clientset.CrunchydataV1().Pgtasks(task.Namespace).Update(ctx, task, metav1.UpdateOptions{}) - if err != nil { - return - } - *task = *t - -} - -func promote( - pod *v1.Pod, - clientset kubernetes.Interface, - namespace string, restconfig *rest.Config) error { - - // generate the curl command that will be run on the pod selected for the failover in order - // to trigger the failover and promote that specific pod to primary - command := make([]string, 3) - command[0] = "/bin/bash" - command[1] = "-c" - command[2] = fmt.Sprintf("curl -s http://127.0.0.1:%s/failover -XPOST "+ - "-d '{\"candidate\":\"%s\"}'", config.DEFAULT_PATRONI_PORT, pod.Name) - - log.Debugf("running Exec with namespace=[%s] podname=[%s] container name=[%s]", namespace, pod.Name, pod.Spec.Containers[0].Name) - stdout, stderr, err := kubeapi.ExecToPodThroughAPI(restconfig, clientset, command, pod.Spec.Containers[0].Name, pod.Name, namespace, nil) - log.Debugf("stdout=[%s] stderr=[%s]", stdout, stderr) - if err != nil { - log.Error(err) - } - - return err -} - -func publishPromoteEvent(identifier, namespace, username, clusterName, target string) { - topics := make([]string, 1) - topics[0] = events.EventTopicCluster - - f := events.EventFailoverClusterFormat{ - EventHeader: events.EventHeader{ - Namespace: namespace, - Username: username, - Topic: topics, - Timestamp: time.Now(), - EventType: events.EventFailoverCluster, - }, - Clustername: clusterName, - Target: target, - } - - err := events.Publish(f) - if err != nil { - log.Error(err.Error()) - 
} - -} - -// RemovePrimaryOnRoleChangeTag sets the 'primary_on_role_change' tag to null in the -// Patroni DCS, effectively removing the tag. This is accomplished by exec'ing into -// the primary PG pod, and sending a patch request to update the appropriate data (i.e. -// the 'primary_on_role_change' tag) in the DCS. -func RemovePrimaryOnRoleChangeTag(clientset kubernetes.Interface, restconfig *rest.Config, - clusterName, namespace string) error { - ctx := context.TODO() - - selector := config.LABEL_PG_CLUSTER + "=" + clusterName + - "," + config.LABEL_PGHA_ROLE + "=" + config.LABEL_PGHA_ROLE_PRIMARY - - // only consider pods that are running - options := metav1.ListOptions{ - FieldSelector: fields.OneTermEqualSelector("status.phase", string(v1.PodRunning)).String(), - LabelSelector: selector, - } - - pods, err := clientset.CoreV1().Pods(namespace).List(ctx, options) - - if err != nil { - log.Error(err) - return err - } else if len(pods.Items) > 1 { - log.Error("More than one primary found after completing the post-failover backup") - } - pod := pods.Items[0] - - // generate the curl command that will be run on the pod selected for the failover in order - // to trigger the failover and promote that specific pod to primary - command := make([]string, 3) - command[0] = "/bin/bash" - command[1] = "-c" - command[2] = fmt.Sprintf("curl -s 127.0.0.1:%s/config -XPATCH -d "+ - "'{\"tags\":{\"primary_on_role_change\":null}}'", config.DEFAULT_PATRONI_PORT) - - log.Debugf("running Exec command '%s' with namespace=[%s] podname=[%s] container name=[%s]", - command, namespace, pod.Name, pod.Spec.Containers[0].Name) - stdout, stderr, err := kubeapi.ExecToPodThroughAPI(restconfig, clientset, command, - pod.Spec.Containers[0].Name, pod.Name, namespace, nil) - log.Debugf("stdout=[%s] stderr=[%s]", stdout, stderr) - if err != nil { - log.Error(err) - return err - } - return nil -} diff --git a/internal/operator/cluster/pgadmin.go b/internal/operator/cluster/pgadmin.go index c49462e4d0..53f456100c 100644 --- a/internal/operator/cluster/pgadmin.go +++ b/internal/operator/cluster/pgadmin.go @@ -1,7 +1,7 @@ package cluster /* - Copyright 2020 Crunchy Data Solutions, Inc. + Copyright 2020 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at @@ -20,7 +20,6 @@ import ( "context" "encoding/base64" "encoding/json" - "errors" "fmt" weakrand "math/rand" "os" @@ -69,8 +68,8 @@ const pgAdminDeploymentFormat = "%s-pgadmin" const initPassLen = 20 const ( - deployTimeout = 60 - pollInterval = 3 + deployTimeout = 60 * time.Second + pollInterval = 3 * time.Second ) // AddPgAdmin contains the various functions that are used to add a pgAdmin @@ -159,7 +158,7 @@ func AddPgAdminFromPgTask(clientset kubeapi.Interface, restconfig *rest.Config, } deployName := fmt.Sprintf(pgAdminDeploymentFormat, clusterName) - if err := waitForDeploymentReady(clientset, namespace, deployName, deployTimeout, pollInterval); err != nil { + if err := waitForDeploymentReady(clientset, namespace, deployName, pollInterval, deployTimeout); err != nil { log.Error(err) } @@ -347,6 +346,7 @@ func createPgAdminDeployment(clientset kubernetes.Interface, cluster *crv1.Pgclu // This password is throwaway so low entropy genreation method is fine randBytes := make([]byte, initPassLen) // weakrand Read is always nil error + // #nosec: G404 weakrand.Read(randBytes) throwawayPass := base64.RawStdEncoding.EncodeToString(randBytes) @@ -354,9 +354,10 @@ func createPgAdminDeployment(clientset kubernetes.Interface, cluster *crv1.Pgclu fields := pgAdminTemplateFields{ Name: pgAdminDeploymentName, ClusterName: cluster.Name, - CCPImagePrefix: operator.Pgo.Cluster.CCPImagePrefix, - CCPImageTag: util.GetStandardImageTag(cluster.Spec.CCPImage, cluster.Spec.CCPImageTag), - DisableFSGroup: operator.Pgo.Cluster.DisableFSGroup, + CCPImagePrefix: util.GetValueOrDefault(cluster.Spec.CCPImagePrefix, operator.Pgo.Cluster.CCPImagePrefix), + CCPImageTag: util.GetValueOrDefault(util.GetStandardImageTag(cluster.Spec.CCPImage, cluster.Spec.CCPImageTag), + operator.Pgo.Cluster.CCPImageTag), + DisableFSGroup: operator.Pgo.DisableFSGroup(), Port: defPgAdminPort, InitUser: defSetupUsername, InitPass: throwawayPass, @@ -365,7 +366,7 @@ func createPgAdminDeployment(clientset kubernetes.Interface, cluster *crv1.Pgclu // For debugging purposes, put the template substitution in stdout if operator.CRUNCHY_DEBUG { - config.PgAdminTemplate.Execute(os.Stdout, fields) + _ = config.PgAdminTemplate.Execute(os.Stdout, fields) } // perform the actual template substitution @@ -410,7 +411,7 @@ func createPgAdminService(clientset kubernetes.Interface, cluster *crv1.Pgcluste // For debugging purposes, put the template substitution in stdout if operator.CRUNCHY_DEBUG { - config.PgAdminServiceTemplate.Execute(os.Stdout, fields) + _ = config.PgAdminServiceTemplate.Execute(os.Stdout, fields) } // perform the actual template substitution @@ -470,30 +471,3 @@ func publishPgAdminEvent(eventType string, task *crv1.Pgtask) { log.Error(err.Error()) } } - -// waitFotDeploymentReady waits for a deployment to be ready, or times out -func waitForDeploymentReady(clientset kubernetes.Interface, namespace, deploymentName string, timeoutSecs, periodSecs time.Duration) error { - ctx := context.TODO() - timeout := time.After(timeoutSecs * time.Second) - tick := time.NewTicker(periodSecs * time.Second) - defer tick.Stop() - - // loop until the timeout is met, or that all the replicas are ready - for { - select { - case <-timeout: - return errors.New(fmt.Sprintf("Timed out waiting for deployment to become ready: [%s]", deploymentName)) - case <-tick.C: - if deployment, err := clientset.AppsV1().Deployments(namespace).Get(ctx, deploymentName, metav1.GetOptions{}); err != nil { - // if there is an error, log 
it but continue through the loop
-			log.Error(err)
-		} else {
-			// check to see if the deployment status has succeed...if so, break out
-			// of the loop
-			if deployment.Status.ReadyReplicas == *deployment.Spec.Replicas {
-				return nil
-			}
-		}
-	}
-}
diff --git a/internal/operator/cluster/pgbadger.go b/internal/operator/cluster/pgbadger.go
new file mode 100644
index 0000000000..d7af20bf0d
--- /dev/null
+++ b/internal/operator/cluster/pgbadger.go
@@ -0,0 +1,199 @@
+package cluster
+
+/*
+ Copyright 2020 - 2023 Crunchy Data Solutions, Inc.
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"strconv"
+
+	"github.com/crunchydata/postgres-operator/internal/kubeapi"
+	"github.com/crunchydata/postgres-operator/internal/operator"
+	"github.com/crunchydata/postgres-operator/internal/util"
+	crv1 "github.com/crunchydata/postgres-operator/pkg/apis/crunchydata.com/v1"
+
+	log "github.com/sirupsen/logrus"
+	appsv1 "k8s.io/api/apps/v1"
+	v1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/client-go/kubernetes"
+	"k8s.io/client-go/rest"
+)
+
+const (
+	// pgBadgerServicePortName is the name used to identify the pgBadger port in
+	// the service
+	pgBadgerServicePortName = "pgbadger"
+)
+
+// AddPGBadger ensures that a PostgreSQL cluster is able to undertake the
+// actions required by the "crunchy-badger", i.e. updating the Service.
+// This executes regardless of whether this is a standby cluster.
+//
+// This does not modify the Deployment that has the pgBadger sidecar. That is
+// handled by the "UpdatePGBadgerSidecar" function, so it can be handled as part
+// of a rolling update.
+func AddPGBadger(clientset kubernetes.Interface, restconfig *rest.Config, cluster *crv1.Pgcluster) error {
+	ctx := context.TODO()
+	// set up the Services, which are still needed on a standby
+	services, err := getClusterInstanceServices(clientset, cluster)
+	if err != nil {
+		return err
+	}
+
+	// loop over each service to perform the necessary modifications
+svcLoop:
+	for i := range services.Items {
+		svc := &services.Items[i]
+
+		// loop over the service ports to see if the pgBadger port is already set
+		// up. If it is, skip the rest of this service via the labeled continue
+		for _, svcPort := range svc.Spec.Ports {
+			if svcPort.Name == pgBadgerServicePortName {
+				continue svcLoop
+			}
+		}
+
+		// otherwise, we need to append a service port to the list
+		port, err := strconv.ParseInt(
+			util.GetValueOrDefault(cluster.Spec.PGBadgerPort, operator.Pgo.Cluster.PGBadgerPort), 10, 32)
+		// if we can't parse this for whatever reason, issue a warning and continue on
+		if err != nil {
+			log.Warn(err)
+		}
+
+		svcPort := v1.ServicePort{
+			Name:     pgBadgerServicePortName,
+			Protocol: v1.ProtocolTCP,
+			Port:     int32(port),
+		}
+
+		svc.Spec.Ports = append(svc.Spec.Ports, svcPort)
+
+		// if we fail to update the service, warn, but continue on
+		if _, err := clientset.CoreV1().Services(svc.Namespace).Update(ctx, svc, metav1.UpdateOptions{}); err != nil {
+			log.Warn(err)
+		}
+	}
+
+	return nil
+}
+
+// RemovePGBadger disables the ability for a PostgreSQL cluster to run
+// pgBadger.
+//
+// This does not modify the Deployment that has the pgBadger sidecar. That is
+// handled by the "UpdatePGBadgerSidecar" function, so it can be handled as part
+// of a rolling update.
+func RemovePGBadger(clientset kubernetes.Interface, restconfig *rest.Config, cluster *crv1.Pgcluster) error {
+	ctx := context.TODO()
+
+	// close the pgBadger port on each service
+	services, err := getClusterInstanceServices(clientset, cluster)
+	if err != nil {
+		return err
+	}
+
+	for i := range services.Items {
+		svc := &services.Items[i]
+		svcPorts := []v1.ServicePort{}
+
+		for _, svcPort := range svc.Spec.Ports {
+			// if we find the service port for pgBadger, skip it, as we will not
+			// be including it in the update
+			if svcPort.Name == pgBadgerServicePortName {
+				continue
+			}
+
+			svcPorts = append(svcPorts, svcPort)
+		}
+
+		svc.Spec.Ports = svcPorts
+
+		// if we fail to update the service, warn but continue
+		if _, err := clientset.CoreV1().Services(svc.Namespace).Update(ctx, svc, metav1.UpdateOptions{}); err != nil {
+			log.Warn(err)
+		}
+	}
+	return nil
+}
+
+// UpdatePGBadgerSidecar either adds or removes the pgBadger sidecar from the
+// cluster. This is meant to be used as a rolling update callback function.
+func UpdatePGBadgerSidecar(clientset kubeapi.Interface, cluster *crv1.Pgcluster, deployment *appsv1.Deployment) error {
+	// need to determine if we are adding or removing
+	if cluster.Spec.PGBadger {
+		return addPGBadgerSidecar(cluster, deployment)
+	}
+
+	removePGBadgerSidecar(deployment)
+
+	return nil
+}
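[Editor's note: AddPGBadger above uses Go's labeled continue (svcLoop) to skip the remainder of the outer loop body from inside the inner port scan. A tiny standalone illustration; the data and names here are made up.]

package main

import "fmt"

func main() {
	services := [][]string{
		{"postgres", "pgbadger"},
		{"postgres"},
	}

outer:
	for i, ports := range services {
		for _, p := range ports {
			if p == "pgbadger" {
				// port already present: skip the rest of this service entirely
				continue outer
			}
		}
		fmt.Printf("service %d needs the pgbadger port appended\n", i)
	}
}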
+// addPGBadgerSidecar adds the pgBadger sidecar to a Deployment. If pgBadger is
+// already present, this call supersedes it and adds the "new version" of the
+// pgBadger container.
+func addPGBadgerSidecar(cluster *crv1.Pgcluster, deployment *appsv1.Deployment) error {
+	// use the legacy template generation to make the appropriate substitutions,
+	// and then get said generation to be placed into an actual Container object
+	template := operator.GetBadgerAddon(cluster, deployment.Name)
+	container := v1.Container{}
+
+	if err := json.Unmarshal([]byte(template), &container); err != nil {
+		return fmt.Errorf("error unmarshalling pgBadger json into Container: %w", err)
+	}
+
+	// append the container to the deployment container list. However, we are
+	// going to do this carefully, in case the pgBadger container already exists.
+	// this definition will supersede any pgBadger container already in the
+	// containers list
+	containers := []v1.Container{}
+	for _, c := range deployment.Spec.Template.Spec.Containers {
+		// skip if this is the pgBadger container. pgBadger is added after the loop
+		if c.Name == pgBadgerContainerName {
+			continue
+		}
+
+		containers = append(containers, c)
+	}
+
+	// add the pgBadger container and override the containers list definition
+	containers = append(containers, container)
+	deployment.Spec.Template.Spec.Containers = containers
+
+	return nil
+}
+
+// removePGBadgerSidecar removes the pgBadger sidecar from a Deployment.
+//
+// This involves:
+// - Removing the container entry for pgBadger
+func removePGBadgerSidecar(deployment *appsv1.Deployment) {
+	// first, find the container entry in the list of containers and remove it
+	containers := []v1.Container{}
+	for _, c := range deployment.Spec.Template.Spec.Containers {
+		// skip if this is the pgBadger container
+		if c.Name == pgBadgerContainerName {
+			continue
+		}
+
+		containers = append(containers, c)
+	}
+
+	deployment.Spec.Template.Spec.Containers = containers
+}
diff --git a/internal/operator/cluster/pgbouncer.go b/internal/operator/cluster/pgbouncer.go
index 569783a3c8..a61bef2598 100644
--- a/internal/operator/cluster/pgbouncer.go
+++ b/internal/operator/cluster/pgbouncer.go
@@ -1,7 +1,7 @@
 package cluster
 
 /*
- Copyright 2018 - 2020 Crunchy Data Solutions, Inc.
+ Copyright 2018 - 2023 Crunchy Data Solutions, Inc.
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
 You may obtain a copy of the License at
@@ -51,13 +51,20 @@ type PgbouncerPasswdFields struct {
 type PgbouncerConfFields struct {
 	PG_PRIMARY_SERVICE_NAME string
 	PG_PORT                 string
+	TLSEnabled              bool
+}
+
+type pgBouncerHBATemplateFields struct {
+	TLSEnabled bool
 }
 
 type pgBouncerTemplateFields struct {
 	Name               string
+	CASecret           string
 	ClusterName        string
 	CCPImagePrefix     string
 	CCPImageTag        string
+	DisableFSGroup     bool
 	Port               string
 	PrimaryServiceName string
 	ContainerResources string
@@ -68,6 +75,8 @@ type pgBouncerTemplateFields struct {
 	PodAntiAffinityLabelName  string
 	PodAntiAffinityLabelValue string
 	Replicas                  int32 `json:",string"`
+	TLSEnabled                bool
+	TLSSecret                 string
 }
 
 // pgBouncerDeploymentFormat is the name of the Kubernetes Deployment that
@@ -79,10 +88,10 @@ const pgPort = "5432"
 
 const (
 	// the path to the pgbouncer uninstallation script
-	pgBouncerUninstallScript = "/opt/cpm/bin/sql/pgbouncer/pgbouncer-uninstall.sql"
+	pgBouncerUninstallScript = "/opt/crunchy/bin/postgres-ha/sql/pgbouncer/pgbouncer-uninstall.sql"
 
 	// the path to the pgbouncer installation script
-	pgBouncerInstallScript = "/opt/cpm/bin/sql/pgbouncer/pgbouncer-install.sql"
+	pgBouncerInstallScript = "/opt/crunchy/bin/postgres-ha/sql/pgbouncer/pgbouncer-install.sql"
 )
 
 const (
@@ -90,30 +99,14 @@ const (
 	// PostgreSQL cluster
 	sqlCheckPgBouncerInstall = `SELECT EXISTS (SELECT 1 FROM pg_catalog.pg_roles WHERE rolname = 'pgbouncer' LIMIT 1);`
 
-	// disable the pgbouncer user from logging in. 
This is safe from SQL injection - // as the string that is being interpolated is the util.PgBouncerUser constant - // - // This had the "PASSWORD NULL" feature, but this is only found in - // PostgreSQL 11+, and given we don't want to check for the PG version before - // running the command, we will not use it - sqlDisableLogin = `ALTER ROLE "%s" NOLOGIN;` - - // sqlEnableLogin is the SQL to update the password - // NOTE: this is safe from SQL injection as we explicitly add the inerpolated - // string as a MD5 hash and we are using the crv1.PGUserPgBouncer constant - // However, the escaping is handled in the util.SetPostgreSQLPassword function - sqlEnableLogin = `ALTER ROLE %s PASSWORD %s LOGIN;` - // sqlGetDatabasesForPgBouncer gets all the databases where pgBouncer can be // installed or uninstalled sqlGetDatabasesForPgBouncer = `SELECT datname FROM pg_catalog.pg_database WHERE datname NOT IN ('template0') AND datallowconn;` ) -var ( - // sqlUninstallPgBouncer provides the final piece of SQL to uninstall - // pgbouncer, which is to remove the user - sqlUninstallPgBouncer = fmt.Sprintf(`DROP ROLE "%s";`, crv1.PGUserPgBouncer) -) +// sqlUninstallPgBouncer provides the final piece of SQL to uninstall +// pgbouncer, which is to remove the user +var sqlUninstallPgBouncer = fmt.Sprintf(`DROP ROLE "%s";`, crv1.PGUserPgBouncer) // AddPgbouncer contains the various functions that are used to add a pgBouncer // Deployment to a PostgreSQL cluster @@ -125,7 +118,6 @@ func AddPgbouncer(clientset kubernetes.Interface, restconfig *rest.Config, clust // get the primary pod, which is needed to update the password for the // pgBouncer administrative user pod, err := util.GetPrimaryPod(clientset, cluster) - if err != nil { return err } @@ -151,11 +143,9 @@ func AddPgbouncer(clientset kubernetes.Interface, restconfig *rest.Config, clust if !cluster.Spec.Standby { secretName := util.GeneratePgBouncerSecretName(cluster.Name) pgBouncerPassword, err := util.GetPasswordFromSecret(clientset, cluster.Namespace, secretName) - if err != nil { // set the password that will be used for the "pgbouncer" PostgreSQL account newPassword, err := generatePassword() - if err != nil { return err } @@ -171,7 +161,13 @@ func AddPgbouncer(clientset kubernetes.Interface, restconfig *rest.Config, clust // attempt to update the password in PostgreSQL, as this is how pgBouncer // will properly interface with PostgreSQL - if err := setPostgreSQLPassword(clientset, restconfig, pod, cluster.Spec.Port, pgBouncerPassword); err != nil { + if err := setPostgreSQLPassword(clientset, restconfig, pod, cluster.Spec.Port, crv1.PGUserPgBouncer, pgBouncerPassword); err != nil { + return err + } + } else { + // if this is a standby cluster, we still need to create a pgBouncer Secret, + // but no credentials are available + if err := createPgbouncerSecret(clientset, cluster, ""); err != nil { return err } } @@ -272,22 +268,6 @@ func DeletePgbouncer(clientset kubernetes.Interface, restconfig *rest.Config, cl func RotatePgBouncerPassword(clientset kubernetes.Interface, restconfig *rest.Config, cluster *crv1.Pgcluster) error { ctx := context.TODO() - // determine if we are able to access the primary Pod - primaryPod, err := util.GetPrimaryPod(clientset, cluster) - - if err != nil { - return err - } - - // let's also go ahead and get the secret that contains the pgBouncer - // information. 
If we can't find the secret, we're basically done here - secretName := util.GeneratePgBouncerSecretName(cluster.Name) - secret, err := clientset.CoreV1().Secrets(cluster.Namespace).Get(ctx, secretName, metav1.GetOptions{}) - - if err != nil { - return err - } - // there are a few steps that must occur in order for the password to be // successfully rotated: // @@ -299,16 +279,17 @@ func RotatePgBouncerPassword(clientset kubernetes.Interface, restconfig *rest.Co // ...wouldn't it be nice if we could run this in a transaction? rolling back // is hard :( - // first, generate a new password - password, err := generatePassword() - + // let's also go ahead and get the secret that contains the pgBouncer + // information. If we can't find the secret, we're basically done here + secretName := util.GeneratePgBouncerSecretName(cluster.Name) + secret, err := clientset.CoreV1().Secrets(cluster.Namespace).Get(ctx, secretName, metav1.GetOptions{}) if err != nil { return err } - // next, update the PostgreSQL primary with the new password. If this fails - // we definitely return an error - if err := setPostgreSQLPassword(clientset, restconfig, primaryPod, cluster.Spec.Port, password); err != nil { + // update the password on the PostgreSQL instance + password, err := rotatePostgreSQLPassword(clientset, restconfig, cluster, crv1.PGUserPgBouncer) + if err != nil { return err } @@ -317,7 +298,7 @@ func RotatePgBouncerPassword(clientset kubernetes.Interface, restconfig *rest.Co // PostgreSQL to perform its authentication secret.Data["password"] = []byte(password) secret.Data["users.txt"] = util.GeneratePgBouncerUsersFileBytes( - makePostgresPassword(pgpassword.MD5, password)) + makePostgreSQLPassword(pgpassword.MD5, crv1.PGUserPgBouncer, password)) // update the secret if _, err := clientset.CoreV1().Secrets(cluster.Namespace). @@ -356,14 +337,12 @@ func UninstallPgBouncer(clientset kubernetes.Interface, restconfig *rest.Config, // determine if we are able to access the primary Pod. If not, then the // journey ends right here pod, err := util.GetPrimaryPod(clientset, cluster) - if err != nil { return err } // get the list of databases that we need to scan through databases, err := getPgBouncerDatabases(clientset, restconfig, pod, cluster.Spec.Port) - if err != nil { return err } @@ -384,7 +363,6 @@ func UninstallPgBouncer(clientset kubernetes.Interface, restconfig *rest.Config, // exec into the pod to run the query _, stderr, err := kubeapi.ExecToPodThroughAPI(restconfig, clientset, cmd, "database", pod.Name, pod.ObjectMeta.Namespace, sql) - // if there is an error executing the command, log the error message from // stderr and return the error if err != nil { @@ -407,12 +385,20 @@ func UpdatePgbouncer(clientset kubernetes.Interface, oldCluster, newCluster *crv log.Debugf("update pgbouncer from cluster [%s] in namespace [%s]", clusterName, namespace) - // we need to detect what has changed. presently, two "groups" of things could - // have changed - // 1. The # of replicas to maintain - // 2. The pgBouncer container resources + // we need to detect what has changed. This includes: + // + // 1. The Service type for the pgBouncer Service + // 2. The # of replicas to maintain + // 3. 
The pgBouncer container resources // - // As #2 is a bit more destructive, we'll do that last + // As #3 is a bit more destructive, we'll do that last + + // check the pgBouncer Service + if oldCluster.Spec.PgBouncer.ServiceType != newCluster.Spec.PgBouncer.ServiceType { + if err := UpdatePgBouncerService(clientset, newCluster); err != nil { + return err + } + } // check if the replicas differ if oldCluster.Spec.PgBouncer.Replicas != newCluster.Spec.PgBouncer.Replicas { @@ -444,7 +430,6 @@ func UpdatePgBouncerAnnotations(clientset kubernetes.Interface, cluster *crv1.Pg // get a list of all of the instance deployments for the cluster deployment, err := getPgBouncerDeployment(clientset, cluster) - if err != nil { return err } @@ -465,6 +450,25 @@ func UpdatePgBouncerAnnotations(clientset kubernetes.Interface, cluster *crv1.Pg return nil } +// UpdatePgBouncerService updates the information on the pgBouncer Service. +// Specifically, it determines if it should use the information from the parent +// PostgreSQL cluster or any specific overrides that are available in the +// pgBouncer spec. +func UpdatePgBouncerService(clientset kubernetes.Interface, cluster *crv1.Pgcluster) error { + info := serviceInfo{ + serviceName: fmt.Sprintf(pgBouncerDeploymentFormat, cluster.Name), + serviceNamespace: cluster.Namespace, + serviceType: cluster.Spec.ServiceType, + } + + // if the pgBouncer ServiceType is set, use that + if cluster.Spec.PgBouncer.ServiceType != "" { + info.serviceType = cluster.Spec.PgBouncer.ServiceType + } + + return updateService(clientset, info) +} + // checkPgBouncerInstall checks to see if pgBouncer is installed in the // PostgreSQL custer, which involves check to see if the pgBouncer role is // present in the PostgreSQL cluster @@ -478,7 +482,6 @@ func checkPgBouncerInstall(clientset kubernetes.Interface, restconfig *rest.Conf // exec into the pod to run the query stdout, stderr, err := kubeapi.ExecToPodThroughAPI(restconfig, clientset, cmd, "database", pod.Name, pod.ObjectMeta.Namespace, sql) - // if there is an error executing the command, log the error message from // stderr and return the error if err != nil { @@ -511,15 +514,13 @@ func createPgbouncerConfigMap(clientset kubernetes.Interface, cluster *crv1.Pgcl // generate the pgbouncer.ini information pgBouncerConf, err := generatePgBouncerConf(cluster) - if err != nil { log.Error(err) return err } // generate the pgbouncer HBA file - pgbouncerHBA, err := generatePgBouncerHBA() - + pgbouncerHBA, err := generatePgBouncerHBA(cluster) if err != nil { log.Error(err) return err @@ -561,10 +562,12 @@ func createPgBouncerDeployment(clientset kubernetes.Interface, cluster *crv1.Pgc // get the fields that will be substituted in the pgBouncer template fields := pgBouncerTemplateFields{ - Name: pgbouncerDeploymentName, - ClusterName: cluster.Name, - CCPImagePrefix: util.GetValueOrDefault(cluster.Spec.CCPImagePrefix, operator.Pgo.Cluster.CCPImagePrefix), - CCPImageTag: util.GetStandardImageTag(cluster.Spec.CCPImage, cluster.Spec.CCPImageTag), + Name: pgbouncerDeploymentName, + ClusterName: cluster.Name, + CCPImagePrefix: util.GetValueOrDefault(cluster.Spec.CCPImagePrefix, operator.Pgo.Cluster.CCPImagePrefix), + CCPImageTag: util.GetValueOrDefault(util.GetStandardImageTag(cluster.Spec.CCPImage, cluster.Spec.CCPImageTag), + operator.Pgo.Cluster.CCPImageTag), + DisableFSGroup: operator.Pgo.DisableFSGroup(), Port: cluster.Spec.Port, PGBouncerConfigMap: util.GeneratePgBouncerConfigMapName(cluster.Name), PGBouncerSecret: 
util.GeneratePgBouncerSecretName(cluster.Name), @@ -579,9 +582,16 @@ func createPgBouncerDeployment(clientset kubernetes.Interface, cluster *crv1.Pgc Replicas: cluster.Spec.PgBouncer.Replicas, } + // set appropriate fields if TLS is enabled + if isPgBouncerTLSEnabled(cluster) { + fields.CASecret = cluster.Spec.TLS.CASecret + fields.TLSEnabled = true + fields.TLSSecret = cluster.Spec.PgBouncer.TLSSecret + } + // For debugging purposes, put the template substitution in stdout if operator.CRUNCHY_DEBUG { - config.PgbouncerTemplate.Execute(os.Stdout, fields) + _ = config.PgbouncerTemplate.Execute(os.Stdout, fields) } // perform the actual template substitution @@ -641,7 +651,7 @@ func createPgbouncerSecret(clientset kubernetes.Interface, cluster *crv1.Pgclust Data: map[string][]byte{ "password": []byte(password), "users.txt": util.GeneratePgBouncerUsersFileBytes( - makePostgresPassword(pgpassword.MD5, password)), + makePostgreSQLPassword(pgpassword.MD5, crv1.PGUserPgBouncer, password)), }, } @@ -667,7 +677,13 @@ func createPgBouncerService(clientset kubernetes.Interface, cluster *crv1.Pgclus ClusterName: cluster.Name, // TODO: I think "port" needs to be evaluated, but I think for now using // the standard PostgreSQL port works - Port: operator.Pgo.Cluster.Port, + Port: operator.Pgo.Cluster.Port, + ServiceType: cluster.Spec.ServiceType, + } + + // override the service type if it is set specifically for pgBouncer + if cluster.Spec.PgBouncer.ServiceType != "" { + fields.ServiceType = cluster.Spec.PgBouncer.ServiceType } if err := CreateService(clientset, &fields, cluster.Namespace); err != nil { @@ -681,32 +697,7 @@ func createPgBouncerService(clientset kubernetes.Interface, cluster *crv1.Pgclus // disable the "pgbouncer" role from being able to log in. It keeps the // artificats that were created during normal pgBouncer operation func disablePgBouncer(clientset kubernetes.Interface, restconfig *rest.Config, cluster *crv1.Pgcluster) error { - log.Debugf("disable pgbouncer user on cluster [%s]", cluster.Name) - // disable the pgbouncer user in the PostgreSQL cluster. - // first, get the primary pod. If we cannot do this, let's consider it an - // error and abort - pod, err := util.GetPrimaryPod(clientset, cluster) - - if err != nil { - return err - } - - // This is safe from SQL injection as we are using constants and a well defined - // string - sql := strings.NewReader(fmt.Sprintf(sqlDisableLogin, crv1.PGUserPgBouncer)) - cmd := []string{"psql"} - - // exec into the pod to run the query - _, stderr, err := kubeapi.ExecToPodThroughAPI(restconfig, clientset, - cmd, "database", pod.Name, pod.ObjectMeta.Namespace, sql) - - // if there is an error, log the error from the stderr and return the error - if err != nil { - log.Error(stderr) - return err - } - - return nil + return disablePostgreSQLLogin(clientset, restconfig, cluster, crv1.PGUserPgBouncer) } // execPgBouncerScript runs a script pertaining to the management of pgBouncer @@ -717,7 +708,6 @@ func execPgBouncerScript(clientset kubernetes.Interface, restconfig *rest.Config // exec into the pod to run the query _, stderr, err := kubeapi.ExecToPodThroughAPI(restconfig, clientset, cmd, "database", pod.Name, pod.ObjectMeta.Namespace, nil) - // if there is an error executing the command, log the error as a warning // that it failed, and continue. 
It's hard to rollback from this one :\ if err != nil { @@ -727,15 +717,6 @@ func execPgBouncerScript(clientset kubernetes.Interface, restconfig *rest.Config } } -// generatePassword generates a password that is used for the "pgbouncer" -// PostgreSQL user that provides the associated pgBouncer functionality -func generatePassword() (string, error) { - // first, get the length of what the password should be - generatedPasswordLength := util.GeneratedPasswordLength(operator.Pgo.Cluster.PasswordLength) - // from there, the password can be generated! - return util.GeneratePassword(generatedPasswordLength) -} - // generatePgBouncerConf generates the content that is stored in the secret // for the "pgbouncer.ini" file func generatePgBouncerConf(cluster *crv1.Pgcluster) (string, error) { @@ -750,6 +731,7 @@ func generatePgBouncerConf(cluster *crv1.Pgcluster) (string, error) { fields := PgbouncerConfFields{ PG_PRIMARY_SERVICE_NAME: cluster.Name, PG_PORT: port, + TLSEnabled: isPgBouncerTLSEnabled(cluster), } // perform the substitution @@ -770,12 +752,15 @@ func generatePgBouncerConf(cluster *crv1.Pgcluster) (string, error) { // generatePgBouncerHBA generates the pgBouncer host-based authentication file // using the template that is vailable -func generatePgBouncerHBA() (string, error) { - // ...apparently this is overkill, but this is here from the legacy method - // and it seems like it's "ok" to leave it like this for now... +func generatePgBouncerHBA(cluster *crv1.Pgcluster) (string, error) { + // we may have some substitutions if this is a TLS enabled cluster + fields := pgBouncerHBATemplateFields{ + TLSEnabled: isPgBouncerTLSEnabled(cluster), + } + doc := bytes.Buffer{} - if err := config.PgbouncerHBATemplate.Execute(&doc, struct{}{}); err != nil { + if err := config.PgbouncerHBATemplate.Execute(&doc, fields); err != nil { log.Error(err) return "", err @@ -800,7 +785,6 @@ func getPgBouncerDatabases(clientset kubernetes.Interface, restconfig *rest.Conf // exec into the pod to run the query stdout, stderr, err := kubeapi.ExecToPodThroughAPI(restconfig, clientset, cmd, "database", pod.Name, pod.ObjectMeta.Namespace, sql) - // if there is an error executing the command, log the error message from // stderr and return the error if err != nil { @@ -823,7 +807,6 @@ func getPgBouncerDeployment(clientset kubernetes.Interface, cluster *crv1.Pgclus pgbouncerDeploymentName := fmt.Sprintf(pgBouncerDeploymentFormat, cluster.Name) deployment, err := clientset.AppsV1().Deployments(cluster.Namespace).Get(ctx, pgbouncerDeploymentName, metav1.GetOptions{}) - if err != nil { return nil, err } @@ -836,7 +819,6 @@ func getPgBouncerDeployment(clientset kubernetes.Interface, cluster *crv1.Pgclus func installPgBouncer(clientset kubernetes.Interface, restconfig *rest.Config, pod *v1.Pod, port string) error { // get the list of databases that we need to scan through databases, err := getPgBouncerDatabases(clientset, restconfig, pod, port) - if err != nil { return err } @@ -852,17 +834,10 @@ func installPgBouncer(clientset kubernetes.Interface, restconfig *rest.Config, p return nil } -// makePostgresPassword creates the expected hash for a password type for a -// PostgreSQL password -func makePostgresPassword(passwordType pgpassword.PasswordType, password string) string { - // get the PostgreSQL password generate based on the password type - // as all of these values are valid, this not not error - postgresPassword, _ := pgpassword.NewPostgresPassword(passwordType, crv1.PGUserPgBouncer, password) - - // create the 
PostgreSQL style hashed password and return - hashedPassword, _ := postgresPassword.Build() - - return hashedPassword +// isPgBouncerTLSEnabled returns true if TLS is enabled for pgBouncer, which +// means that TLS is enabled for the PostgreSQL cluster itself +func isPgBouncerTLSEnabled(cluster *crv1.Pgcluster) bool { + return cluster.Spec.PgBouncer.TLSSecret != "" && cluster.Spec.TLS.IsTLSEnabled() } // publishPgBouncerEvent publishes one of the events on the event stream @@ -905,26 +880,6 @@ func publishPgBouncerEvent(eventType string, cluster *crv1.Pgcluster) { } } -// setPostgreSQLPassword updates the pgBouncer password in the PostgreSQL -// cluster by executing into the primary Pod and changing it -func setPostgreSQLPassword(clientset kubernetes.Interface, restconfig *rest.Config, pod *v1.Pod, port, password string) error { - log.Debug("set pgbouncer password in PostgreSQL") - - // we use the PostgreSQL "md5" hashing mechanism here to pre-hash the - // password. This is semi-hard coded but is now prepped for SCRAM as a - // password type can be passed in. Almost to SCRAM! - sqlpgBouncerPassword := makePostgresPassword(pgpassword.MD5, password) - - if err := util.SetPostgreSQLPassword(clientset, restconfig, pod, - port, crv1.PGUserPgBouncer, sqlpgBouncerPassword, sqlEnableLogin); err != nil { - log.Error(err) - return err - } - - // and that's all! - return nil -} - // updatePgBouncerReplicas updates the pgBouncer Deployment with the number // of replicas (Pods) that it should run. Presently, this is fairly naive, but // as pgBouncer is "semi-stateful" we may want to improve upon this in the @@ -935,7 +890,6 @@ func updatePgBouncerReplicas(clientset kubernetes.Interface, cluster *crv1.Pgclu // get the pgBouncer deployment so the resources can be updated deployment, err := getPgBouncerDeployment(clientset, cluster) - if err != nil { return err } @@ -961,7 +915,6 @@ func updatePgBouncerResources(clientset kubernetes.Interface, cluster *crv1.Pgcl // get the pgBouncer deployment so the resources can be updated deployment, err := getPgBouncerDeployment(clientset, cluster) - if err != nil { return err } diff --git a/internal/operator/cluster/pgbouncer_test.go b/internal/operator/cluster/pgbouncer_test.go index 06ff30d8b6..4b23048288 100644 --- a/internal/operator/cluster/pgbouncer_test.go +++ b/internal/operator/cluster/pgbouncer_test.go @@ -1,7 +1,7 @@ package cluster /* - Copyright 2020 Crunchy Data Solutions, Inc. + Copyright 2020 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at @@ -18,23 +18,56 @@ package cluster import ( "testing" - pgpassword "github.com/crunchydata/postgres-operator/internal/postgres/password" + crv1 "github.com/crunchydata/postgres-operator/pkg/apis/crunchydata.com/v1" ) -func TestMakePostgresPassword(t *testing.T) { +func TestIsPgBouncerTLSEnabled(t *testing.T) { + cluster := &crv1.Pgcluster{ + Spec: crv1.PgclusterSpec{ + PgBouncer: crv1.PgBouncerSpec{}, + TLS: crv1.TLSSpec{}, + }, + } - t.Run("md5", func(t *testing.T) { - t.Run("valid", func(t *testing.T) { - passwordType := pgpassword.MD5 - password := "datalake" - expected := "md56294153764d389dc6830b6ce4f923cdb" + t.Run("true", func(t *testing.T) { + cluster.Spec.PgBouncer.TLSSecret = "pgbouncer-tls" + cluster.Spec.TLS.CASecret = "ca" + cluster.Spec.TLS.TLSSecret = "postgres-tls" - actual := makePostgresPassword(passwordType, password) + if !isPgBouncerTLSEnabled(cluster) { + t.Errorf("expected true") + } + }) + + t.Run("false", func(t *testing.T) { + t.Run("neither enabled", func(t *testing.T) { + cluster.Spec.PgBouncer.TLSSecret = "" + cluster.Spec.TLS.CASecret = "" + cluster.Spec.TLS.TLSSecret = "" - if actual != expected { - t.Errorf("expected: %q actual: %q", expected, actual) + if isPgBouncerTLSEnabled(cluster) { + t.Errorf("expected false") } }) + t.Run("postgres TLS enabled only", func(t *testing.T) { + cluster.Spec.PgBouncer.TLSSecret = "" + cluster.Spec.TLS.CASecret = "ca" + cluster.Spec.TLS.TLSSecret = "postgres-tls" + + if isPgBouncerTLSEnabled(cluster) { + t.Errorf("expected false") + } + }) + + t.Run("pgbouncer TLS enabled only", func(t *testing.T) { + cluster.Spec.PgBouncer.TLSSecret = "pgbouncer-tls" + cluster.Spec.TLS.CASecret = "" + cluster.Spec.TLS.TLSSecret = "" + + if isPgBouncerTLSEnabled(cluster) { + t.Errorf("expected false") + } + }) }) } diff --git a/internal/operator/cluster/rmdata.go b/internal/operator/cluster/rmdata.go deleted file mode 100644 index d82405fd18..0000000000 --- a/internal/operator/cluster/rmdata.go +++ /dev/null @@ -1,92 +0,0 @@ -// Package cluster holds the cluster CRD logic and definitions -// A cluster is comprised of a primary service, replica service, -// primary deployment, and replica deployment -package cluster - -/* - Copyright 2019 - 2020 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -import ( - "bytes" - "context" - "encoding/json" - "os" - "strconv" - - "github.com/crunchydata/postgres-operator/internal/config" - "github.com/crunchydata/postgres-operator/internal/operator" - "github.com/crunchydata/postgres-operator/internal/util" - crv1 "github.com/crunchydata/postgres-operator/pkg/apis/crunchydata.com/v1" - log "github.com/sirupsen/logrus" - v1batch "k8s.io/api/batch/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/kubernetes" -) - -type RmdataJob struct { - JobName string - ClusterName string - PGOImagePrefix string - PGOImageTag string - // SecurityContext string - RemoveData string - RemoveBackup string - IsBackup string - IsReplica string -} - -func CreateRmdataJob(clientset kubernetes.Interface, cl *crv1.Pgcluster, namespace string, removeData, removeBackup, isReplica, isBackup bool) error { - ctx := context.TODO() - var err error - - jobName := cl.Spec.Name + "-rmdata-" + util.RandStringBytesRmndr(4) - - jobFields := RmdataJob{ - JobName: jobName, - ClusterName: cl.Spec.Name, - PGOImagePrefix: util.GetValueOrDefault(cl.Spec.PGOImagePrefix, operator.Pgo.Pgo.PGOImagePrefix), - PGOImageTag: operator.Pgo.Pgo.PGOImageTag, - RemoveData: strconv.FormatBool(removeData), - RemoveBackup: strconv.FormatBool(removeBackup), - IsBackup: strconv.FormatBool(isReplica), - IsReplica: strconv.FormatBool(isBackup), - } - - doc := bytes.Buffer{} - - if err := config.RmdatajobTemplate.Execute(&doc, jobFields); err != nil { - log.Error(err.Error()) - return err - } - - if operator.CRUNCHY_DEBUG { - config.RmdatajobTemplate.Execute(os.Stdout, jobFields) - } - - newjob := v1batch.Job{} - - if err := json.Unmarshal(doc.Bytes(), &newjob); err != nil { - log.Error("error unmarshalling json into Job " + err.Error()) - return err - } - - // set the container image to an override value, if one exists - operator.SetContainerImageOverride(config.CONTAINER_IMAGE_PGO_RMDATA, - &newjob.Spec.Template.Spec.Containers[0]) - - _, err = clientset.BatchV1().Jobs(namespace). - Create(ctx, &newjob, metav1.CreateOptions{}) - return err -} diff --git a/internal/operator/cluster/rolling.go b/internal/operator/cluster/rolling.go new file mode 100644 index 0000000000..867c23be20 --- /dev/null +++ b/internal/operator/cluster/rolling.go @@ -0,0 +1,281 @@ +package cluster + +/* + Copyright 2020 - 2023 Crunchy Data Solutions, Inc. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +import ( + "context" + "fmt" + "strings" + "time" + + "github.com/crunchydata/postgres-operator/internal/config" + "github.com/crunchydata/postgres-operator/internal/kubeapi" + "github.com/crunchydata/postgres-operator/internal/operator" + crv1 "github.com/crunchydata/postgres-operator/pkg/apis/crunchydata.com/v1" + + log "github.com/sirupsen/logrus" + appsv1 "k8s.io/api/apps/v1" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" +) + +type deploymentType int + +const ( + deploymentTypePrimary deploymentType = iota + deploymentTypeReplica +) + +const ( + rollingUpdatePeriod = 4 * time.Second + rollingUpdateTimeout = 60 * time.Second +) + +// RollingUpdate performs a type of "rolling update" on a series of Deployments +// of a PostgreSQL cluster in an attempt to minimize downtime. +// +// The functions take a function that serves to update the contents of a +// Deployment. +// +// The rolling update is performed as such: +// +// 1. Each replica is updated. A replica is shut down and changes are applied +// The Operator waits until the replica is back online (and/or a time period) +// And moves on to the next one +// 2. A controlled switchover is performed. The Operator chooses the best +// candidate replica for the switch over. +// 3. The former primary is then shut down and updated. +// +// If this is not a HA cluster, then the Deployment is just singly restarted +// +// Erroring during this process can be fun. If an error occurs within the middle +// of a rolling update, in order to avoid placing the cluster in an +// indeterminate state, most errors are just logged for later troubleshooting +func RollingUpdate(clientset kubeapi.Interface, restConfig *rest.Config, cluster *crv1.Pgcluster, + updateFunc func(kubeapi.Interface, *crv1.Pgcluster, *appsv1.Deployment) error) error { + log.Debugf("rolling update for cluster %q", cluster.Name) + + // we need to determine which deployments are replicas and which is the + // primary. Note, that based on external factors, this can change during the + // execution of this function, so this is our best guess at the time of the + // rolling update being performed. + // + // Given the craziness of a distributed world, we may even unearth two + // primaries, or no primaries! So we will need to gracefully handle that as + // well + // + // We will get this through the Pod list as the role label is on the Pod + instances, err := generateDeploymentTypeMap(clientset, cluster) + // If we fail to generate the deployment type map, we just have to fail here. + // We can't do any updates + if err != nil { + return err + } + + // go through all of the replicas and perform the modifications + for i := range instances[deploymentTypeReplica] { + deployment := instances[deploymentTypeReplica][i] + + // Try to apply the update. If it returns an error during the process, + // continue on to the next replica + if err := applyUpdateToPostgresInstance(clientset, restConfig, cluster, deployment, updateFunc); err != nil { + log.Error(err) + continue + } + + // Ensure that the replica comes back up and can be connected to, otherwise + // keep moving on. This involves waiting for the Deployment to come back + // up... 
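[Editor's note: on the waitForDeploymentReady calls above. Earlier in this patch, pgadmin.go changed deployTimeout/pollInterval from bare integers to time.Duration values and swapped the call-site argument order to match the new (period, timeout) parameter order, fixing a class of "seconds multiplied twice" bugs. A minimal sketch of such a wait helper using client-go's wait package; the helper name is illustrative, not from this patch.]

package example

import (
	"context"
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
)

// waitDeploymentReady polls until all replicas of a Deployment report ready.
// Taking time.Duration parameters (not bare ints) means callers cannot
// accidentally multiply by time.Second a second time.
func waitDeploymentReady(clientset kubernetes.Interface, namespace, name string,
	period, timeout time.Duration) error {
	ctx := context.TODO()

	return wait.PollImmediate(period, timeout, func() (bool, error) {
		deployment, err := clientset.AppsV1().Deployments(namespace).Get(ctx, name, metav1.GetOptions{})
		if err != nil {
			return false, nil // treat lookup errors as transient: keep polling
		}

		if deployment.Spec.Replicas == nil {
			return false, fmt.Errorf("deployment %q has no replica count", name)
		}

		return deployment.Status.ReadyReplicas == *deployment.Spec.Replicas, nil
	})
}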
+		if err := waitForDeploymentReady(clientset, deployment.Namespace, deployment.Name,
+			rollingUpdatePeriod, rollingUpdateTimeout); err != nil {
+			log.Warn(err)
+		}
+
+		// ...followed by waiting for the PostgreSQL instance to come back up
+		if err := waitForPostgresInstance(clientset, restConfig, cluster, deployment,
+			rollingUpdatePeriod, rollingUpdateTimeout); err != nil {
+			log.Warn(err)
+		}
+	}
+
+	// if there is at least one replica and only one primary, perform a controlled
+	// switchover.
+	//
+	// if multiple primaries were found, we don't know how we would want to
+	// properly switch over, so we will let Patroni make the decision in this case
+	// as part of an uncontrolled failover. At this point, we should have eligible
+	// replicas that have the updated Deployment state.
+	if len(instances[deploymentTypeReplica]) > 0 && len(instances[deploymentTypePrimary]) == 1 {
+		// if the switchover fails, warn that it failed but continue on
+		if err := operator.Switchover(clientset, restConfig, cluster, ""); err != nil {
+			log.Warnf("switchover failed: %s", err.Error())
+		}
+	}
+
+	// finally, go through the list of primaries (which should only be one...)
+	// and apply the update. At this point we do not need to wait for anything,
+	// as we should have either already promoted a new primary, or this is a
+	// single instance cluster
+	for i := range instances[deploymentTypePrimary] {
+		if err := applyUpdateToPostgresInstance(clientset, restConfig, cluster,
+			instances[deploymentTypePrimary][i], updateFunc); err != nil {
+			log.Error(err)
+		}
+	}
+
+	return nil
+}
+
+// applyUpdateToPostgresInstance performs an update on an individual PostgreSQL
+// instance. It first ensures that the update can be applied. If it can, it will
+// safely turn off the PostgreSQL instance before modifying the Deployment
+// template.
+func applyUpdateToPostgresInstance(clientset kubeapi.Interface, restConfig *rest.Config,
+	cluster *crv1.Pgcluster, deployment appsv1.Deployment,
+	updateFunc func(kubeapi.Interface, *crv1.Pgcluster, *appsv1.Deployment) error) error {
+	ctx := context.TODO()
+
+	// apply any updates; if they cannot be applied, then return an error here
+	if err := updateFunc(clientset, cluster, &deployment); err != nil {
+		return err
+	}
+
+	// Before applying the update, we want to explicitly stop PostgreSQL on each
+	// instance. This prevents PostgreSQL from having to boot up in crash
+	// recovery mode.
+	//
+	// If an error is returned, warn, but proceed with the function
+	if err := stopPostgreSQLInstance(clientset, restConfig, deployment); err != nil {
+		log.Warn(err)
+	}
+
+	// Perform the update.
+	_, err := clientset.AppsV1().Deployments(deployment.Namespace).
+		Update(ctx, &deployment, metav1.UpdateOptions{})
+
+	return err
+}
+
+// generateDeploymentTypeMap takes a list of Deployments and determines what
+// they represent: a primary (hopefully only one) or replicas
+func generateDeploymentTypeMap(clientset kubernetes.Interface, cluster *crv1.Pgcluster) (map[deploymentType][]appsv1.Deployment, error) {
+	ctx := context.TODO()
+
+	// get a list of all of the instance deployments for the cluster
+	deployments, err := operator.GetInstanceDeployments(clientset, cluster)
+	if err != nil {
+		return nil, err
+	}
+
+	options := metav1.ListOptions{
+		LabelSelector: fields.AndSelectors(
+			fields.OneTermEqualSelector(config.LABEL_PG_CLUSTER, cluster.Name),
+			fields.OneTermEqualSelector(config.LABEL_PG_DATABASE, config.LABEL_TRUE),
+		).String(),
+	}
+
+	pods, err := clientset.CoreV1().Pods(cluster.Namespace).List(ctx, options)
+	// if we can't find any of the Pods, we can't make the proper determination
+	if err != nil {
+		return nil, err
+	}
+
+	// go through each Deployment and make a determination about its type. If we
+	// ultimately cannot do that, treat the deployment as a "replica"
+	instances := map[deploymentType][]appsv1.Deployment{
+		deploymentTypePrimary: {},
+		deploymentTypeReplica: {},
+	}
+
+	for i, deployment := range deployments.Items {
+		for _, pod := range pods.Items {
+			// if the Pod doesn't match, continue
+			if deployment.Name != pod.ObjectMeta.GetLabels()[config.LABEL_DEPLOYMENT_NAME] {
+				continue
+			}
+
+			// found a matching Pod, determine if it's a primary or replica
+			if pod.ObjectMeta.GetLabels()[config.LABEL_PGHA_ROLE] == config.LABEL_PGHA_ROLE_PRIMARY {
+				instances[deploymentTypePrimary] = append(instances[deploymentTypePrimary], deployments.Items[i])
+			} else {
+				instances[deploymentTypeReplica] = append(instances[deploymentTypeReplica], deployments.Items[i])
+			}
+
+			// we found the (or at least a) matching Pod, so we can break the loop now
+			break
+		}
+	}
+
+	return instances, nil
+}
+
+// generatePostgresReadyCommand creates the command used to test if a PostgreSQL
+// instance is ready
+func generatePostgresReadyCommand(port string) []string {
+	return []string{"pg_isready", "-p", port}
+}
+
+// waitForPostgresInstance waits until a PostgreSQL instance within a Pod is
+// ready to accept connections
+func waitForPostgresInstance(clientset kubernetes.Interface, restConfig *rest.Config,
+	cluster *crv1.Pgcluster, deployment appsv1.Deployment, periodSecs, timeoutSecs time.Duration) error {
+	ctx := context.TODO()
+
+	// try to find the Pod that should be exec'd into
+	options := metav1.ListOptions{
+		FieldSelector: fields.OneTermEqualSelector("status.phase", string(v1.PodRunning)).String(),
+		LabelSelector: fields.AndSelectors(
+			fields.OneTermEqualSelector(config.LABEL_PG_CLUSTER, cluster.Name),
+			fields.OneTermEqualSelector(config.LABEL_PG_DATABASE, config.LABEL_TRUE),
+			fields.OneTermEqualSelector(config.LABEL_DEPLOYMENT_NAME, deployment.Name),
+		).String(),
+	}
+	pods, err := clientset.CoreV1().Pods(deployment.Namespace).List(ctx, options)
+
+	// if the Pod selection errors, we can't really proceed
+	if err != nil {
+		return fmt.Errorf("could not find pods to check postgres instance readiness: %w", err)
+	} else if len(pods.Items) == 0 {
+		return fmt.Errorf("could not find any postgres pods")
+	}
+
+	// get the first pod...we'll just have to presume this is the active primary
+	// as we've done all we could to narrow it down at this point
+	pod := pods.Items[0]
+	cmd := generatePostgresReadyCommand(cluster.Spec.Port)
+
+	// 
start polling to test if the Postgres instance is available to accept + // connections + if err := wait.Poll(periodSecs, timeoutSecs, func() (bool, error) { + // check to see if PostgreSQL is ready to accept connections + s, _, _ := kubeapi.ExecToPodThroughAPI(restConfig, clientset, + cmd, "database", pod.Name, pod.Namespace, nil) + + // really we should find a way to get the exit code in the future, but + // in the interim, we know that we can accept connections if the below + // string is present + return strings.Contains(s, "accepting connections"), nil + }); err != nil { + return fmt.Errorf("readiness timeout reached for start up of cluster %q instance %q", + cluster.Name, deployment.Name) + } + + return nil +} diff --git a/internal/operator/cluster/service.go b/internal/operator/cluster/service.go index d2438f77c5..0a740b79c4 100644 --- a/internal/operator/cluster/service.go +++ b/internal/operator/cluster/service.go @@ -4,7 +4,7 @@ package cluster /* - Copyright 2017 - 2020 Crunchy Data Solutions, Inc. + Copyright 2017 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -26,18 +26,28 @@ import ( "github.com/crunchydata/postgres-operator/internal/config" "github.com/crunchydata/postgres-operator/internal/operator" + crv1 "github.com/crunchydata/postgres-operator/pkg/apis/crunchydata.com/v1" log "github.com/sirupsen/logrus" corev1 "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" ) +// serviceInfo is a structured way of compiling all of the info required to +// update a service +type serviceInfo struct { + serviceName string + serviceNamespace string + serviceType v1.ServiceType +} + // CreateService ... 
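+// For orientation, serviceInfo above feeds the updateService helper defined
+// later in this file. A hypothetical call that moves a cluster's Service to a
+// LoadBalancer (the cluster name "hippo" and namespace "pgo" are assumptions
+// made for the sake of the example) might look like:
+//
+//	err := updateService(clientset, serviceInfo{
+//		serviceName:      "hippo",
+//		serviceNamespace: "pgo",
+//		serviceType:      v1.ServiceTypeLoadBalancer,
+//	})
+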
 func CreateService(clientset kubernetes.Interface, fields *ServiceTemplateFields, namespace string) error {
 	ctx := context.TODO()
 	var serviceDoc bytes.Buffer
 
-	//create the service if it doesn't exist
+	// create the service if it doesn't exist
 	_, err := clientset.CoreV1().Services(namespace).Get(ctx, fields.Name, metav1.GetOptions{})
 	if err != nil {
@@ -48,7 +58,7 @@ func CreateService(clientset kubernetes.Interface, fields *ServiceTemplateFields
 	}
 
 	if operator.CRUNCHY_DEBUG {
-		config.ServiceTemplate.Execute(os.Stdout, fields)
+		_ = config.ServiceTemplate.Execute(os.Stdout, fields)
 	}
 
 	service := corev1.Service{}
@@ -62,5 +72,60 @@ func CreateService(clientset kubernetes.Interface, fields *ServiceTemplateFields
 	}
 
 	return err
+}
+
+// UpdateClusterService updates parameters (really just one) on a Service that
+// represents a PostgreSQL cluster
+func UpdateClusterService(clientset kubernetes.Interface, cluster *crv1.Pgcluster) error {
+	return updateService(clientset, serviceInfo{
+		serviceName:      cluster.Name,
+		serviceNamespace: cluster.Namespace,
+		serviceType:      cluster.Spec.ServiceType,
+	})
+}
+
+// UpdateReplicaService updates parameters (really just one) on a Service that
+// represents a PostgreSQL replica instance
+func UpdateReplicaService(clientset kubernetes.Interface, cluster *crv1.Pgcluster, replica *crv1.Pgreplica) error {
+	serviceType := cluster.Spec.ServiceType
+	// if the replica has a specific service type, override with that
+	if replica.Spec.ServiceType != "" {
+		serviceType = replica.Spec.ServiceType
+	}
+
+	return updateService(clientset, serviceInfo{
+		serviceName:      replica.Spec.ClusterName + ReplicaSuffix,
+		serviceNamespace: replica.Namespace,
+		serviceType:      serviceType,
+	})
+}
+
+// updateService does the legwork for updating a service
+func updateService(clientset kubernetes.Interface, info serviceInfo) error {
+	ctx := context.TODO()
+
+	// first, attempt to get the Service. If we cannot do that, then we can't
+	// update the service
+	svc, err := clientset.CoreV1().Services(info.serviceNamespace).Get(ctx, info.serviceName, metav1.GetOptions{})
+	if err != nil {
+		return err
+	}
+
+	// update the desired attributes, which is really just the ServiceType
+	svc.Spec.Type = info.serviceType
+
+	// ...so, while the documentation says that any "NodePort" settings are wiped
+	// if the type is not "NodePort", this is actually not the case, so we need to
+	// overcompensate for that
+	// Ref: https://godoc.org/k8s.io/api/core/v1#ServicePort
+	if svc.Spec.Type != v1.ServiceTypeNodePort {
+		for i := range svc.Spec.Ports {
+			svc.Spec.Ports[i].NodePort = 0
+		}
+	}
+
+	_, err = clientset.CoreV1().Services(info.serviceNamespace).Update(ctx, svc, metav1.UpdateOptions{})
+
+	return err
 }
diff --git a/internal/operator/cluster/standby.go b/internal/operator/cluster/standby.go
index 1444e78a45..5c9bb6aa7f 100644
--- a/internal/operator/cluster/standby.go
+++ b/internal/operator/cluster/standby.go
@@ -1,7 +1,7 @@
 package cluster
 
 /*
-Copyright 2020 Crunchy Data Solutions, Inc.
+Copyright 2020 - 2023 Crunchy Data Solutions, Inc.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
You may obtain a copy of the License at @@ -58,7 +58,7 @@ const ( "create_replica_methods": [ "pgbackrest_standby" ], - "restore_command": "source /opt/cpm/bin/pgbackrest/pgbackrest-set-env.sh && pgbackrest archive-get %f \"%p\"" + "restore_command": "source /opt/crunchy/bin/postgres-ha/pgbackrest/pgbackrest-set-env.sh && pgbackrest archive-get %f \"%p\"" }` ) @@ -189,10 +189,10 @@ func EnableStandby(clientset kubernetes.Interface, cluster crv1.Pgcluster) error // grab the json stored in the config annotation configJSONStr := dcsConfigMap.ObjectMeta.Annotations["config"] var configJSON map[string]interface{} - json.Unmarshal([]byte(configJSONStr), &configJSON) + _ = json.Unmarshal([]byte(configJSONStr), &configJSON) var standbyJSON map[string]interface{} - json.Unmarshal([]byte(standbyClusterConfigJSON), &standbyJSON) + _ = json.Unmarshal([]byte(standbyClusterConfigJSON), &standbyJSON) // set standby_cluster to default config unless already set if _, ok := configJSON["standby_cluster"]; !ok { @@ -213,7 +213,7 @@ func EnableStandby(clientset kubernetes.Interface, cluster crv1.Pgcluster) error // Delete the "leader" configMap if err = clientset.CoreV1().ConfigMaps(namespace).Delete(ctx, leaderConfigMapName, metav1.DeleteOptions{}); err != nil && !kerrors.IsNotFound(err) { - log.Error("Unable to delete configMap %s while enabling standby mode for cluster "+ + log.Errorf("Unable to delete configMap %s while enabling standby mode for cluster "+ "%s: %v", leaderConfigMapName, clusterName, err) return err } @@ -244,10 +244,9 @@ func EnableStandby(clientset kubernetes.Interface, cluster crv1.Pgcluster) error } func publishStandbyEnabled(cluster *crv1.Pgcluster) error { - clusterName := cluster.Name - //capture the cluster creation event + // capture the cluster creation event topics := make([]string, 1) topics[0] = events.EventTopicCluster diff --git a/internal/operator/cluster/upgrade.go b/internal/operator/cluster/upgrade.go index d497753c28..620a75ada3 100644 --- a/internal/operator/cluster/upgrade.go +++ b/internal/operator/cluster/upgrade.go @@ -1,7 +1,7 @@ package cluster /* - Copyright 2017 - 2020 Crunchy Data Solutions, Inc. + Copyright 2017 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
 You may obtain a copy of the License at
@@ -17,15 +17,20 @@ package cluster
 
 import (
 	"context"
+	"crypto/sha256"
 	"errors"
 	"fmt"
 	"io/ioutil"
+	"path"
+	"regexp"
 	"strconv"
+	"strings"
 	"time"
 
 	"github.com/crunchydata/postgres-operator/internal/config"
 	"github.com/crunchydata/postgres-operator/internal/kubeapi"
 	"github.com/crunchydata/postgres-operator/internal/operator"
+	pgoconfig "github.com/crunchydata/postgres-operator/internal/operator/config"
 	"github.com/crunchydata/postgres-operator/internal/util"
 	crv1 "github.com/crunchydata/postgres-operator/pkg/apis/crunchydata.com/v1"
 	"github.com/crunchydata/postgres-operator/pkg/events"
@@ -36,6 +41,7 @@ import (
 	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/fields"
+	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/client-go/kubernetes"
 	"sigs.k8s.io/yaml"
 )
@@ -48,6 +54,15 @@ const (
 	postgresGISHAImage = "crunchy-postgres-gis-ha"
 )
 
+// usePAMRegex is the regular expression that is utilized to determine if the UsePAM
+// setting is set to 'yes' in the sshd_config (as it might be for versions up to v4.6.1,
+// v4.5.2 and v4.4.3)
+var usePAMRegex = regexp.MustCompile(`(?im)^UsePAM\s*yes`)
+
+// legacyS3CASHA256Digest informs us if we should override the S3 CA with the
+// new bundle
+const legacyS3CASHA256Digest = "d1c290ea1e4544dec1934931fbfa1fb2060eb3a0f2239ba191f444ecbce35cbb"
+
 // AddUpgrade implements the upgrade workflow in accordance with the received pgtask
 // the general process is outlined below:
 // 1) get the existing pgcluster CRD instance that matches the name provided in the pgtask
@@ -76,7 +91,7 @@ func AddUpgrade(clientset kubeapi.Interface, upgrade *crv1.Pgtask, namespace str
 	}
 
 	// update the workflow status to 'in progress' while the upgrade takes place
-	updateUpgradeWorkflow(clientset, namespace, upgrade.ObjectMeta.Labels[crv1.PgtaskWorkflowID], crv1.PgtaskUpgradeInProgress)
+	_ = updateUpgradeWorkflow(clientset, namespace, upgrade.ObjectMeta.Labels[crv1.PgtaskWorkflowID], crv1.PgtaskUpgradeInProgress)
 
 	// grab the existing pgo version
 	oldpgoversion := pgcluster.ObjectMeta.Labels[config.LABEL_PGO_VERSION]
@@ -99,10 +114,20 @@ func AddUpgrade(clientset kubeapi.Interface, upgrade *crv1.Pgtask, namespace str
 	SetReplicaNumber(pgcluster, replicas)
 
 	// create the 'pgha-config' configmap while taking the init value from any existing 'pgha-default-config' configmap
-	createUpgradePGHAConfigMap(clientset, pgcluster, namespace)
+	_ = createUpgradePGHAConfigMap(clientset, pgcluster, namespace)
 
 	// delete the existing pgcluster CRDs and other resources that will be recreated
-	deleteBeforeUpgrade(clientset, pgcluster.Name, currentPrimary, namespace, pgcluster.Spec.Standby)
+	if err := deleteBeforeUpgrade(clientset, pgcluster, currentPrimary, namespace); err != nil {
+		log.Error("refusing to upgrade due to unsuccessful resource removal")
+		PublishUpgradeEvent(events.EventUpgradeClusterFailure, namespace, upgrade, err.Error())
+		return
+	}
+
+	// update the unix socket directories parameter so it no longer includes /crunchyadm and
+	// set any path references to the /opt/crunchy... 
paths + if err = updateClusterConfig(clientset, pgcluster, namespace); err != nil { + log.Errorf("error updating %s-pgha-config configmap during upgrade of cluster %s, Error: %v", pgcluster.Name, pgcluster.Name, err) + } // recreate new Backrest Repo secret that was just deleted recreateBackrestRepoSecret(clientset, upgradeTargetClusterName, namespace, operator.PgoNamespace) @@ -131,7 +156,6 @@ func AddUpgrade(clientset kubeapi.Interface, upgrade *crv1.Pgtask, namespace str PublishUpgradeEvent(events.EventUpgradeClusterCreateSubmitted, namespace, upgrade, "") log.Debugf("finished main upgrade workflow for cluster: %s", upgradeTargetClusterName) - } // getPrimaryPodDeploymentName searches through the pods associated with this pgcluster for the 'primary' pod, @@ -151,7 +175,6 @@ func getPrimaryPodDeploymentName(clientset kubernetes.Interface, cluster *crv1.P // only consider pods that are running pods, err := clientset.CoreV1().Pods(cluster.Namespace).List(ctx, options) - if err != nil { log.Errorf("no pod with the primary role label was found for cluster %s. Error: %s", cluster.Name, err.Error()) return "" @@ -223,14 +246,14 @@ func handleReplicas(clientset kubeapi.Interface, clusterName, currentPrimaryPVC, log.Debugf("scaling down pgreplica: %s", replicaList.Items[index].Name) ScaleDownBase(clientset, &replicaList.Items[index], namespace) log.Debugf("deleting pgreplica CRD: %s", replicaList.Items[index].Name) - clientset.CrunchydataV1().Pgreplicas(namespace).Delete(ctx, replicaList.Items[index].Name, metav1.DeleteOptions{}) + _ = clientset.CrunchydataV1().Pgreplicas(namespace).Delete(ctx, replicaList.Items[index].Name, metav1.DeleteOptions{}) // if the existing replica PVC is not being used as the primary PVC, delete // note this will not remove any leftover PVCs from previous failovers, // those will require manual deletion so as to avoid any accidental // deletion of valid PVCs. if replicaList.Items[index].Name != currentPrimaryPVC { deletePropagation := metav1.DeletePropagationForeground - clientset. + _ = clientset. CoreV1().PersistentVolumeClaims(namespace). Delete(ctx, replicaList.Items[index].Name, metav1.DeleteOptions{PropagationPolicy: &deletePropagation}) log.Debugf("deleting replica pvc: %s", replicaList.Items[index].Name) @@ -251,22 +274,34 @@ func handleReplicas(clientset kubeapi.Interface, clusterName, currentPrimaryPVC, // (e.g. pgo create cluster hippo --replica-count=2) but will not included any replicas // created using the 'pgo scale' command func SetReplicaNumber(pgcluster *crv1.Pgcluster, numReplicas string) { - pgcluster.Spec.Replicas = numReplicas } // deleteBeforeUpgrade deletes the deployments, services, pgcluster, jobs, tasks and default configmaps before attempting // to upgrade the pgcluster deployment. This preserves existing secrets, non-standard configmaps and service definitions // for use in the newly upgraded cluster. 
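+// As an aside, the upgrade-in-progress annotation set inside this function is
+// what downstream cleanup logic can key off of. A hedged sketch of such a
+// guard (the actual rmdata wiring lives elsewhere in the operator) might be:
+//
+//	if cluster.GetAnnotations()[config.ANNOTATION_UPGRADE_IN_PROGRESS] == config.LABEL_TRUE {
+//		return nil // skip destructive cleanup while an upgrade is in flight
+//	}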
-func deleteBeforeUpgrade(clientset kubeapi.Interface, clusterName, currentPrimary, namespace string, isStandby bool) { +func deleteBeforeUpgrade(clientset kubeapi.Interface, pgcluster *crv1.Pgcluster, currentPrimary, namespace string) error { ctx := context.TODO() - // first, get all deployments for the pgcluster in question + // first, indicate that there is an upgrade occurring on this custom resource + // this will prevent the rmdata job from firing off + annotations := pgcluster.ObjectMeta.GetAnnotations() + annotations[config.ANNOTATION_UPGRADE_IN_PROGRESS] = config.LABEL_TRUE + pgcluster.ObjectMeta.SetAnnotations(annotations) + + if _, err := clientset.CrunchydataV1().Pgclusters(namespace).Update(ctx, + pgcluster, metav1.UpdateOptions{}); err != nil { + log.Errorf("unable to set annotations to keep backups and data: %s", err) + return err + } + + // next, get all deployments for the pgcluster in question deployments, err := clientset. AppsV1().Deployments(namespace). - List(ctx, metav1.ListOptions{LabelSelector: config.LABEL_PG_CLUSTER + "=" + clusterName}) + List(ctx, metav1.ListOptions{LabelSelector: config.LABEL_PG_CLUSTER + "=" + pgcluster.Name}) if err != nil { log.Errorf("unable to get deployments. Error: %s", err) + return err } // next, delete those deployments @@ -280,59 +315,57 @@ func deleteBeforeUpgrade(clientset kubeapi.Interface, clusterName, currentPrimar } // wait until the backrest shared repo pod deployment has been deleted before continuing - waitStatus := deploymentWait(clientset, namespace, clusterName+"-backrest-shared-repo", 180, 10) + waitStatus := deploymentWait(clientset, namespace, pgcluster.Name+"-backrest-shared-repo", + 180*time.Second, 10*time.Second) log.Debug(waitStatus) // wait until the primary pod deployment has been deleted before continuing - waitStatus = deploymentWait(clientset, namespace, currentPrimary, 180, 10) + waitStatus = deploymentWait(clientset, namespace, currentPrimary, + 180*time.Second, 10*time.Second) log.Debug(waitStatus) // delete the pgcluster - clientset.CrunchydataV1().Pgclusters(namespace).Delete(ctx, clusterName, metav1.DeleteOptions{}) + _ = clientset.CrunchydataV1().Pgclusters(namespace).Delete(ctx, pgcluster.Name, metav1.DeleteOptions{}) // delete all existing job references deletePropagation := metav1.DeletePropagationForeground - clientset. + _ = clientset. BatchV1().Jobs(namespace). 
DeleteCollection(ctx, metav1.DeleteOptions{PropagationPolicy: &deletePropagation}, - metav1.ListOptions{LabelSelector: config.LABEL_PG_CLUSTER + "=" + clusterName}) + metav1.ListOptions{LabelSelector: config.LABEL_PG_CLUSTER + "=" + pgcluster.Name}) // delete all existing pgtask references except for the upgrade task // Note: this will be deleted by the existing pgcluster creation process once the // updated pgcluster created and processed by the cluster controller - if err = deleteNonupgradePgtasks(clientset, config.LABEL_PG_CLUSTER+"="+clusterName, namespace); err != nil { - log.Errorf("error while deleting pgtasks for cluster %s, Error: %v", clusterName, err) + if err = deleteNonupgradePgtasks(clientset, config.LABEL_PG_CLUSTER+"="+pgcluster.Name, namespace); err != nil { + log.Errorf("error while deleting pgtasks for cluster %s, Error: %v", pgcluster.Name, err) } // delete the leader configmap used by the Postgres Operator since this information may change after // the upgrade is complete // Note: deletion is required for cluster recreation - clientset.CoreV1().ConfigMaps(namespace).Delete(ctx, clusterName+"-leader", metav1.DeleteOptions{}) + _ = clientset.CoreV1().ConfigMaps(namespace).Delete(ctx, pgcluster.Name+"-leader", metav1.DeleteOptions{}) // delete the '-pgha-default-config' configmap, if it exists so the config syncer // will not try to use it instead of '-pgha-config' - clientset.CoreV1().ConfigMaps(namespace).Delete(ctx, clusterName+"-pgha-default-config", metav1.DeleteOptions{}) + _ = clientset.CoreV1().ConfigMaps(namespace).Delete(ctx, pgcluster.Name+"-pgha-default-config", metav1.DeleteOptions{}) + + return nil } // deploymentWait is modified from cluster.waitForDeploymentDelete. It simply waits for the current primary deployment // deletion to complete before proceeding with the rest of the pgcluster upgrade. func deploymentWait(clientset kubernetes.Interface, namespace, deploymentName string, timeoutSecs, periodSecs time.Duration) string { ctx := context.TODO() - timeout := time.After(timeoutSecs * time.Second) - tick := time.NewTicker(periodSecs * time.Second) - defer tick.Stop() - - for { - select { - case <-timeout: - return fmt.Sprintf("Timed out waiting for deployment to be deleted: [%s]", deploymentName) - case <-tick.C: - _, err := clientset.AppsV1().Deployments(namespace).Get(ctx, deploymentName, metav1.GetOptions{}) - if err != nil { - return fmt.Sprintf("Deployment %s has been deleted.", deploymentName) - } - } + + if err := wait.Poll(periodSecs, timeoutSecs, func() (bool, error) { + _, err := clientset.AppsV1().Deployments(namespace).Get(ctx, deploymentName, metav1.GetOptions{}) + return err != nil, nil + }); err != nil { + return fmt.Sprintf("Timed out waiting for deployment to be deleted: [%s]", deploymentName) } + + return fmt.Sprintf("Deployment %s has been deleted.", deploymentName) } // deleteNonupgradePgtasks deletes all existing pgtasks by selector with the exception of the @@ -438,6 +471,19 @@ func recreateBackrestRepoSecret(clientset kubernetes.Interface, clustername, nam if err == nil { if b, ok := secret.Data["aws-s3-ca.crt"]; ok { config.BackrestS3CA = b + + // if this matches the old AWS S3 CA bundle, update to the new one. 
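+			// The digest above is the hex-encoded SHA-256 of the legacy CA
+			// bundle; reproduced standalone (caBundleBytes is a hypothetical
+			// variable used only for illustration), the check amounts to:
+			//
+			//	isLegacy := fmt.Sprintf("%x", sha256.Sum256(caBundleBytes)) == legacyS3CASHA256Digest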
+			if fmt.Sprintf("%x", sha256.Sum256(config.BackrestS3CA)) == legacyS3CASHA256Digest {
+				file := path.Join("/default-pgo-backrest-repo/aws-s3-ca.crt")
+
+				// if we can't read the contents of the file for whatever reason, warn,
+				// otherwise, update the entry in the Secret
+				if contents, err := ioutil.ReadFile(file); err != nil {
+					log.Warn(err)
+				} else {
+					config.BackrestS3CA = contents
+				}
+			}
 		}
 		if b, ok := secret.Data["aws-s3-key"]; ok {
 			config.BackrestS3Key = string(b)
@@ -447,9 +493,18 @@ func recreateBackrestRepoSecret(clientset kubernetes.Interface, clustername, nam
 		}
 	}
 
+	var repoSecret *v1.Secret
 	if err == nil {
-		err = util.CreateBackrestRepoSecrets(clientset, config)
+		repoSecret, err = util.CreateBackrestRepoSecrets(clientset, config)
+	}
+	if err != nil {
+		log.Errorf("error generating new backrest repo secrets during pgcluster upgrade: %v", err)
 	}
+
+	if err := updatePGBackRestSSHDConfig(clientset, repoSecret, namespace); err != nil {
+		log.Errorf("error upgrading pgBackRest sshd_config: %v", err)
+	}
+
 	if err != nil {
 		log.Errorf("error generating new backrest repo secrets during pgcluster upgrade: %v", err)
 	}
@@ -459,22 +514,133 @@ func recreateBackrestRepoSecret(clientset kubernetes.Interface, clustername, nam
 // for the current Postgres Operator version, updating or deleting values where appropriate, and sets
 // an expected status so that the CRD object can be recreated.
 func preparePgclusterForUpgrade(pgcluster *crv1.Pgcluster, parameters map[string]string, oldpgoversion, currentPrimary string) {
-
 	// first, update the PGO version references to the current Postgres Operator version
 	pgcluster.ObjectMeta.Labels[config.LABEL_PGO_VERSION] = parameters[config.LABEL_PGO_VERSION]
 	pgcluster.Spec.UserLabels[config.LABEL_PGO_VERSION] = parameters[config.LABEL_PGO_VERSION]
 
 	// next, capture the existing Crunchy Postgres Exporter configuration settings (previous to version
 	// 4.5.0 referred to as Crunchy Collect), if they exist, and store them in the current labels
+	// 4.6.0 added this value to the spec as "Exporter", so the next step ensures
+	// that the value is migrated over
 	if value, ok := pgcluster.ObjectMeta.Labels["crunchy_collect"]; ok {
 		pgcluster.ObjectMeta.Labels[config.LABEL_EXPORTER] = value
-		delete(pgcluster.ObjectMeta.Labels, "crunchy_collect")
 	}
+	delete(pgcluster.ObjectMeta.Labels, "crunchy_collect")
 
+	// Note that this is the *user labels*, the above is in the metadata labels
 	if value, ok := pgcluster.Spec.UserLabels["crunchy_collect"]; ok {
 		pgcluster.Spec.UserLabels[config.LABEL_EXPORTER] = value
-		delete(pgcluster.Spec.UserLabels, "crunchy_collect")
 	}
+	delete(pgcluster.Spec.UserLabels, "crunchy_collect")
+
+	// convert the metrics label over to using a proper definition. Give the user
+	// label precedence.
+	if value, ok := pgcluster.ObjectMeta.Labels[config.LABEL_EXPORTER]; ok {
+		pgcluster.Spec.Exporter, _ = strconv.ParseBool(value)
+	}
+	delete(pgcluster.ObjectMeta.Labels, config.LABEL_EXPORTER)
+
+	// again, note this is *user* labels, the above are the metadata labels
+	if value, ok := pgcluster.Spec.UserLabels[config.LABEL_EXPORTER]; ok {
+		pgcluster.Spec.Exporter, _ = strconv.ParseBool(value)
+	}
+	delete(pgcluster.Spec.UserLabels, config.LABEL_EXPORTER)
+
+	// 4.6.0 moved pgBadger to use an attribute instead of a label. If this label
+	// exists on the current CRD, move the value to the attribute. 
+	if ok, _ := strconv.ParseBool(pgcluster.ObjectMeta.GetLabels()["crunchy-pgbadger"]); ok {
+		pgcluster.Spec.PGBadger = true
+	}
+	delete(pgcluster.ObjectMeta.Labels, "crunchy-pgbadger")
+
+	// 4.6.0 moved the former "service-type" label into the ServiceType CRD
+	// attribute, so we may need to do the same
+	if val, ok := pgcluster.Spec.UserLabels["service-type"]; ok {
+		pgcluster.Spec.ServiceType = v1.ServiceType(val)
+	}
+	delete(pgcluster.Spec.UserLabels, "service-type")
+
+	// 4.6.0 removed the "pg-pod-anti-affinity" label from user labels, as this is
+	// superfluous and handled through other processes. We can explicitly
+	// eliminate it
+	delete(pgcluster.Spec.UserLabels, "pg-pod-anti-affinity")
+
+	// 4.6.0 moved the "autofail" label to the DisableAutofail attribute. Given
+	// by default we need to start in an autofailover state, we just delete the
+	// legacy attribute
+	delete(pgcluster.ObjectMeta.Labels, "autofail")
+
+	// 4.6.0 moved the node labels to the custom resource objects in a more
+	// structured way. If we have a node label, let's migrate it to that
+	// format
+	if pgcluster.Spec.UserLabels["NodeLabelKey"] != "" && pgcluster.Spec.UserLabels["NodeLabelValue"] != "" {
+		// transition to using the native NodeAffinity objects. In the previous
+		// setup, this was, by default, preferred node affinity. Designed to match
+		// a standard setup.
+		requirement := v1.NodeSelectorRequirement{
+			Key:      pgcluster.Spec.UserLabels["NodeLabelKey"],
+			Values:   []string{pgcluster.Spec.UserLabels["NodeLabelValue"]},
+			Operator: v1.NodeSelectorOpIn,
+		}
+		term := v1.PreferredSchedulingTerm{
+			Weight: crv1.NodeAffinityDefaultWeight, // taking this from the former template
+			Preference: v1.NodeSelectorTerm{
+				MatchExpressions: []v1.NodeSelectorRequirement{requirement},
+			},
+		}
+
+		// and here is our default node affinity rule
+		pgcluster.Spec.NodeAffinity = crv1.NodeAffinitySpec{
+			Default: &v1.NodeAffinity{
+				PreferredDuringSchedulingIgnoredDuringExecution: []v1.PreferredSchedulingTerm{term},
+			},
+		}
+	}
+	// erase all trace of this
+	delete(pgcluster.Spec.UserLabels, "NodeLabelKey")
+	delete(pgcluster.Spec.UserLabels, "NodeLabelValue")
+
+	// 4.6.0 moved the "backrest-storage-type" label to a CRD attribute, well,
+	// really an array of CRD attributes, which we need to map the various
+	// attributes to. "local" will be mapped to "posix" to match the pgBackRest
+	// nomenclature
+	//
+	// If we come back with an empty array, we will default it to posix
+	if val, ok := pgcluster.Spec.UserLabels["backrest-storage-type"]; ok {
+		pgcluster.Spec.BackrestStorageTypes = make([]crv1.BackrestStorageType, 0)
+		storageTypes := strings.Split(val, ",")
+
+		// loop through each of the storage types processed and determine which of
+		// the standard storage types it matches
+		for _, s := range storageTypes {
+			for _, storageType := range crv1.BackrestStorageTypes {
+				// if this is not the storage type, continue looping
+				if crv1.BackrestStorageType(s) != storageType {
+					continue
+				}
+
+				// so this is the storage type. 
However, if it's "local" let's update
+				// it to be posix
+				if storageType == crv1.BackrestStorageTypeLocal {
+					pgcluster.Spec.BackrestStorageTypes = append(pgcluster.Spec.BackrestStorageTypes,
+						crv1.BackrestStorageTypePosix)
+				} else {
+					pgcluster.Spec.BackrestStorageTypes = append(pgcluster.Spec.BackrestStorageTypes, storageType)
+				}
+
+				// we can break the inner loop
+				break
+			}
+		}
+
+		// remember: if somehow this is empty, add "posix"
+		if len(pgcluster.Spec.BackrestStorageTypes) == 0 {
+			pgcluster.Spec.BackrestStorageTypes = append(pgcluster.Spec.BackrestStorageTypes,
+				crv1.BackrestStorageTypePosix)
+		}
+	}
+	// and delete the label
+	delete(pgcluster.Spec.UserLabels, "backrest-storage-type")
 
 	// since the current primary label is not used in this version of the Postgres Operator,
 	// delete it before moving on to other upgrade tasks
@@ -512,13 +678,10 @@ func preparePgclusterForUpgrade(pgcluster *crv1.Pgcluster, parameters map[string
 	// use with PostGIS enabled pgclusters
 	pgcluster.Spec.CCPImageTag = parameters[config.LABEL_CCP_IMAGE_KEY]
 
-	// set a default autofail value of "true" to enable Patroni's replication. If left to an existing
-	// value of "false," Patroni will be in a paused state and unable to sync all replicas to the
-	// current timeline
-	pgcluster.ObjectMeta.Labels[config.LABEL_AUTOFAIL] = "true"
-
-	// Don't think we'll need to do this, but leaving the comment for now....
-	// pgcluster.ObjectMeta.Labels[config.LABEL_POD_ANTI_AFFINITY] = ""
+	// set a default disable autofail value of "false" to enable Patroni's replication.
+	// If left to an existing value of "true," Patroni will be in a paused state
+	// and unable to sync all replicas to the current timeline
+	pgcluster.Spec.DisableAutofail = false
 
 	// set pgouser to match the default configuration currently in use after the Operator upgrade
 	pgcluster.ObjectMeta.Labels[config.LABEL_PGOUSER] = parameters[config.LABEL_PGOUSER]
@@ -533,10 +696,6 @@ func preparePgclusterForUpgrade(pgcluster *crv1.Pgcluster, parameters map[string
 		pgcluster.Spec.PGBadgerPort = operator.Pgo.Cluster.PGBadgerPort
 	}
 
-	// ensure that the pgo-backrest label is set to 'true' since pgbackrest is required for normal
-	// cluster operations in this version of the Postgres Operator
-	pgcluster.ObjectMeta.Labels[config.LABEL_BACKREST] = "true"
-
 	// added in 4.2 and copied from configuration in 4.4
 	if pgcluster.Spec.BackrestS3Bucket == "" {
 		pgcluster.Spec.BackrestS3Bucket = operator.Pgo.Cluster.BackrestS3Bucket
@@ -726,3 +885,111 @@ func publishUpgradeClusterFailureEvent(eventHeader events.EventHeader, clusterna
 		log.Errorf("error publishing event. Error: %s", err.Error())
 	}
 }
+
+// updateClusterConfig updates the PostgreSQL configuration for a cluster by updating the
+// <clusterName>-pgha-config ConfigMap, rewriting any legacy path and socket settings, and then
+// syncing the result to the cluster's Distributed Configuration Store (DCS) so the changes are
+// applied to the entire cluster.
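+//
+// For orientation, the two ConfigMap sections manipulated below resolve, for a
+// hypothetical cluster named "hippo" (and assuming the format strings expand
+// as the inline comments indicate), to:
+//
+//	dcsConfigName   := fmt.Sprintf(pgoconfig.PGHADCSConfigName, "hippo")   // "hippo-dcs-config"
+//	localConfigName := fmt.Sprintf(pgoconfig.PGHALocalConfigName, "hippo") // "hippo-local-config"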
+func updateClusterConfig(clientset kubeapi.Interface, pgcluster *crv1.Pgcluster, namespace string) error {
+
+	// first, define the names for the two main sections of the <clusterName>-pgha-config configmap
+
+	// <clusterName>-dcs-config
+	dcsConfigName := fmt.Sprintf(pgoconfig.PGHADCSConfigName, pgcluster.Name)
+	// <clusterName>-local-config
+	localConfigName := fmt.Sprintf(pgoconfig.PGHALocalConfigName, pgcluster.Name)
+
+	// next, get the <clusterName>-pgha-config configmap
+	clusterConfig, err := clientset.CoreV1().ConfigMaps(namespace).Get(context.TODO(), fmt.Sprintf("%s-pgha-config", pgcluster.Name), metav1.GetOptions{})
+	if err != nil {
+		return err
+	}
+
+	// prepare DCS config struct
+	dcsConf := &pgoconfig.DCSConfig{}
+	if err := yaml.Unmarshal([]byte(clusterConfig.Data[dcsConfigName]), dcsConf); err != nil {
+		return err
+	}
+
+	// prepare LocalDB config struct
+	localDBConf := &pgoconfig.LocalDBConfig{}
+	if err := yaml.Unmarshal([]byte(clusterConfig.Data[localConfigName]), localDBConf); err != nil {
+		return err
+	}
+
+	// set the updated path values for both DCS and LocalDB configs, if the fields and maps exist
+	// as of version 4.6, the /crunchyadm directory no longer exists (previously set as a unix socket directory)
+	// and the /opt/cpm... directories are now set under /opt/crunchy
+	if dcsConf.PostgreSQL != nil && dcsConf.PostgreSQL.Parameters != nil {
+		dcsConf.PostgreSQL.Parameters["unix_socket_directories"] = "/tmp"
+		dcsConf.PostgreSQL.Parameters["archive_command"] = `source /opt/crunchy/bin/postgres-ha/pgbackrest/pgbackrest-set-env.sh && pgbackrest archive-push "%p"`
+		dcsConf.PostgreSQL.RecoveryConf["restore_command"] = `source /opt/crunchy/bin/postgres-ha/pgbackrest/pgbackrest-set-env.sh && pgbackrest archive-get %f "%p"`
+	}
+
+	if localDBConf.PostgreSQL.Callbacks != nil {
+		localDBConf.PostgreSQL.Callbacks.OnRoleChange = "/opt/crunchy/bin/postgres-ha/callbacks/pgha-on-role-change.sh"
+	}
+	if localDBConf.PostgreSQL.PGBackRest != nil {
+		localDBConf.PostgreSQL.PGBackRest.Command = "/opt/crunchy/bin/postgres-ha/pgbackrest/pgbackrest-create-replica.sh replica"
+	}
+	if localDBConf.PostgreSQL.PGBackRestStandby != nil {
+		localDBConf.PostgreSQL.PGBackRestStandby.Command = "/opt/crunchy/bin/postgres-ha/pgbackrest/pgbackrest-create-replica.sh standby"
+	}
+
+	// set up content and patch DCS config
+	dcsContent, err := yaml.Marshal(dcsConf)
+	if err != nil {
+		return err
+	}
+
+	// patch the configmap with the DCS config updates
+	if err := pgoconfig.PatchConfigMapData(clientset, clusterConfig, dcsConfigName, dcsContent); err != nil {
+		return err
+	}
+
+	// set up content and patch localDB config
+	localDBContent, err := yaml.Marshal(localDBConf)
+	if err != nil {
+		return err
+	}
+
+	// patch the configmap with the localDB config updates
+	if err := pgoconfig.PatchConfigMapData(clientset, clusterConfig, localConfigName, localDBContent); err != nil {
+		return err
+	}
+
+	// get the newly patched <clusterName>-pgha-config configmap
+	patchedClusterConfig, err := clientset.CoreV1().ConfigMaps(namespace).Get(context.TODO(), fmt.Sprintf("%s-pgha-config", pgcluster.Name), metav1.GetOptions{})
+	if err != nil {
+		return err
+	}
+
+	// sync the changes to the configmap to the DCS
+	return pgoconfig.NewDCS(patchedClusterConfig, clientset, pgcluster.GetObjectMeta().GetLabels()[config.LABEL_PGHA_SCOPE]).Sync()
+}
+
+// updatePGBackRestSSHDConfig is responsible for upgrading the sshd_config file as needed across
+// operator versions to ensure proper functionality with pgBackRest
+func updatePGBackRestSSHDConfig(clientset 
kubernetes.Interface, repoSecret *v1.Secret, + namespace string) error { + + ctx := context.TODO() + updatedRepoSecret := repoSecret.DeepCopy() + + // For versions prior to v4.6.2, the UsePAM setting might be set to 'yes' as previously + // required to workaround a known Docker issue. Since this issue has since been resolved, + // we now want to ensure this setting is set to 'no'. + if !usePAMRegex.MatchString(string(updatedRepoSecret.Data["sshd_config"])) { + return nil + } + + updatedRepoSecret.Data["sshd_config"] = + []byte(usePAMRegex.ReplaceAllString(string(updatedRepoSecret.Data["sshd_config"]), + "UsePAM no")) + + _, err := clientset.CoreV1().Secrets(namespace).Update(ctx, updatedRepoSecret, + metav1.UpdateOptions{}) + + return err +} diff --git a/internal/operator/clusterutilities.go b/internal/operator/clusterutilities.go index d4ae78706b..df56891e6b 100644 --- a/internal/operator/clusterutilities.go +++ b/internal/operator/clusterutilities.go @@ -1,7 +1,7 @@ package operator /* - Copyright 2019 - 2020 Crunchy Data Solutions, Inc. + Copyright 2019 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -36,10 +36,6 @@ import ( "k8s.io/client-go/kubernetes" ) -// consolidate with cluster.affinityTemplateFields -const AffinityInOperator = "In" -const AFFINITY_NOTINOperator = "NotIn" - // PGHAConfigMapSuffix defines the suffix for the name of the PGHA configMap created for each PG // cluster const PGHAConfigMapSuffix = "pgha-config" @@ -52,9 +48,9 @@ const ( PGHAConfigInitSetting = "init" // PGHAConfigReplicaBootstrapRepoType defines an override for the type of repo (local, S3, etc.) // that should be utilized when bootstrapping a replica (i.e. it override the - // PGBACKREST_REPO_TYPE env var in the environment). Allows for dynamic changing of the + // PGBACKREST_REPO1_TYPE env var in the environment). Allows for dynamic changing of the // backrest repo type without requiring container restarts (as would be required to update - // PGBACKREST_REPO_TYPE). + // PGBACKREST_REPO1_TYPE). PGHAConfigReplicaBootstrapRepoType = "replica-bootstrap-repo-type" ) @@ -72,12 +68,6 @@ const ( preferScheduleIgnoreExec affinityType = "preferredDuringSchedulingIgnoredDuringExecution" ) -type affinityTemplateFields struct { - NodeLabelKey string - NodeLabelValue string - OperatorValue string -} - type podAntiAffinityTemplateFields struct { AffinityType affinityType ClusterName string @@ -94,12 +84,12 @@ type exporterTemplateFields struct { PGOImagePrefix string PgPort string ExporterPort string - CollectSecretName string + ExporterSecretName string ContainerResources string TLSOnly bool } -//consolidate +// consolidate type badgerTemplateFields struct { CCPImageTag string CCPImagePrefix string @@ -112,7 +102,7 @@ type PgbackrestEnvVarsTemplateFields struct { PgbackrestDBPath string PgbackrestRepo1Path string PgbackrestRepo1Host string - PgbackrestRepo1Type string + PgbackrestRepo1Type crv1.BackrestStorageType PgbackrestLocalAndS3Storage bool PgbackrestPGPort string } @@ -156,31 +146,31 @@ type DeploymentTemplateFields struct { DeploymentLabels string // PodAnnotations are user-specified annotations that can be applied to a // Pod, e.g. 
annotations specific to a PostgreSQL instance - PodAnnotations string - PodLabels string - DataPathOverride string - ArchiveMode string - PVCName string - RootSecretName string - UserSecretName string - PrimarySecretName string - SecurityContext string - ContainerResources string - NodeSelector string - ConfVolume string - ExporterAddon string - BadgerAddon string - PgbackrestEnvVars string - PgbackrestS3EnvVars string - PgmonitorEnvVars string - ScopeLabel string - Replicas string - IsInit bool - EnableCrunchyadm bool - ReplicaReinitOnStartFail bool - PodAntiAffinity string - SyncReplication bool - Standby bool + PodAnnotations string + PodLabels string + DataPathOverride string + PVCName string + RootSecretName string + UserSecretName string + PrimarySecretName string + SecurityContext string + ContainerResources string + NodeSelector string + ConfVolume string + ExporterAddon string + BadgerAddon string + PgbackrestEnvVars string + PgbackrestS3EnvVars string + PgmonitorEnvVars string + ScopeLabel string + Replicas string + IsInit bool + ReplicaReinitOnStartFail bool + PodAntiAffinity string + PodAntiAffinityLabelName string + PodAntiAffinityLabelValue string + SyncReplication bool + Standby bool // A comma-separated list of tablespace names...this could be an array, but // given how this would ultimately be interpreted in a shell script somewhere // down the line, it's easier for the time being to do it this way. In the @@ -188,6 +178,9 @@ type DeploymentTemplateFields struct { Tablespaces string TablespaceVolumes string TablespaceVolumeMounts string + // Tolerations is an optional parameter that provides Pod tolerations that + // have been transformed into JSON encoding from an actual Tolerations object + Tolerations string // The following fields set the TLS requirements as well as provide // information on how to configure TLS in a PostgreSQL cluster // TLSEnabled enables TLS in a cluster if set to true. 
Only works in actuality @@ -253,6 +246,7 @@ func GetAnnotations(cluster *crv1.Pgcluster, annotationType crv1.ClusterAnnotati for k, v := range cluster.Spec.Annotations.Postgres { annotations[k] = v } + case crv1.ClusterAnnotationGlobal: // no-op as its handled in the loop above } // if the map is empty, return an empty string @@ -262,7 +256,6 @@ func GetAnnotations(cluster *crv1.Pgcluster, annotationType crv1.ClusterAnnotati // let's try to create a JSON document out of the above doc, err := json.Marshal(annotations) - // if there is an error, warn in our logs and return an empty string if err != nil { log.Errorf("could not set custom annotations: %q", err) @@ -272,43 +265,38 @@ func GetAnnotations(cluster *crv1.Pgcluster, annotationType crv1.ClusterAnnotati return string(doc) } -//consolidate with cluster.GetPgbackrestEnvVars -func GetPgbackrestEnvVars(cluster *crv1.Pgcluster, backrestEnabled, depName, port, storageType string) string { - if backrestEnabled == "true" { - fields := PgbackrestEnvVarsTemplateFields{ - PgbackrestStanza: "db", - PgbackrestRepo1Host: cluster.Name + "-backrest-shared-repo", - PgbackrestRepo1Path: util.GetPGBackRestRepoPath(*cluster), - PgbackrestDBPath: "/pgdata/" + depName, - PgbackrestPGPort: port, - PgbackrestRepo1Type: GetRepoType(storageType), - PgbackrestLocalAndS3Storage: IsLocalAndS3Storage(storageType), - } +// consolidate with cluster.GetPgbackrestEnvVars +func GetPgbackrestEnvVars(cluster *crv1.Pgcluster, depName, port string) string { + fields := PgbackrestEnvVarsTemplateFields{ + PgbackrestStanza: "db", + PgbackrestRepo1Host: cluster.Name + "-backrest-shared-repo", + PgbackrestRepo1Path: GetPGBackRestRepoPath(cluster), + PgbackrestDBPath: "/pgdata/" + depName, + PgbackrestPGPort: port, + PgbackrestRepo1Type: GetRepoType(cluster), + PgbackrestLocalAndS3Storage: IsLocalAndS3Storage(cluster), + } - var doc bytes.Buffer - err := config.PgbackrestEnvVarsTemplate.Execute(&doc, fields) - if err != nil { - log.Error(err.Error()) - return "" - } - return doc.String() + doc := bytes.Buffer{} + if err := config.PgbackrestEnvVarsTemplate.Execute(&doc, fields); err != nil { + log.Error(err.Error()) + return "" } - return "" + return doc.String() } // GetPgbackrestBootstrapEnvVars returns a string containing the pgBackRest environment variables // for a bootstrap job func GetPgbackrestBootstrapEnvVars(restoreClusterName, depName string, restoreFromSecret *v1.Secret) (string, error) { - fields := PgbackrestEnvVarsTemplateFields{ PgbackrestStanza: "db", PgbackrestDBPath: fmt.Sprintf("/pgdata/%s", depName), PgbackrestRepo1Path: restoreFromSecret.Annotations[config.ANNOTATION_REPO_PATH], PgbackrestPGPort: restoreFromSecret.Annotations[config.ANNOTATION_PG_PORT], PgbackrestRepo1Host: fmt.Sprintf(util.BackrestRepoDeploymentName, restoreClusterName), - PgbackrestRepo1Type: "posix", // just set to the default, can be overridden via CLI args + PgbackrestRepo1Type: crv1.BackrestStorageTypePosix, // just set to the default, can be overridden via CLI args } var doc bytes.Buffer @@ -330,85 +318,84 @@ func GetBackrestDeployment(clientset kubernetes.Interface, cluster *crv1.Pgclust return deployment, err } -func GetBadgerAddon(clientset kubernetes.Interface, namespace string, cluster *crv1.Pgcluster, pgbadger_target string) string { - - spec := cluster.Spec +// GetBadgerAddon is a legacy method that generates a JSONish string to be used +// to add a pgBadger sidecar to a PostgreSQL instance +func GetBadgerAddon(cluster *crv1.Pgcluster, target string) string { + if 
!cluster.Spec.PGBadger { + return "" + } - if cluster.Labels[config.LABEL_BADGER] == "true" { - log.Debug("crunchy_badger was found as a label on cluster create") - badgerTemplateFields := badgerTemplateFields{} - badgerTemplateFields.CCPImageTag = util.GetStandardImageTag(spec.CCPImage, spec.CCPImageTag) - badgerTemplateFields.BadgerTarget = pgbadger_target - badgerTemplateFields.PGBadgerPort = spec.PGBadgerPort - badgerTemplateFields.CCPImagePrefix = util.GetValueOrDefault(spec.CCPImagePrefix, Pgo.Cluster.CCPImagePrefix) + log.Debugf("pgBadger enabled for cluster %q", cluster.Name) - var badgerDoc bytes.Buffer - err := config.BadgerTemplate.Execute(&badgerDoc, badgerTemplateFields) - if err != nil { - log.Error(err.Error()) - return "" - } + badgerTemplateFields := badgerTemplateFields{ + BadgerTarget: target, + CCPImagePrefix: util.GetValueOrDefault(cluster.Spec.CCPImagePrefix, Pgo.Cluster.CCPImagePrefix), + CCPImageTag: util.GetStandardImageTag(cluster.Spec.CCPImage, cluster.Spec.CCPImageTag), + PGBadgerPort: cluster.Spec.PGBadgerPort, + } - if CRUNCHY_DEBUG { - config.BadgerTemplate.Execute(os.Stdout, badgerTemplateFields) - } - return badgerDoc.String() + if CRUNCHY_DEBUG { + _ = config.BadgerTemplate.Execute(os.Stdout, badgerTemplateFields) } - return "" -} -func GetExporterAddon(clientset kubernetes.Interface, namespace string, spec *crv1.PgclusterSpec) string { + doc := bytes.Buffer{} + if err := config.BadgerTemplate.Execute(&doc, badgerTemplateFields); err != nil { + log.Error(err) + return "" + } - if spec.UserLabels[config.LABEL_EXPORTER] == "true" { - log.Debug("crunchy-postgres-exporter was found as a label on cluster create") + return doc.String() +} - log.Debugf("creating exporter secret for cluster %s", spec.Name) - err := util.CreateSecret(clientset, spec.Name, spec.CollectSecretName, config.LABEL_EXPORTER_PG_USER, - Pgo.Cluster.PgmonitorPassword, namespace) - if err != nil { - log.Error(err) - } +// GetExporterAddon returns the template used to create an exporter container +// for metrics. This is semi-legacy, but updated to match the current way of +// handling this +func GetExporterAddon(spec crv1.PgclusterSpec) string { + // do not execute if metrics are not enabled + if !spec.Exporter { + return "" + } - exporterTemplateFields := exporterTemplateFields{} - exporterTemplateFields.Name = spec.Name - exporterTemplateFields.JobName = spec.Name - exporterTemplateFields.PGOImageTag = Pgo.Pgo.PGOImageTag - exporterTemplateFields.ExporterPort = spec.ExporterPort - exporterTemplateFields.PGOImagePrefix = util.GetValueOrDefault(spec.PGOImagePrefix, Pgo.Pgo.PGOImagePrefix) - exporterTemplateFields.PgPort = spec.Port - exporterTemplateFields.CollectSecretName = spec.CollectSecretName - exporterTemplateFields.ContainerResources = GetResourcesJSON(spec.ExporterResources, spec.ExporterLimits) + exporterTemplateFields := exporterTemplateFields{ + ContainerResources: GetResourcesJSON(spec.ExporterResources, spec.ExporterLimits), + ExporterPort: spec.ExporterPort, + ExporterSecretName: util.GenerateExporterSecretName(spec.ClusterName), + JobName: spec.Name, + Name: spec.Name, + PGOImagePrefix: util.GetValueOrDefault(spec.PGOImagePrefix, Pgo.Pgo.PGOImagePrefix), + PGOImageTag: Pgo.Pgo.PGOImageTag, + PgPort: spec.Port, // see if TLS only is set. however, this also requires checking to see if // TLS is enabled in this case. 
The reason is that even if TLS is only just // enabled, because the connection is over an internal interface, we do not // need to have the overhead of a TLS connection - exporterTemplateFields.TLSOnly = spec.TLS.IsTLSEnabled() && spec.TLSOnly + TLSOnly: (spec.TLS.IsTLSEnabled() && spec.TLSOnly), + } - var exporterDoc bytes.Buffer - err = config.ExporterTemplate.Execute(&exporterDoc, exporterTemplateFields) - if err != nil { - log.Error(err.Error()) - return "" - } + if CRUNCHY_DEBUG { + _ = config.ExporterTemplate.Execute(os.Stdout, exporterTemplateFields) + } - if CRUNCHY_DEBUG { - config.ExporterTemplate.Execute(os.Stdout, exporterTemplateFields) - } - return exporterDoc.String() + exporterDoc := bytes.Buffer{} + + if err := config.ExporterTemplate.Execute(&exporterDoc, exporterTemplateFields); err != nil { + log.Error(err) + return "" } - return "" + + return exporterDoc.String() } -//consolidate with cluster.GetConfVolume +// consolidate with cluster.GetConfVolume func GetConfVolume(clientset kubernetes.Interface, cl *crv1.Pgcluster, namespace string) string { ctx := context.TODO() var configMapStr string - //check for user provided configmap + // check for user provided configmap if cl.Spec.CustomConfig != "" { _, err := clientset.CoreV1().ConfigMaps(namespace).Get(ctx, cl.Spec.CustomConfig, metav1.GetOptions{}) if err != nil { - //you should NOT get this error because of apiserver validation of this value! + // you should NOT get this error because of apiserver validation of this value! log.Errorf("%s was not found, error, skipping user provided configMap", cl.Spec.CustomConfig) } else { log.Debugf("user provided configmap %s was used for this cluster", cl.Spec.CustomConfig) @@ -416,7 +403,7 @@ func GetConfVolume(clientset kubernetes.Interface, cl *crv1.Pgcluster, namespace } } - //check for global custom configmap "pgo-custom-pg-config" + // check for global custom configmap "pgo-custom-pg-config" _, err := clientset.CoreV1().ConfigMaps(namespace).Get(ctx, config.GLOBAL_CUSTOM_CONFIGMAP, metav1.GetOptions{}) if err == nil { return `"pgo-custom-pg-config"` @@ -468,6 +455,24 @@ func CreatePGHAConfigMap(clientset kubernetes.Interface, cluster *crv1.Pgcluster return nil } +// GetNodeAffinity returns any node affinity rules for the Operator in a JSON +// string. If there is no data or there is an error, it will return an empty +// string. +func GetNodeAffinity(nodeAffinity *v1.NodeAffinity) string { + if nodeAffinity == nil { + return "" + } + + data, err := json.MarshalIndent(nodeAffinity, "", " ") + + if err != nil { + log.Warnf("could not generate node affinity: %s", err.Error()) + return "" + } + + return string(data) +} + // GetTablespaceNamePVCMap returns a map of the tablespace name to the PVC name func GetTablespaceNamePVCMap(clusterName string, tablespaceStorageTypeMap map[string]string) map[string]string { tablespacePVCMap := map[string]string{} @@ -497,7 +502,6 @@ func GetInstanceDeployments(clientset kubernetes.Interface, cluster *crv1.Pgclus clusterDeployments, err := clientset. AppsV1().Deployments(cluster.Namespace). List(ctx, metav1.ListOptions{LabelSelector: selector}) - if err != nil { return nil, err } @@ -635,37 +639,9 @@ func GetLabelsFromMap(labels map[string]string) string { return strings.TrimSuffix(output, ",") } -// GetAffinity ... 
-func GetAffinity(nodeLabelKey, nodeLabelValue string, affoperator string) string { - log.Debugf("GetAffinity with nodeLabelKey=[%s] nodeLabelKey=[%s] and operator=[%s]\n", nodeLabelKey, nodeLabelValue, affoperator) - output := "" - if nodeLabelKey == "" { - return output - } - - affinityTemplateFields := affinityTemplateFields{} - affinityTemplateFields.NodeLabelKey = nodeLabelKey - affinityTemplateFields.NodeLabelValue = nodeLabelValue - affinityTemplateFields.OperatorValue = affoperator - - var affinityDoc bytes.Buffer - err := config.AffinityTemplate.Execute(&affinityDoc, affinityTemplateFields) - if err != nil { - log.Error(err.Error()) - return output - } - - if CRUNCHY_DEBUG { - config.AffinityTemplate.Execute(os.Stdout, affinityTemplateFields) - } - - return affinityDoc.String() -} - // GetPodAntiAffinity returns the populated pod anti-affinity json that should be attached to // the various pods comprising the pg cluster func GetPodAntiAffinity(cluster *crv1.Pgcluster, deploymentType crv1.PodAntiAffinityDeployment, podAntiAffinityType crv1.PodAntiAffinityType) string { - log.Debugf("GetPodAnitAffinity with clusterName=[%s]", cluster.Spec.Name) // run through the checks on the pod anti-affinity type to see if it is not @@ -692,6 +668,7 @@ func GetPodAntiAffinity(cluster *crv1.Pgcluster, deploymentType crv1.PodAntiAffi return "" case crv1.PodAntiAffinityRequired: templateAffinityType = requireScheduleIgnoreExec + case crv1.PodAntiAffinityPreffered: // no-op as its the default value } podAntiAffinityTemplateFields := podAntiAffinityTemplateFields{ @@ -711,7 +688,7 @@ func GetPodAntiAffinity(cluster *crv1.Pgcluster, deploymentType crv1.PodAntiAffi } if CRUNCHY_DEBUG { - config.PodAntiAffinityTemplate.Execute(os.Stdout, podAntiAffinityTemplateFields) + _ = config.PodAntiAffinityTemplate.Execute(os.Stdout, podAntiAffinityTemplateFields) } return podAntiAffinityDoc.String() @@ -754,6 +731,7 @@ func GetPodAntiAffinityType(cluster *crv1.Pgcluster, deploymentType crv1.PodAnti return podAntiAffinityType } } + case crv1.PodAntiAffinityDeploymentDefault: // no-op as its the default setting } // check to see if the value for the cluster anti-affinity is set. If so, use @@ -769,21 +747,23 @@ func GetPodAntiAffinityType(cluster *crv1.Pgcluster, deploymentType crv1.PodAnti // GetPgmonitorEnvVars populates the pgmonitor env var template, which contains any // pgmonitor env vars that need to be included in the Deployment spec for a PG cluster. -func GetPgmonitorEnvVars(metricsEnabled, exporterSecret string) string { - if metricsEnabled == "true" { - fields := PgmonitorEnvVarsTemplateFields{ - ExporterSecret: exporterSecret, - } +func GetPgmonitorEnvVars(cluster *crv1.Pgcluster) string { + if !cluster.Spec.Exporter { + return "" + } - var doc bytes.Buffer - err := config.PgmonitorEnvVarsTemplate.Execute(&doc, fields) - if err != nil { - log.Error(err.Error()) - return "" - } - return doc.String() + fields := PgmonitorEnvVarsTemplateFields{ + ExporterSecret: util.GenerateExporterSecretName(cluster.Name), } - return "" + + doc := bytes.Buffer{} + + if err := config.PgmonitorEnvVarsTemplate.Execute(&doc, fields); err != nil { + log.Error(err) + return "" + } + + return doc.String() } // GetPgbackrestS3EnvVars retrieves the values for the various configuration settings require to @@ -794,10 +774,15 @@ func GetPgmonitorEnvVars(metricsEnabled, exporterSecret string) string { // pgBackRest environment variables required to enable S3 support. 
After the template has been // executed with the proper values, the result is then returned as a string for inclusion in the PG // and pgBackRest deployments. -func GetPgbackrestS3EnvVars(cluster crv1.Pgcluster, clientset kubernetes.Interface, - ns string) string { +func GetPgbackrestS3EnvVars(clientset kubernetes.Interface, cluster crv1.Pgcluster) string { + // determine if backups are enabled to be stored on S3 + isS3 := false + + for _, storageType := range cluster.Spec.BackrestStorageTypes { + isS3 = isS3 || (storageType == crv1.BackrestStorageTypeS3) + } - if !strings.Contains(cluster.Spec.UserLabels[config.LABEL_BACKREST_STORAGE_TYPE], "s3") { + if !isS3 { return "" } @@ -866,7 +851,6 @@ // option is used, then returns the pgBackRest S3 configuration value to either enable // or disable TLS verification as the expected string value. func GetS3VerifyTLSSetting(cluster *crv1.Pgcluster) string { - // If the pgcluster has already been set, either by the PGO client or from the // CRD definition, parse the boolean value given. // If this value is not set, then parse the value stored in the default @@ -891,7 +875,6 @@ // for inclusion in the PG and pgBackRest deployments. func GetPgbackrestBootstrapS3EnvVars(pgDataSourceRestoreFrom string, restoreFromSecret *v1.Secret) string { - s3EnvVars := PgbackrestS3EnvVarsTemplateFields{ PgbackrestS3Key: util.BackRestRepoSecretKeyAWSS3KeyAWSS3Key, PgbackrestS3KeySecret: util.BackRestRepoSecretKeyAWSS3KeyAWSS3KeySecret, @@ -942,6 +925,9 @@ func UpdatePGHAConfigInitFlag(clientset kubernetes.Interface, initVal bool, clus case err != nil: return fmt.Errorf("unable to find the default pgha configMap found for cluster %s using selector %s, unable to set "+ "init value to false", clusterName, selector) + case len(configMapList.Items) == 0: + return fmt.Errorf("no pgha configMaps found for cluster %s using selector %s, unable to set "+ + "init value to false", clusterName, selector) case len(configMapList.Items) > 1: return fmt.Errorf("more than one default pgha configMap found for cluster %s using selector %s, unable to set "+ "init value to false", clusterName, selector) @@ -964,10 +950,9 @@ func GetSyncReplication(specSyncReplication *bool) bool { // always use the value from the CR if explicitly provided if specSyncReplication != nil { return *specSyncReplication - } else if Pgo.Cluster.SyncReplication { - return true } - return false + + return Pgo.Cluster.SyncReplication } // OverrideClusterContainerImages is a helper function that provides the @@ -980,15 +965,12 @@ var containerImageName string // there are a few images we need to check for: // 1. "database" image, which is PostgreSQL or some flavor of it - // 2. "crunchyadm" image, which helps with administration - // 3. "exporter" image, which helps with monitoring - // 4. "pgbadger" image, which helps with...pgbadger + // 2. "exporter" image, which helps with monitoring + // 3. 
"pgbadger" image, which helps with...pgbadger switch container.Name { case "exporter": containerImageName = config.CONTAINER_IMAGE_CRUNCHY_POSTGRES_EXPORTER - case "crunchyadm": - containerImageName = config.CONTAINER_IMAGE_CRUNCHY_ADMIN case "database": containerImageName = config.CONTAINER_IMAGE_CRUNCHY_POSTGRES_HA // one more step here...determine if this is GIS enabled @@ -1008,7 +990,6 @@ func OverrideClusterContainerImages(containers []v1.Container) { // into the current buffer func writeTablespaceJSON(w *bytes.Buffer, jsonFields interface{}) error { json, err := json.Marshal(jsonFields) - // if there is an error, log the error and continue if err != nil { return err diff --git a/internal/operator/clusterutilities_test.go b/internal/operator/clusterutilities_test.go index 081e5eab62..ea0a37dbb9 100644 --- a/internal/operator/clusterutilities_test.go +++ b/internal/operator/clusterutilities_test.go @@ -1,7 +1,7 @@ package operator /* - Copyright 2020 Crunchy Data Solutions, Inc. + Copyright 2020 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -123,16 +123,14 @@ func TestGetAnnotations(t *testing.T) { } func TestOverrideClusterContainerImages(t *testing.T) { - containerDefaults := map[string]struct { name string image string }{ - "database": {name: "database", image: config.CONTAINER_IMAGE_CRUNCHY_POSTGRES_HA}, - "crunchyadm": {name: "crunchyadm", image: config.CONTAINER_IMAGE_CRUNCHY_ADMIN}, - "exporter": {name: "exporter", image: config.CONTAINER_IMAGE_CRUNCHY_POSTGRES_EXPORTER}, - "pgbadger": {name: "pgbadger", image: config.CONTAINER_IMAGE_CRUNCHY_PGBADGER}, - "future": {name: "future", image: "crunchy-future"}, + "database": {name: "database", image: config.CONTAINER_IMAGE_CRUNCHY_POSTGRES_HA}, + "exporter": {name: "exporter", image: config.CONTAINER_IMAGE_CRUNCHY_POSTGRES_EXPORTER}, + "pgbadger": {name: "pgbadger", image: config.CONTAINER_IMAGE_CRUNCHY_PGBADGER}, + "future": {name: "future", image: "crunchy-future"}, } t.Run("no override", func(t *testing.T) { @@ -250,7 +248,6 @@ func TestOverrideClusterContainerImages(t *testing.T) { } func TestGetPgbackrestBootstrapS3EnvVars(t *testing.T) { - // create a fake client that will be used to "fake" the initialization of the operator for // this test fakePGOClient, err := fakekubeapi.NewFakePGOClient() @@ -283,7 +280,6 @@ func TestGetPgbackrestBootstrapS3EnvVars(t *testing.T) { // test all env vars are properly set according the contents of an existing pgBackRest // repo secret t.Run("populate from secret", func(t *testing.T) { - backRestRepoSecret := mockBackRestRepoSecret.DeepCopy() s3EnvVars := GetPgbackrestBootstrapS3EnvVars(defaultRestoreFromCluster, backRestRepoSecret) // massage the results a bit so that we can parse as proper JSON to validate contents @@ -332,7 +328,6 @@ func TestGetPgbackrestBootstrapS3EnvVars(t *testing.T) { // test that the proper default S3 URI style is set for the bootstrap S3 env vars when the // S3 URI style annotation is an empty string in a pgBackRest repo secret t.Run("default URI style", func(t *testing.T) { - // the expected default for the pgBackRest URI style defaultURIStyle := "host" diff --git a/internal/operator/common.go b/internal/operator/common.go index 2d4360deb7..ead5047318 100644 --- a/internal/operator/common.go +++ b/internal/operator/common.go @@ -1,7 +1,7 @@ package operator /* - Copyright 2017 - 2020 Crunchy Data Solutions, 
Inc. + Copyright 2017 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -17,9 +17,13 @@ package operator import ( "bytes" + "context" + "crypto/sha256" "encoding/json" + "fmt" + "io/ioutil" "os" - "strings" + "path" "github.com/crunchydata/postgres-operator/internal/config" "github.com/crunchydata/postgres-operator/internal/ns" @@ -27,20 +31,38 @@ import ( log "github.com/sirupsen/logrus" v1 "k8s.io/api/core/v1" + kerrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" "k8s.io/client-go/kubernetes" ) const ( + // defaultBackrestRepoPath defines the default repo1-path for pgBackRest for + // use when a specific path is not provided in the pgcluster CR. The '%s' + // format verb will be replaced with the cluster name when this variable is + // utilized + defaultBackrestRepoPath = "/backrestrepo/%s-backrest-shared-repo" + // defaultBackrestRepoConfigPath contains the default configuration files that are used + // to set up a pgBackRest repository + defaultBackrestRepoConfigPath = "/default-pgo-backrest-repo/" // defaultRegistry is the default registry to pull the container images from defaultRegistry = "registry.developers.crunchydata.com/crunchydata" + // legacyS3CASHA256Digest informs us if we should override the S3 CA with the + // new bundle + legacyS3CASHA256Digest = "d1c290ea1e4544dec1934931fbfa1fb2060eb3a0f2239ba191f444ecbce35cbb" ) -var CRUNCHY_DEBUG bool -var NAMESPACE string +var ( + CRUNCHY_DEBUG bool + NAMESPACE string +) -var InstallationName string -var PgoNamespace string -var EventTCPAddress = "localhost:4150" +var ( + InstallationName string + PgoNamespace string + EventTCPAddress = "localhost:4150" +) var Pgo config.PgoConfig @@ -54,6 +76,9 @@ var ContainerImageOverrides = map[string]string{} // for detailed explanations of each mode available. 
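The defaultBackrestRepoPath constant above carries a '%s' verb that is later substituted with the cluster name. A trivial sketch of what that substitution yields:

```go
package main

import "fmt"

// mirrors the constant declared above
const defaultBackrestRepoPath = "/backrestrepo/%s-backrest-shared-repo"

func main() {
	// a cluster named "hippo" that sets no repo path of its own gets:
	fmt.Printf(defaultBackrestRepoPath+"\n", "hippo")
	// /backrestrepo/hippo-backrest-shared-repo
}
```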
var namespaceOperatingMode ns.NamespaceOperatingMode +// runAsNonRoot forces the Pod to run as a non-root Pod +var runAsNonRoot = true + type containerResourcesTemplateFields struct { // LimitsMemory and LimitsCPU detemrine the memory/CPU limits LimitsMemory, LimitsCPU string @@ -62,8 +87,11 @@ type containerResourcesTemplateFields struct { RequestsMemory, RequestsCPU string } -func Initialize(clientset kubernetes.Interface) { +// defaultBackrestRepoConfigKeys are the default keys expected to be in the +// pgBackRest repo config secret +var defaultBackrestRepoConfigKeys = []string{"config", "sshd_config", "aws-s3-ca.crt"} +func Initialize(clientset kubernetes.Interface) { tmp := os.Getenv("CRUNCHY_DEBUG") if tmp == "true" { CRUNCHY_DEBUG = true @@ -89,16 +117,15 @@ func Initialize(clientset kubernetes.Interface) { os.Exit(2) } - var err error - - err = Pgo.GetConfig(clientset, PgoNamespace) - if err != nil { + if err := Pgo.GetConfig(clientset, PgoNamespace); err != nil { log.Error(err) - log.Error("pgo-config files and templates did not load") - os.Exit(2) + log.Fatal("pgo-config files and templates did not load") } - log.Printf("PrimaryStorage=%v\n", Pgo.Storage["storage1"]) + // initialize the general pgBackRest secret + if err := initializeOperatorBackrestSecret(clientset, PgoNamespace); err != nil { + log.Fatal(err) + } if Pgo.Cluster.CCPImagePrefix == "" { log.Debugf("pgo.yaml CCPImagePrefix not set, using default %q", defaultRegistry) @@ -113,11 +140,6 @@ func Initialize(clientset kubernetes.Interface) { log.Debugf("PGOImagePrefix set, using %s", Pgo.Pgo.PGOImagePrefix) } - if Pgo.Cluster.PgmonitorPassword == "" { - log.Debug("pgo.yaml PgmonitorPassword not set, using default") - Pgo.Cluster.PgmonitorPassword = "password" - } - // In a RELATED_IMAGE_* world, this does not _need_ to be set, but our // installer does set it up so we could be ok... if Pgo.Pgo.PGOImageTag == "" { @@ -151,12 +173,14 @@ func Initialize(clientset kubernetes.Interface) { func GetPodSecurityContext(supplementalGroups []int64) string { // set up the security context struct securityContext := v1.PodSecurityContext{ + // we don't want to run the pods as root, so explicitly disallow this + RunAsNonRoot: &runAsNonRoot, // add any supplemental groups that the user passed in SupplementalGroups: supplementalGroups, } // determine if we should use the PostgreSQL FSGroup. - if !Pgo.Cluster.DisableFSGroup { + if !Pgo.DisableFSGroup() { // we store the PostgreSQL FSGroup in this constant as an int64, so it's // just carried over securityContext.FSGroup = &crv1.PGFSGroup @@ -164,7 +188,6 @@ func GetPodSecurityContext(supplementalGroups []int64) string { // ...convert to JSON. Errors are ignored doc, err := json.Marshal(securityContext) - // if there happens to be an error, warn about it if err != nil { log.Warn(err) @@ -217,30 +240,62 @@ func GetResourcesJSON(resources, limits v1.ResourceList) string { } if log.GetLevel() == log.DebugLevel { - config.ContainerResourcesTemplate.Execute(os.Stdout, fields) + _ = config.ContainerResourcesTemplate.Execute(os.Stdout, fields) } return doc.String() } +// GetPGBackRestRepoPath is responsible for determining the repo path setting +// (i.e. 'repo1-path' flag) for use by pgBackRest. If a specific repo path has +// been defined in the pgcluster CR, then that path will be returned. 
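GetPodSecurityContext above now always sets RunAsNonRoot before marshaling the PodSecurityContext to JSON for the pod template. A sketch of what that produces; the FSGroup value below is a stand-in, since the real value comes from crv1.PGFSGroup in the CRD package:

```go
package main

import (
	"encoding/json"
	"fmt"

	v1 "k8s.io/api/core/v1"
)

func main() {
	runAsNonRoot := true
	fsGroup := int64(26) // stand-in value; the operator reads crv1.PGFSGroup

	securityContext := v1.PodSecurityContext{
		RunAsNonRoot:       &runAsNonRoot,
		SupplementalGroups: []int64{65534},
		FSGroup:            &fsGroup,
	}

	doc, err := json.Marshal(securityContext)
	if err != nil {
		fmt.Println(err)
		return
	}

	// the JSON fragment templated into the pod spec, along the lines of
	// {"runAsNonRoot":true,"supplementalGroups":[65534],"fsGroup":26}
	fmt.Println(string(doc))
}
```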
Otherwise +// a default path will be returned that is generated from the cluster name +func GetPGBackRestRepoPath(cluster *crv1.Pgcluster) string { + if cluster.Spec.BackrestRepoPath != "" { + return cluster.Spec.BackrestRepoPath + } + return fmt.Sprintf(defaultBackrestRepoPath, cluster.Name) +} + // GetRepoType returns the proper repo type to set in container based on the // backrest storage type provided -func GetRepoType(backrestStorageType string) string { - if backrestStorageType != "" && backrestStorageType == "s3" { - return "s3" - } else { - return "posix" +// +// If there are multiple types, the default returned is "posix". This could +// change once there is proper multi-repo support, but with proper multi-repo +// support, this function is likely annihilated. +// +// If there is nothing, the default returned is posix +func GetRepoType(cluster *crv1.Pgcluster) crv1.BackrestStorageType { + // so...per the above comment... + if len(cluster.Spec.BackrestStorageTypes) == 0 || len(cluster.Spec.BackrestStorageTypes) > 1 { + return crv1.BackrestStorageTypePosix + } + + // alright, so there is only 1. If it happens to be "local" ensure that posix + // is returned + if cluster.Spec.BackrestStorageTypes[0] == crv1.BackrestStorageTypeLocal { + return crv1.BackrestStorageTypePosix } + + return cluster.Spec.BackrestStorageTypes[0] } // IsLocalAndS3Storage returns a boolean indicating whether or not local and s3 storage should // be enabled for pgBackRest based on the backrestStorageType string provided -func IsLocalAndS3Storage(backrestStorageType string) bool { - if backrestStorageType != "" && strings.Contains(backrestStorageType, "s3") && - strings.Contains(backrestStorageType, "local") { - return true +func IsLocalAndS3Storage(cluster *crv1.Pgcluster) bool { + // this works for the time being. if the counter is two or greater, then we + // have both local and S3 storage + i := 0 + + for _, storageType := range cluster.Spec.BackrestStorageTypes { + switch storageType { + default: // no-op + case crv1.BackrestStorageTypeLocal, crv1.BackrestStorageTypePosix, crv1.BackrestStorageTypeS3: + i += 1 + } } - return false + + return i >= 2 } // SetContainerImageOverride determines if there is an override available for @@ -257,6 +312,49 @@ } } +// getCandidatePod tries to get the candidate Pod for a switchover or failover. +// If "candidateName" is provided, it will seek out the specific PostgreSQL +// instance. Otherwise, it will just attempt to find a running Pod. +// +// If such a Pod cannot be found, we likely cannot use the instance as a +// switchover or failover candidate as it is not running. +func getCandidatePod(clientset kubernetes.Interface, cluster *crv1.Pgcluster, candidateName string) (*v1.Pod, error) { + ctx := context.TODO() + + // build the label selector. 
we are looking for any PostgreSQL instance within + // this cluster, so that part is easy + labelSelector := fields.Set{ + config.LABEL_PG_CLUSTER: cluster.Name, + config.LABEL_PG_DATABASE: config.LABEL_TRUE, + } + + // if a candidateName is supplied, use that as part of the label selector to + // find the candidate Pod + if candidateName != "" { + labelSelector[config.LABEL_DEPLOYMENT_NAME] = candidateName + } + + // ensure the Pod is part of the cluster and is running + options := metav1.ListOptions{ + FieldSelector: fields.OneTermEqualSelector("status.phase", string(v1.PodRunning)).String(), + LabelSelector: labelSelector.String(), + } + + pods, err := clientset.CoreV1().Pods(cluster.Namespace).List(ctx, options) + if err != nil { + return nil, err + } + + // if no Pods are found, then also return an error as we then cannot switch + // over to this instance + if len(pods.Items) == 0 { + return nil, fmt.Errorf("no pods found for instance %s", candidateName) + } + + // the list returns multiple Pods, so just return the first one + return &pods.Items[0], nil +} + // initializeContainerImageOverrides initializes the container image overrides // that could be set if there are any `RELATED_IMAGE_*` environmental variables func initializeContainerImageOverrides() { @@ -308,7 +406,6 @@ func initializeControllerRefreshIntervals() { // attempting to utilize the worker counts defined in the pgo.yaml config file, and if not // present then falling back to a default value. func initializeControllerWorkerCounts() { - if Pgo.Pgo.ConfigMapWorkerCount == nil { log.Debugf("ConfigMapWorkerCount not set, defaulting to %d worker(s)", config.DefaultConfigMapWorkerCount) @@ -360,12 +457,87 @@ func initializeControllerWorkerCounts() { } } +// initializeOperatorBackrestSecret ensures the generic pgBackRest configuration +// is available +func initializeOperatorBackrestSecret(clientset kubernetes.Interface, namespace string) error { + var isNew, isModified bool + + ctx := context.TODO() + + // determine if the Secret already exists + secret, err := clientset. + CoreV1().Secrets(namespace). + Get(ctx, config.SecretOperatorBackrestRepoConfig, metav1.GetOptions{}) + // if there is a true error, return. Otherwise, initialize a new Secret + if err != nil { + if !kerrors.IsNotFound(err) { + return err + } + + secret = &v1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: config.SecretOperatorBackrestRepoConfig, + Labels: map[string]string{ + config.LABEL_VENDOR: config.LABEL_CRUNCHY, + }, + }, + Data: map[string][]byte{}, + } + isNew = true + } + + // set any missing defaults + for _, filename := range defaultBackrestRepoConfigKeys { + // skip if there is already content, unless this is aws-s3-ca.crt due to + // the change in the CA bundle + if len(secret.Data[filename]) != 0 { + if filename != "aws-s3-ca.crt" { + continue + } + + // in the case of aws-s3-ca.crt, check that this is the default + // certificate. 
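The CA-bundle branch that follows hinges on a digest comparison: hash the stored bundle and test the hex digest against the legacyS3CASHA256Digest constant. The same check as a standalone sketch, reading from the default config path defined earlier in this file:

```go
package main

import (
	"crypto/sha256"
	"fmt"
	"io/ioutil"
	"log"
)

func main() {
	// read the bundled CA file and compare its hex-encoded SHA-256 digest
	// against the known digest of the legacy bundle
	contents, err := ioutil.ReadFile("/default-pgo-backrest-repo/aws-s3-ca.crt")
	if err != nil {
		log.Fatal(err)
	}

	legacy := "d1c290ea1e4544dec1934931fbfa1fb2060eb3a0f2239ba191f444ecbce35cbb"
	fmt.Println("override needed:", fmt.Sprintf("%x", sha256.Sum256(contents)) == legacy)
}
```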
if it is, override it + if fmt.Sprintf("%x", sha256.Sum256(secret.Data[filename])) != legacyS3CASHA256Digest { + continue + } + } + + file := path.Join(defaultBackrestRepoConfigPath, filename) + + // if we can't read the contents of the file for whatever reason, warn, + // but continue + // otherwise, update the entry in the Secret + if contents, err := ioutil.ReadFile(file); err != nil { + log.Warn(err) + continue + } else { + secret.Data[filename] = contents + } + + isModified = true + } + + // do not make any updates if the secret is not modified at all + if !isModified { + return nil + } + + // make the API calls based on if we are creating or updating + if isNew { + _, err := clientset.CoreV1().Secrets(namespace).Create(ctx, secret, metav1.CreateOptions{}) + return err + } + + _, err = clientset.CoreV1().Secrets(namespace).Update(ctx, secret, metav1.UpdateOptions{}) + + return err +} + // SetupNamespaces is responsible for the initial namespace configuration for the Operator // install. This includes setting the proper namespace operating mode, creating and/or updating // namespaces as needed (or as permitted by the current operator mode), and returning a valid list // of namespaces for the current Operator install. func SetupNamespaces(clientset kubernetes.Interface) ([]string, error) { - // First set the proper namespace operating mode for the Operator install. The mode identified // determines whether or not certain namespace capabilities are enabled. if err := setNamespaceOperatingMode(clientset); err != nil { diff --git a/internal/operator/common_test.go b/internal/operator/common_test.go new file mode 100644 index 0000000000..40e1c39374 --- /dev/null +++ b/internal/operator/common_test.go @@ -0,0 +1,165 @@ +package operator + +/* + Copyright 2020 - 2023 Crunchy Data Solutions, Inc. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +import ( + "testing" + + crv1 "github.com/crunchydata/postgres-operator/pkg/apis/crunchydata.com/v1" +) + +func TestGetRepoType(t *testing.T) { + cluster := &crv1.Pgcluster{ + Spec: crv1.PgclusterSpec{}, + } + + t.Run("empty list returns posix", func(t *testing.T) { + cluster.Spec.BackrestStorageTypes = make([]crv1.BackrestStorageType, 0) + + expected := crv1.BackrestStorageTypePosix + actual := GetRepoType(cluster) + if expected != actual { + t.Fatalf("expected %q, actual %q", expected, actual) + } + }) + + t.Run("multiple list returns posix", func(t *testing.T) { + cluster.Spec.BackrestStorageTypes = []crv1.BackrestStorageType{ + crv1.BackrestStorageTypeS3, + crv1.BackrestStorageTypePosix, + } + + expected := crv1.BackrestStorageTypePosix + actual := GetRepoType(cluster) + if expected != actual { + t.Fatalf("expected %q, actual %q", expected, actual) + } + }) + + t.Run("local returns posix", func(t *testing.T) { + cluster.Spec.BackrestStorageTypes = []crv1.BackrestStorageType{ + crv1.BackrestStorageTypeLocal, + } + + expected := crv1.BackrestStorageTypePosix + actual := GetRepoType(cluster) + if expected != actual { + t.Fatalf("expected %q, actual %q", expected, actual) + } + }) + + t.Run("posix returns posix", func(t *testing.T) { + cluster.Spec.BackrestStorageTypes = []crv1.BackrestStorageType{ + crv1.BackrestStorageTypePosix, + } + + expected := crv1.BackrestStorageTypePosix + actual := GetRepoType(cluster) + if expected != actual { + t.Fatalf("expected %q, actual %q", expected, actual) + } + }) + + t.Run("s3 returns s3", func(t *testing.T) { + cluster.Spec.BackrestStorageTypes = []crv1.BackrestStorageType{ + crv1.BackrestStorageTypeS3, + } + + expected := crv1.BackrestStorageTypeS3 + actual := GetRepoType(cluster) + if expected != actual { + t.Fatalf("expected %q, actual %q", expected, actual) + } + }) +} + +func TestIsLocalAndS3Storage(t *testing.T) { + cluster := &crv1.Pgcluster{ + Spec: crv1.PgclusterSpec{}, + } + + t.Run("empty list returns false", func(t *testing.T) { + cluster.Spec.BackrestStorageTypes = make([]crv1.BackrestStorageType, 0) + + expected := false + actual := IsLocalAndS3Storage(cluster) + if expected != actual { + t.Fatalf("expected %t, actual %t", expected, actual) + } + }) + + t.Run("posix only returns false", func(t *testing.T) { + cluster.Spec.BackrestStorageTypes = []crv1.BackrestStorageType{ + crv1.BackrestStorageTypePosix, + } + + expected := false + actual := IsLocalAndS3Storage(cluster) + if expected != actual { + t.Fatalf("expected %t, actual %t", expected, actual) + } + }) + + t.Run("local only returns false", func(t *testing.T) { + cluster.Spec.BackrestStorageTypes = []crv1.BackrestStorageType{ + crv1.BackrestStorageTypeLocal, + } + + expected := false + actual := IsLocalAndS3Storage(cluster) + if expected != actual { + t.Fatalf("expected %t, actual %t", expected, actual) + } + }) + + t.Run("s3 only returns false", func(t *testing.T) { + cluster.Spec.BackrestStorageTypes = []crv1.BackrestStorageType{ + crv1.BackrestStorageTypeS3, + } + + expected := false + actual := IsLocalAndS3Storage(cluster) + if expected != actual { + t.Fatalf("expected %t, actual %t", expected, actual) + } + }) + + t.Run("posix and s3 returns true", func(t *testing.T) { + cluster.Spec.BackrestStorageTypes = []crv1.BackrestStorageType{ + crv1.BackrestStorageTypePosix, + crv1.BackrestStorageTypeS3, + } + + expected := true + actual := IsLocalAndS3Storage(cluster) + if expected != actual { + t.Fatalf("expected %t, actual %t", expected, actual) + } + }) + + t.Run("local and 
s3 returns true", func(t *testing.T) { + cluster.Spec.BackrestStorageTypes = []crv1.BackrestStorageType{ + crv1.BackrestStorageTypeLocal, + crv1.BackrestStorageTypeS3, + } + + expected := true + actual := IsLocalAndS3Storage(cluster) + if expected != actual { + t.Fatalf("expected %t, actual %t", expected, actual) + } + }) +} diff --git a/internal/operator/config/configutil.go b/internal/operator/config/configutil.go index 1cbefba2ac..3efc10d57b 100644 --- a/internal/operator/config/configutil.go +++ b/internal/operator/config/configutil.go @@ -1,7 +1,7 @@ package config /* - Copyright 2020 Crunchy Data Solutions, Inc. + Copyright 2020 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -42,10 +42,8 @@ const ( pghLocalConfigSuffix = "-local-config" ) -var ( - // ErrMissingClusterConfig is the error thrown when configuration is missing from a configMap - ErrMissingClusterConfig error = errors.New("Configuration is missing from configMap") -) +// ErrMissingClusterConfig is the error thrown when configuration is missing from a configMap +var ErrMissingClusterConfig error = errors.New("Configuration is missing from configMap") // Syncer defines a resource that is able to sync its configuration stored configuration with a // service, application, etc. @@ -53,9 +51,9 @@ type Syncer interface { Sync() error } -// patchConfigMapData replaces the configuration stored the configuration specified with the +// PatchConfigMapData replaces the configuration stored the configuration specified with the // provided content -func patchConfigMapData(kubeclientset kubernetes.Interface, configMap *corev1.ConfigMap, +func PatchConfigMapData(kubeclientset kubernetes.Interface, configMap *corev1.ConfigMap, configName string, content []byte) error { ctx := context.TODO() diff --git a/internal/operator/config/dcs.go b/internal/operator/config/dcs.go index dc7663acbc..43ad7d7ac5 100644 --- a/internal/operator/config/dcs.go +++ b/internal/operator/config/dcs.go @@ -1,7 +1,7 @@ package config /* - Copyright 2020 Crunchy Data Solutions, Ind. + Copyright 2020 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -99,7 +99,6 @@ type SlotDCS struct { // include a configMap that will be used to configure the DCS for a specific cluster. func NewDCS(configMap *corev1.ConfigMap, kubeclientset kubernetes.Interface, clusterScope string) *DCS { - clusterName := configMap.GetLabels()[config.LABEL_PG_CLUSTER] return &DCS{ @@ -114,7 +113,6 @@ func NewDCS(configMap *corev1.ConfigMap, kubeclientset kubernetes.Interface, // configuration is missing from the configMap, then and attempt is made to add it by refreshing // the DCS configuration. func (d *DCS) Sync() error { - clusterName := d.configMap.GetObjectMeta().GetLabels()[config.LABEL_PG_CLUSTER] namespace := d.configMap.GetObjectMeta().GetNamespace() @@ -123,7 +121,6 @@ func (d *DCS) Sync() error { if err := d.apply(); err != nil && errors.Is(err, ErrMissingClusterConfig) { - if err := d.refresh(); err != nil { return err } @@ -140,7 +137,6 @@ func (d *DCS) Sync() error { // Update updates the contents of the DCS configuration stored within the configMap included // in the DCS. 
func (d *DCS) Update(dcsConfig *DCSConfig) error { - clusterName := d.configMap.GetObjectMeta().GetLabels()[config.LABEL_PG_CLUSTER] namespace := d.configMap.GetObjectMeta().GetNamespace() @@ -152,7 +148,7 @@ func (d *DCS) Update(dcsConfig *DCSConfig) error { return err } - if err := patchConfigMapData(d.kubeclientset, d.configMap, d.configName, content); err != nil { + if err := PatchConfigMapData(d.kubeclientset, d.configMap, d.configName, content); err != nil { return err } @@ -167,7 +163,6 @@ func (d *DCS) Update(dcsConfig *DCSConfig) error { // "-config" configMap, with the contents of the "" // configuration included in the DCS's configMap. func (d *DCS) apply() error { - clusterName := d.configMap.GetLabels()[config.LABEL_PG_CLUSTER] namespace := d.configMap.GetObjectMeta().GetNamespace() @@ -250,7 +245,6 @@ func (d *DCS) getClusterDCSConfig() (*DCSConfig, map[string]json.RawMessage, err // configMap, i.e. the contents of the "" configuration unmarshalled // into a DCSConfig struct. func (d *DCS) GetDCSConfig() (*DCSConfig, map[string]json.RawMessage, error) { - dcsYAML, ok := d.configMap.Data[d.configName] if !ok { return nil, nil, ErrMissingClusterConfig @@ -291,7 +285,6 @@ func (d *DCS) patchDCSAnnotation(content string) error { // configMap with the current DCS configuration for the cluster. Specifically, it is updated with // the configuration stored in the "config" annotation of the "-config" configMap. func (d *DCS) refresh() error { - clusterName := d.configMap.Labels[config.LABEL_PG_CLUSTER] namespace := d.configMap.GetObjectMeta().GetNamespace() @@ -308,7 +301,7 @@ func (d *DCS) refresh() error { return err } - if err := patchConfigMapData(d.kubeclientset, d.configMap, d.configName, + if err := PatchConfigMapData(d.kubeclientset, d.configMap, d.configName, clusterDCSBytes); err != nil { return err } diff --git a/internal/operator/config/localdb.go b/internal/operator/config/localdb.go index 797c53544f..b8326c3ade 100644 --- a/internal/operator/config/localdb.go +++ b/internal/operator/config/localdb.go @@ -1,7 +1,7 @@ package config /* - Copyright 2020 Crunchy Data Solutions, Inl. + Copyright 2020 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -38,18 +38,19 @@ import ( var ( // readConfigCMD is the command used to read local cluster configuration in a database // container - readConfigCMD []string = []string{"bash", "-c", - "/opt/cpm/bin/yq r /tmp/postgres-ha-bootstrap.yaml postgresql | " + - "/opt/cpm/bin/yq p - postgresql", + readConfigCMD []string = []string{ + "bash", "-c", + "/opt/crunchy/bin/yq r /tmp/postgres-ha-bootstrap.yaml postgresql | " + + "/opt/crunchy/bin/yq p - postgresql", } // applyAndReloadConfigCMD is the command for calling the script to apply and reload the local // configuration for a database container. The required arguments are appended to this command // when the script is called. 
- applyAndReloadConfigCMD []string = []string{"/opt/cpm/bin/common/pgha-reload-local.sh"} + applyAndReloadConfigCMD []string = []string{"/opt/crunchy/bin/postgres-ha/common/pgha-reload-local.sh"} - // pghaLocalConfigName represents the name of the local configuration stored for each database + // PGHALocalConfigName represents the name of the local configuration stored for each database // server in the "-pgha-config" configMap, which is "-local-config" - pghaLocalConfigName = "%s-local-config" + PGHALocalConfigName = "%s-local-config" // pghaLocalConfigSuffix is the suffix for a local server configuration pghaLocalConfigSuffix = "-local-config" ) @@ -120,7 +121,6 @@ type CreateReplicaMethod struct { // servers. func NewLocalDB(configMap *corev1.ConfigMap, restConfig *rest.Config, kubeclientset kubernetes.Interface) (*LocalDB, error) { - clusterName := configMap.GetLabels()[config.LABEL_PG_CLUSTER] namespace := configMap.GetObjectMeta().GetNamespace() @@ -142,7 +142,6 @@ func NewLocalDB(configMap *corev1.ConfigMap, restConfig *rest.Config, // configMap, then and attempt is made to add it by refreshing that specific configuration. Also, any // configurations within the configMap associated with servers that no longer exist are removed. func (l *LocalDB) Sync() error { - clusterName := l.configMap.GetObjectMeta().GetLabels()[config.LABEL_PG_CLUSTER] namespace := l.configMap.GetObjectMeta().GetNamespace() @@ -156,7 +155,7 @@ func (l *LocalDB) Sync() error { // delete any configs that are in the configMap but don't have an associated DB server in the // cluster go func() { - l.clean() + _ = l.clean() wg.Done() }() @@ -166,11 +165,9 @@ func (l *LocalDB) Sync() error { wg.Add(1) go func(config string) { - // attempt to apply DCS config if err := l.apply(config); err != nil && errors.Is(err, ErrMissingClusterConfig) { - if err := l.refresh(config); err != nil { // log the error and move on log.Error(err) @@ -195,7 +192,6 @@ func (l *LocalDB) Sync() error { // Update updates the contents of the configuration for a specific database server in // the PG cluster, specifically within the configMap included in the LocalDB. 
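Sync above fans each local server configuration out to its own goroutine and waits on the group before cleaning up. A stripped-down sketch of that concurrency pattern; apply below is a hypothetical stand-in for LocalDB.apply:

```go
package main

import (
	"fmt"
	"sync"
)

// apply is a hypothetical stand-in for LocalDB.apply
func apply(configName string) error {
	fmt.Println("applied", configName)
	return nil
}

func main() {
	configNames := []string{"hippo-local-config", "hippo-lnxm-local-config"}

	var wg sync.WaitGroup

	for _, name := range configNames {
		wg.Add(1)

		// hand the loop variable to the goroutine as an argument so each
		// worker gets its own copy, the same precaution Sync takes
		go func(config string) {
			defer wg.Done()
			_ = apply(config)
		}(name)
	}

	wg.Wait()
}
```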
func (l *LocalDB) Update(configName string, localDBConfig LocalDBConfig) error { - clusterName := l.configMap.GetObjectMeta().GetLabels()[config.LABEL_PG_CLUSTER] namespace := l.configMap.GetObjectMeta().GetNamespace() @@ -207,7 +203,7 @@ func (l *LocalDB) Update(configName string, localDBConfig LocalDBConfig) error { return err } - if err := patchConfigMapData(l.kubeclientset, l.configMap, configName, content); err != nil { + if err := PatchConfigMapData(l.kubeclientset, l.configMap, configName, content); err != nil { return err } @@ -255,7 +251,6 @@ func (l *LocalDB) apply(configName string) error { stdout, stderr, err := kubeapi.ExecToPodThroughAPI(l.restConfig, l.kubeclientset, applyCommand, dbPod.Spec.Containers[0].Name, dbPod.GetName(), namespace, nil) - if err != nil { log.Error(stderr, stdout) return err @@ -271,7 +266,7 @@ func (l *LocalDB) apply(configName string) error { // LocalDB if the database server they are associated with no longer exists func (l *LocalDB) clean() error { ctx := context.TODO() - var patch = kubeapi.NewJSONPatch() + patch := kubeapi.NewJSONPatch() var cmlocalConfigs []string // first grab all current local configs from the configMap @@ -320,7 +315,6 @@ func (l *LocalDB) getLocalConfigFromCluster(configName string) (*LocalDBConfig, dbPodList, err := l.kubeclientset.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{ LabelSelector: selector, }) - if err != nil { return nil, err } @@ -353,7 +347,6 @@ func (l *LocalDB) getLocalConfigFromCluster(configName string) (*LocalDBConfig, // configMap for a specific database server, i.e. the contents of the "" // configuration unmarshalled into a LocalConfig struct. func (l *LocalDB) getLocalConfig(configName string) (string, error) { - localYAML, ok := l.configMap.Data[configName] if !ok { return "", ErrMissingClusterConfig @@ -379,7 +372,6 @@ func (l *LocalDB) getLocalConfig(configName string) (string, error) { // with the contents of the Patroni YAML configuration file stored in the container running the // server. func (l *LocalDB) refresh(configName string) error { - clusterName := l.configMap.GetObjectMeta().GetLabels()[config.LABEL_PG_CLUSTER] namespace := l.configMap.GetObjectMeta().GetNamespace() @@ -396,7 +388,7 @@ func (l *LocalDB) refresh(configName string) error { return err } - if err := patchConfigMapData(l.kubeclientset, l.configMap, configName, + if err := PatchConfigMapData(l.kubeclientset, l.configMap, configName, localConfigYAML); err != nil { return err } @@ -426,7 +418,7 @@ func GetLocalDBConfigNames(kubeclientset kubernetes.Interface, clusterName, localConfigNames := make([]string, len(dbDeploymentList.Items)) for i, deployment := range dbDeploymentList.Items { - localConfigNames[i] = fmt.Sprintf(pghaLocalConfigName, deployment.GetName()) + localConfigNames[i] = fmt.Sprintf(PGHALocalConfigName, deployment.GetName()) } return localConfigNames, nil diff --git a/internal/operator/failover.go b/internal/operator/failover.go new file mode 100644 index 0000000000..e75e914466 --- /dev/null +++ b/internal/operator/failover.go @@ -0,0 +1,145 @@ +package operator + +/* + Copyright 2018 - 2023 Crunchy Data Solutions, Inc. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +import ( + "context" + "fmt" + + "github.com/crunchydata/postgres-operator/internal/config" + "github.com/crunchydata/postgres-operator/internal/kubeapi" + crv1 "github.com/crunchydata/postgres-operator/pkg/apis/crunchydata.com/v1" + log "github.com/sirupsen/logrus" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" +) + +var roleChangeCmd = []string{"patronictl", "edit-config", "--force", + "--set", "tags.primary_on_role_change=null"} + +// Failover performs a failover to a PostgreSQL cluster, which is effectively +// a "forced switchover." In other words, failover will forcibly ensure that +// there is a primary available. +// +// NOTE: This is reserved as the "last resort" case. If you want a controlled +// failover, you want "Switchover". +// +// A target must be specified. The target should contain the name of the target +// instance (Deployment), and we will attempt to locate that target's Pod. +// +// The target Pod name, called the candidate, is passed into the failover +// command generation function, and then is ultimately used in the failover. +func Failover(clientset kubernetes.Interface, restConfig *rest.Config, cluster *crv1.Pgcluster, target string) error { + // ensure target is not empty + if target == "" { + return fmt.Errorf("failover requires a target instance to be specified.") + } + + // attempt to get the Pod that represents the specified target; if this + // errors, we will return an error + pod, err := getCandidatePod(clientset, cluster, target) + + if err != nil { + return err + } + + candidate := pod.Name + + // generate the command + cmd := generatePostgresFailoverCommand(cluster.Name, candidate) + + // good to generally log which instances are being used in the failover + log.Infof("failover started for cluster %q", cluster.Name) + + if _, stderr, err := kubeapi.ExecToPodThroughAPI(restConfig, clientset, + cmd, "database", pod.Name, cluster.Namespace, nil); err != nil { + return fmt.Errorf(stderr) + } + + log.Infof("failover completed for cluster %q", cluster.Name) + + // and that's all + return nil +} + +// RemovePrimaryOnRoleChangeTag sets the 'primary_on_role_change' tag to null in the +// Patroni DCS, effectively removing the tag. This is accomplished by exec'ing into +// the primary PG pod, and sending a patch request to update the appropriate data (i.e. +// the 'primary_on_role_change' tag) in the DCS. 
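Both getCandidatePod (added to common.go earlier in this diff) and the function below narrow their Pod queries to running Pods by combining a field selector on status.phase with cluster labels. A sketch of that construction; the literal label keys and values are assumptions, since the real ones come from the internal config package constants:

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/fields"
)

func main() {
	// illustrative label keys and values; the real ones come from the
	// internal config package constants
	labelSelector := fields.Set{
		"pg-cluster": "hippo",
		"role":       "master",
	}

	options := metav1.ListOptions{
		FieldSelector: fields.OneTermEqualSelector("status.phase", string(v1.PodRunning)).String(),
		LabelSelector: labelSelector.String(),
	}

	fmt.Println(options.FieldSelector) // status.phase=Running
	fmt.Println(options.LabelSelector) // pg-cluster=hippo,role=master
}
```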
+func RemovePrimaryOnRoleChangeTag(clientset kubernetes.Interface, restconfig *rest.Config, + clusterName, namespace string) error { + ctx := context.TODO() + + selector := config.LABEL_PG_CLUSTER + "=" + clusterName + + "," + config.LABEL_PGHA_ROLE + "=" + config.LABEL_PGHA_ROLE_PRIMARY + + // only consider pods that are running + options := metav1.ListOptions{ + FieldSelector: fields.OneTermEqualSelector("status.phase", string(v1.PodRunning)).String(), + LabelSelector: selector, + } + + pods, err := clientset.CoreV1().Pods(namespace).List(ctx, options) + + if err != nil { + log.Error(err) + return err + } else if len(pods.Items) == 0 { + return fmt.Errorf("no pods found for cluster %q", clusterName) + } else if len(pods.Items) > 1 { + log.Error("More than one primary found after completing the post-failover backup") + } + pod := pods.Items[0] + + // exec into the primary pod and patch the DCS configuration so that the + // 'primary_on_role_change' tag is removed + log.Debugf("running Exec command '%s' with namespace=[%s] podname=[%s] container name=[%s]", + roleChangeCmd, namespace, pod.Name, pod.Spec.Containers[0].Name) + stdout, stderr, err := kubeapi.ExecToPodThroughAPI(restconfig, clientset, roleChangeCmd, + pod.Spec.Containers[0].Name, pod.Name, namespace, nil) + log.Debugf("stdout=[%s] stderr=[%s]", stdout, stderr) + if err != nil { + log.Error(err) + return err + } + return nil +} + +// generatePostgresFailoverCommand creates the command that is used to issue +// a failover command (ensure that there is a promoted primary). +// +// There are two ways to run this command: +// +// 1. Pass in only a clusterName. Patroni will select the best candidate +// 2. Pass in a clusterName AND a target candidate name, which has to be the +// name of a Pod +func generatePostgresFailoverCommand(clusterName, candidate string) []string { + cmd := []string{"patronictl", "failover", "--force", clusterName} + + if candidate != "" { + cmd = append(cmd, "--candidate", candidate) + } + + return cmd +} diff --git a/internal/operator/failover_test.go b/internal/operator/failover_test.go new file mode 100644 index 0000000000..c2a7a78a7e --- /dev/null +++ b/internal/operator/failover_test.go @@ -0,0 +1,45 @@ +package operator + +/* + Copyright 2021 - 2023 Crunchy Data Solutions, Inc. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +import ( + "reflect" + "testing" +) + +func TestGeneratePostgresFailoverCommand(t *testing.T) { + clusterName := "hippo" + candidate := "" + + t.Run("no candidate", func(t *testing.T) { + expected := []string{"patronictl", "failover", "--force", clusterName} + actual := generatePostgresFailoverCommand(clusterName, candidate) + + if !reflect.DeepEqual(expected, actual) { + t.Fatalf("expected: %v actual: %v", expected, actual) + } + }) + + t.Run("candidate", func(t *testing.T) { + candidate = "hippo-abc-123" + expected := []string{"patronictl", "failover", "--force", clusterName, "--candidate", candidate} + actual := generatePostgresFailoverCommand(clusterName, candidate) + + if !reflect.DeepEqual(expected, actual) { + t.Fatalf("expected: %v actual: %v", expected, actual) + } + }) +} diff --git a/internal/operator/operatorupgrade/version-check.go b/internal/operator/operatorupgrade/version-check.go index dce4196634..612dd264e8 100644 --- a/internal/operator/operatorupgrade/version-check.go +++ b/internal/operator/operatorupgrade/version-check.go @@ -1,7 +1,7 @@ package operatorupgrade /* - Copyright 2018 - 2020 Crunchy Data Solutions, Inc. + Copyright 2018 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -45,7 +45,8 @@ func CheckVersion(clientset pgo.Interface, ns string) error { } // where the Operator versions do not match, label the pgclusters accordingly - for _, cluster := range clusterList.Items { + for i := range clusterList.Items { + cluster := &clusterList.Items[i] if msgs.PGO_VERSION != cluster.Spec.UserLabels[config.LABEL_PGO_VERSION] { log.Infof("operator version check - pgcluster %s version is currently %s, current version is %s", cluster.Name, cluster.Spec.UserLabels[config.LABEL_PGO_VERSION], msgs.PGO_VERSION) // check if the annotations map has been created @@ -54,8 +55,7 @@ func CheckVersion(clientset pgo.Interface, ns string) error { cluster.Annotations = map[string]string{} } cluster.Annotations[config.ANNOTATION_IS_UPGRADED] = config.ANNOTATIONS_FALSE - _, err = clientset.CrunchydataV1().Pgclusters(ns).Update(ctx, &cluster, metav1.UpdateOptions{}) - if err != nil { + if _, err := clientset.CrunchydataV1().Pgclusters(ns).Update(ctx, cluster, metav1.UpdateOptions{}); err != nil { return fmt.Errorf("%s: %w", ErrUnsuccessfulVersionCheck, err) } } @@ -69,7 +69,8 @@ func CheckVersion(clientset pgo.Interface, ns string) error { } // where the Operator versions do not match, label the replicas accordingly - for _, replica := range replicaList.Items { + for i := range replicaList.Items { + replica := &replicaList.Items[i] if msgs.PGO_VERSION != replica.Spec.UserLabels[config.LABEL_PGO_VERSION] { log.Infof("operator version check - pgcluster replica %s version is currently %s, current version is %s", replica.Name, replica.Spec.UserLabels[config.LABEL_PGO_VERSION], msgs.PGO_VERSION) // check if the annotations map has been created @@ -78,8 +79,7 @@ func CheckVersion(clientset pgo.Interface, ns string) error { replica.Annotations = map[string]string{} } replica.Annotations[config.ANNOTATION_IS_UPGRADED] = config.ANNOTATIONS_FALSE - _, err = clientset.CrunchydataV1().Pgreplicas(ns).Update(ctx, &replica, metav1.UpdateOptions{}) - if err != nil { + if _, err := clientset.CrunchydataV1().Pgreplicas(ns).Update(ctx, replica, metav1.UpdateOptions{}); err != nil { return fmt.Errorf("%s: %w", ErrUnsuccessfulVersionCheck, err) } } 
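The CheckVersion loops in version-check.go above now index into the slice before taking an address, rather than taking the address of the range variable. The distinction matters: a range variable is a copy of the element, so mutating it (or retaining a pointer to it, which linters flag as implicit memory aliasing) never updates the slice itself. A self-contained illustration:

```go
package main

import "fmt"

type pgcluster struct{ Name string }

func main() {
	items := []pgcluster{{Name: "hippo"}, {Name: "rhino"}}

	// Indexing into the slice, as the loops above now do, yields a pointer
	// to the real element, so in-place updates stick.
	for i := range items {
		cluster := &items[i]
		cluster.Name += "-upgraded"
	}

	// By contrast, `for _, cluster := range items` copies each element into
	// a loop variable; assigning through that copy would leave the slice
	// untouched, and &cluster would alias the reused variable.
	fmt.Println(items[0].Name, items[1].Name) // hippo-upgraded rhino-upgraded
}
```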
diff --git a/internal/operator/pgbackrest.go b/internal/operator/pgbackrest.go index 42a8f645d1..d46b91eed0 100644 --- a/internal/operator/pgbackrest.go +++ b/internal/operator/pgbackrest.go @@ -1,7 +1,7 @@ package operator /* - Copyright 2020 Crunchy Data Solutions, Inc. + Copyright 2020 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -59,8 +59,8 @@ func addBackRestConfigDirectoryVolumeAndMounts(podSpec *v1.PodSpec, volumeName s // Any projections are included as custom pgBackRest configuration. func AddBackRestConfigVolumeAndMounts(podSpec *v1.PodSpec, clusterName string, projections []v1.VolumeProjection) { var combined []v1.VolumeProjection - var defaultConfigNames = clusterName + "-config-backrest" - var varTrue = true + defaultConfigNames := clusterName + "-config-backrest" + varTrue := true // Start with custom configurations from the CRD. combined = append(combined, projections...) diff --git a/internal/operator/pgbackrest_test.go b/internal/operator/pgbackrest_test.go index 046d2be770..ed4e11085b 100644 --- a/internal/operator/pgbackrest_test.go +++ b/internal/operator/pgbackrest_test.go @@ -1,7 +1,7 @@ package operator /* - Copyright 2020 Crunchy Data Solutions, Inc. + Copyright 2020 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at diff --git a/internal/operator/pgdump/dump.go b/internal/operator/pgdump/dump.go index 940713b3a0..a124c5314d 100644 --- a/internal/operator/pgdump/dump.go +++ b/internal/operator/pgdump/dump.go @@ -1,7 +1,7 @@ package pgdump /* - Copyright 2019 - 2020 Crunchy Data Solutions, Inc. + Copyright 2019 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -53,6 +53,7 @@ type pgDumpJobTemplateFields struct { PgDumpFilename string PgDumpAll string PgDumpPVC string + Tolerations string } // Dump ... @@ -60,7 +61,7 @@ func Dump(namespace string, clientset kubeapi.Interface, task *crv1.Pgtask) { ctx := context.TODO() var err error - //create the Job to run the pgdump command + // create the Job to run the pgdump command cmd := task.Spec.Parameters[config.LABEL_PGDUMP_COMMAND] @@ -102,15 +103,16 @@ func Dump(namespace string, clientset kubeapi.Interface, task *crv1.Pgtask) { jobName := taskName + "-" + util.RandStringBytesRmndr(4) jobFields := pgDumpJobTemplateFields{ - JobName: jobName, - TaskName: taskName, - ClusterName: task.Spec.Parameters[config.LABEL_PG_CLUSTER], - PodName: task.Spec.Parameters[config.LABEL_POD_NAME], - SecurityContext: operator.GetPodSecurityContext(task.Spec.StorageSpec.GetSupplementalGroups()), - Command: cmd, //?? - CommandOpts: task.Spec.Parameters[config.LABEL_PGDUMP_OPTS], - CCPImagePrefix: util.GetValueOrDefault(cluster.Spec.CCPImagePrefix, operator.Pgo.Cluster.CCPImagePrefix), - CCPImageTag: operator.Pgo.Cluster.CCPImageTag, + JobName: jobName, + TaskName: taskName, + ClusterName: task.Spec.Parameters[config.LABEL_PG_CLUSTER], + PodName: task.Spec.Parameters[config.LABEL_POD_NAME], + SecurityContext: operator.GetPodSecurityContext(task.Spec.StorageSpec.GetSupplementalGroups()), + Command: cmd, //?? 
+ CommandOpts: task.Spec.Parameters[config.LABEL_PGDUMP_OPTS], + CCPImagePrefix: util.GetValueOrDefault(cluster.Spec.CCPImagePrefix, operator.Pgo.Cluster.CCPImagePrefix), + CCPImageTag: util.GetValueOrDefault(util.GetStandardImageTag(cluster.Spec.CCPImage, cluster.Spec.CCPImageTag), + operator.Pgo.Cluster.CCPImageTag), PgDumpHost: task.Spec.Parameters[config.LABEL_PGDUMP_HOST], PgDumpUserSecret: task.Spec.Parameters[config.LABEL_PGDUMP_USER], PgDumpDB: task.Spec.Parameters[config.LABEL_PGDUMP_DB], @@ -118,6 +120,7 @@ func Dump(namespace string, clientset kubeapi.Interface, task *crv1.Pgtask) { PgDumpOpts: task.Spec.Parameters[config.LABEL_PGDUMP_OPTS], PgDumpAll: task.Spec.Parameters[config.LABEL_PGDUMP_ALL], PgDumpPVC: pvcName, + Tolerations: util.GetTolerations(cluster.Spec.Tolerations), } var doc2 bytes.Buffer @@ -128,7 +131,7 @@ func Dump(namespace string, clientset kubeapi.Interface, task *crv1.Pgtask) { } if operator.CRUNCHY_DEBUG { - config.PgDumpBackupJobTemplate.Execute(os.Stdout, jobFields) + _ = config.PgDumpBackupJobTemplate.Execute(os.Stdout, jobFields) } newjob := v1batch.Job{} @@ -139,7 +142,7 @@ func Dump(namespace string, clientset kubeapi.Interface, task *crv1.Pgtask) { } // set the container image to an override value, if one exists - operator.SetContainerImageOverride(config.CONTAINER_IMAGE_CRUNCHY_PGDUMP, + operator.SetContainerImageOverride(config.CONTAINER_IMAGE_CRUNCHY_POSTGRES_HA, &newjob.Spec.Template.Spec.Containers[0]) _, err = clientset.BatchV1().Jobs(namespace).Create(ctx, &newjob, metav1.CreateOptions{}) @@ -148,7 +151,7 @@ func Dump(namespace string, clientset kubeapi.Interface, task *crv1.Pgtask) { return } - //update the pgdump task status to submitted - updates task, not the job. + // update the pgdump task status to submitted - updates task, not the job. patch, err := kubeapi.NewJSONPatch().Add("spec", "status")(crv1.PgBackupJobSubmitted).Bytes() if err == nil { log.Debugf("patching task %s: %s", task.Spec.Name, patch) @@ -158,5 +161,4 @@ func Dump(namespace string, clientset kubeapi.Interface, task *crv1.Pgtask) { if err != nil { log.Error(err.Error()) } - } diff --git a/internal/operator/pgdump/restore.go b/internal/operator/pgdump/restore.go index 57cc0f7b12..43cce4d4c7 100644 --- a/internal/operator/pgdump/restore.go +++ b/internal/operator/pgdump/restore.go @@ -1,7 +1,7 @@ package pgdump /* - Copyright 2019 - 2020 Crunchy Data Solutions, Inc. + Copyright 2019 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -28,8 +28,10 @@ import ( "github.com/crunchydata/postgres-operator/internal/operator/pvc" "github.com/crunchydata/postgres-operator/internal/util" crv1 "github.com/crunchydata/postgres-operator/pkg/apis/crunchydata.com/v1" + log "github.com/sirupsen/logrus" v1batch "k8s.io/api/batch/v1" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -49,6 +51,7 @@ type restorejobTemplateFields struct { CCPImageTag string PgPort string NodeSelector string + Tolerations string } // Restore ... 
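The pgdump job above (and the pgrestore job in the next hunk) now resolves CCPImageTag through a fallback: prefer the tag derived from the cluster spec, otherwise use the operator-wide pgo.yaml setting. A hypothetical re-implementation of that helper pattern; the real functions live in internal/util and may differ:

```go
package main

import "fmt"

// getValueOrDefault sketches the fallback relied on above: prefer the
// cluster-derived value, otherwise use the operator-wide default.
// (Hypothetical re-implementation; the real helper lives in internal/util.)
func getValueOrDefault(value, defaultValue string) string {
	if value != "" {
		return value
	}
	return defaultValue
}

func main() {
	// cluster spec leaves the tag empty: the pgo.yaml value wins
	fmt.Println(getValueOrDefault("", "ubi8-13.10-4.6.10"))

	// cluster pins its own tag: it takes precedence
	fmt.Println(getValueOrDefault("ubi8-12.14-4.6.10", "ubi8-13.10-4.6.10"))
}
```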
@@ -72,10 +75,21 @@ func Restore(namespace string, clientset kubeapi.Interface, task *crv1.Pgtask) { return } - //use the storage config from the primary PostgreSQL cluster + // use the storage config from the primary PostgreSQL cluster storage := cluster.Spec.PrimaryStorage taskName := task.Name + var nodeAffinity *v1.NodeAffinity + + if task.Spec.Parameters["NodeLabelKey"] != "" && task.Spec.Parameters["NodeLabelValue"] != "" { + affinityType := crv1.NodeAffinityTypePreferred + if task.Spec.Parameters[config.LABEL_NODE_AFFINITY_TYPE] == "required" { + affinityType = crv1.NodeAffinityTypeRequired + } + + nodeAffinity = util.GenerateNodeAffinity(affinityType, + task.Spec.Parameters["NodeLabelKey"], []string{task.Spec.Parameters["NodeLabelValue"]}) + } jobFields := restorejobTemplateFields{ JobName: fmt.Sprintf("pgrestore-%s-%s", task.Spec.Parameters[config.LABEL_PGRESTORE_FROM_CLUSTER], @@ -91,8 +105,10 @@ func Restore(namespace string, clientset kubeapi.Interface, task *crv1.Pgtask) { PGRestoreOpts: task.Spec.Parameters[config.LABEL_PGRESTORE_OPTS], PITRTarget: task.Spec.Parameters[config.LABEL_PGRESTORE_PITR_TARGET], CCPImagePrefix: util.GetValueOrDefault(cluster.Spec.CCPImagePrefix, operator.Pgo.Cluster.CCPImagePrefix), - CCPImageTag: operator.Pgo.Cluster.CCPImageTag, - NodeSelector: operator.GetAffinity(task.Spec.Parameters["NodeLabelKey"], task.Spec.Parameters["NodeLabelValue"], "In"), + CCPImageTag: util.GetValueOrDefault(util.GetStandardImageTag(cluster.Spec.CCPImage, cluster.Spec.CCPImageTag), + operator.Pgo.Cluster.CCPImageTag), + NodeSelector: operator.GetNodeAffinity(nodeAffinity), + Tolerations: util.GetTolerations(cluster.Spec.Tolerations), } var doc2 bytes.Buffer @@ -104,7 +120,7 @@ func Restore(namespace string, clientset kubeapi.Interface, task *crv1.Pgtask) { } if operator.CRUNCHY_DEBUG { - config.PgRestoreJobTemplate.Execute(os.Stdout, jobFields) + _ = config.PgRestoreJobTemplate.Execute(os.Stdout, jobFields) } newjob := v1batch.Job{} @@ -115,7 +131,7 @@ func Restore(namespace string, clientset kubeapi.Interface, task *crv1.Pgtask) { } // set the container image to an override value, if one exists - operator.SetContainerImageOverride(config.CONTAINER_IMAGE_CRUNCHY_PGRESTORE, + operator.SetContainerImageOverride(config.CONTAINER_IMAGE_CRUNCHY_POSTGRES_HA, &newjob.Spec.Template.Spec.Containers[0]) j, err := clientset.BatchV1().Jobs(namespace).Create(ctx, &newjob, metav1.CreateOptions{}) @@ -125,5 +141,4 @@ func Restore(namespace string, clientset kubeapi.Interface, task *crv1.Pgtask) { return } log.Debugf("pgrestore job %s created", j.Name) - } diff --git a/internal/operator/pvc/pvc.go b/internal/operator/pvc/pvc.go index 24304b895a..dd530933ec 100644 --- a/internal/operator/pvc/pvc.go +++ b/internal/operator/pvc/pvc.go @@ -1,7 +1,7 @@ package pvc /* - Copyright 2017 - 2020 Crunchy Data Solutions, Inc. + Copyright 2017 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at @@ -152,7 +152,7 @@ func Create(clientset kubernetes.Interface, name, clusterName string, storageSpe log.Debug("using dynamic PVC template") err = config.PVCStorageClassTemplate.Execute(&doc2, pvcFields) if operator.CRUNCHY_DEBUG { - config.PVCStorageClassTemplate.Execute(os.Stdout, pvcFields) + _ = config.PVCStorageClassTemplate.Execute(os.Stdout, pvcFields) } } else { log.Debugf("matchlabels from spec is [%s]", storageSpec.MatchLabels) @@ -168,7 +168,7 @@ func Create(clientset kubernetes.Interface, name, clusterName string, storageSpe err = config.PVCTemplate.Execute(&doc2, pvcFields) if operator.CRUNCHY_DEBUG { - config.PVCTemplate.Execute(os.Stdout, pvcFields) + _ = config.PVCTemplate.Execute(os.Stdout, pvcFields) } } if err != nil { @@ -217,7 +217,6 @@ func Exists(clientset kubernetes.Interface, name string, namespace string) bool } func getMatchLabels(key, value string) string { - matchLabelsTemplateFields := matchLabelsTemplateFields{} matchLabelsTemplateFields.Key = key matchLabelsTemplateFields.Value = value @@ -230,5 +229,4 @@ func getMatchLabels(key, value string) string { } return doc.String() - } diff --git a/internal/operator/storage.go b/internal/operator/storage.go index da06087deb..dd168457de 100644 --- a/internal/operator/storage.go +++ b/internal/operator/storage.go @@ -1,7 +1,7 @@ package operator /* - Copyright 2020 Crunchy Data Solutions, Inc. + Copyright 2020 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -33,7 +33,7 @@ func (s StorageResult) InlineVolumeSource() string { b := new(bytes.Buffer) e := json.NewEncoder(b) e.SetEscapeHTML(false) - e.Encode(s.VolumeSource()) + _ = e.Encode(s.VolumeSource()) // remove trailing newline and surrounding brackets return b.String()[1 : b.Len()-2] diff --git a/internal/operator/storage_test.go b/internal/operator/storage_test.go index 280b1c6cd0..d6c43466fb 100644 --- a/internal/operator/storage_test.go +++ b/internal/operator/storage_test.go @@ -1,7 +1,7 @@ package operator /* - Copyright 2020 Crunchy Data Solutions, Inc. + Copyright 2020 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -32,10 +32,14 @@ func TestStorageResultInlineVolumeSource(t *testing.T) { expected string }{ {StorageResult{}, `"emptyDir":{}`}, - {StorageResult{PersistentVolumeClaimName: "<\x00"}, - `"persistentVolumeClaim":{"claimName":"<\u0000"}`}, - {StorageResult{PersistentVolumeClaimName: "some-name"}, - `"persistentVolumeClaim":{"claimName":"some-name"}`}, + { + StorageResult{PersistentVolumeClaimName: "<\x00"}, + `"persistentVolumeClaim":{"claimName":"<\u0000"}`, + }, + { + StorageResult{PersistentVolumeClaimName: "some-name"}, + `"persistentVolumeClaim":{"claimName":"some-name"}`, + }, } { if actual := tt.value.InlineVolumeSource(); actual != tt.expected { t.Errorf("expected %q for %v, got %q", tt.expected, tt.value, actual) diff --git a/internal/operator/switchover.go b/internal/operator/switchover.go new file mode 100644 index 0000000000..7b45f93249 --- /dev/null +++ b/internal/operator/switchover.go @@ -0,0 +1,113 @@ +package operator + +/* + Copyright 2021 - 2023 Crunchy Data Solutions, Inc. 
+ Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +import ( + "fmt" + + "github.com/crunchydata/postgres-operator/internal/kubeapi" + crv1 "github.com/crunchydata/postgres-operator/pkg/apis/crunchydata.com/v1" + log "github.com/sirupsen/logrus" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" +) + +// Switchover performs a controlled switchover within a PostgreSQL cluster, i.e. +// demoting a primary and promoting a replica. There are two types of switchover +// methods that can be invoked. +// +// Method #1: Automatic Choice +// +// The switchover command invokes Patroni, which works as follows: +// +// 1. The function looks for all available replicas as well as the current +// primary. We look up the primary for convenience to avoid various API calls. +// +// 2. We then search over the list to find both a primary and a suitable +// candidate for promotion. A candidate is suitable if: +// +// - It is on the latest timeline +// - It has the least amount of replication lag +// +// This is done to limit the risk of data loss. +// +// If either a primary or candidate is **not** found, we do not switch over. +// +// 3. If all of the above works successfully, a switchover is attempted. +// +// Method #2: Targeted Choice +// +// 1. If the "target" parameter, which should contain the name of the target +// instance (Deployment), is not empty, then we will attempt to locate that +// target Pod. +// +// 2. The target Pod name, called the candidate, is passed into the switchover +// command generation function, and then is ultimately used in the switchover. +func Switchover(clientset kubernetes.Interface, restConfig *rest.Config, cluster *crv1.Pgcluster, target string) error { + // the method to get the pod is dictated by whether or not there is a target + // specified. + // + // If target is specified, then we will attempt to get the Pod that + // represents that target. + // + // If it is not specified, then we will attempt to get any Pod. + // + // If either errors, we will return an error + candidate := "" + pod, err := getCandidatePod(clientset, cluster, target) + + if err != nil { + return err + } + + if target != "" { + candidate = pod.Name + } + + // generate the command + cmd := generatePostgresSwitchoverCommand(cluster.Name, candidate) + + // good to generally log which instances are being used in the switchover + log.Infof("controlled switchover started for cluster %q", cluster.Name) + + if _, stderr, err := kubeapi.ExecToPodThroughAPI(restConfig, clientset, + cmd, "database", pod.Name, cluster.Namespace, nil); err != nil { + return fmt.Errorf(stderr) + } + + log.Infof("controlled switchover completed for cluster %q", cluster.Name) + + // and that's all + return nil +} + +// generatePostgresSwitchoverCommand creates the command that is used to issue +// a switchover (demote a primary, promote a replica). +// +// There are two ways to run this command: +// +// 1. Pass in only a clusterName. Patroni will select the best candidate +// 2.
Pass in a clusterName AND a target candidate name, which has to be the +// name of a Pod +func generatePostgresSwitchoverCommand(clusterName, candidate string) []string { + cmd := []string{"patronictl", "switchover", "--force", clusterName} + + if candidate != "" { + cmd = append(cmd, "--candidate", candidate) + } + + return cmd +} diff --git a/internal/operator/switchover_test.go b/internal/operator/switchover_test.go new file mode 100644 index 0000000000..50258d32d1 --- /dev/null +++ b/internal/operator/switchover_test.go @@ -0,0 +1,45 @@ +package operator + +/* + Copyright 2021 - 2023 Crunchy Data Solutions, Inc. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +import ( + "reflect" + "testing" +) + +func TestGeneratePostgresSwitchoverCommand(t *testing.T) { + clusterName := "hippo" + candidate := "" + + t.Run("no candidate", func(t *testing.T) { + expected := []string{"patronictl", "switchover", "--force", clusterName} + actual := generatePostgresSwitchoverCommand(clusterName, candidate) + + if !reflect.DeepEqual(expected, actual) { + t.Fatalf("expected: %v actual: %v", expected, actual) + } + }) + + t.Run("candidate", func(t *testing.T) { + candidate = "hippo-abc-123" + expected := []string{"patronictl", "switchover", "--force", clusterName, "--candidate", candidate} + actual := generatePostgresSwitchoverCommand(clusterName, candidate) + + if !reflect.DeepEqual(expected, actual) { + t.Fatalf("expected: %v actual: %v", expected, actual) + } + }) +} diff --git a/internal/operator/task/applypolicies.go b/internal/operator/task/applypolicies.go index c3d1d306c9..f21a4ca9de 100644 --- a/internal/operator/task/applypolicies.go +++ b/internal/operator/task/applypolicies.go @@ -1,7 +1,7 @@ package task /* - Copyright 2018 - 2020 Crunchy Data Solutions, Inc. + Copyright 2018 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at @@ -33,13 +33,13 @@ func ApplyPolicies(clusterName string, clientset kubeapi.Interface, RESTConfig * task, err := clientset.CrunchydataV1().Pgtasks(ns).Get(ctx, taskName, metav1.GetOptions{}) if err == nil { - //apply those policies + // apply those policies for k := range task.Spec.Parameters { log.Debugf("applying policy %s to %s", k, clusterName) applyPolicy(clientset, RESTConfig, k, clusterName, ns) } - //delete the pgtask to not redo this again - clientset.CrunchydataV1().Pgtasks(ns).Delete(ctx, taskName, metav1.DeleteOptions{}) + // delete the pgtask to not redo this again + _ = clientset.CrunchydataV1().Pgtasks(ns).Delete(ctx, taskName, metav1.DeleteOptions{}) } } @@ -70,11 +70,10 @@ func applyPolicy(clientset kubeapi.Interface, restconfig *rest.Config, policyNam log.Error(err) } - //update the pgcluster crd labels with the new policy + // update the pgcluster crd labels with the new policy log.Debugf("patching cluster %s: %s", cl.Spec.Name, patch) _, err = clientset.CrunchydataV1().Pgclusters(ns).Patch(ctx, cl.Spec.Name, types.MergePatchType, patch, metav1.PatchOptions{}) if err != nil { log.Error(err) } - } diff --git a/internal/operator/task/rmbackups.go b/internal/operator/task/rmbackups.go deleted file mode 100644 index 91f48ee2e6..0000000000 --- a/internal/operator/task/rmbackups.go +++ /dev/null @@ -1,42 +0,0 @@ -package task - -/* - Copyright 2018 - 2020 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -import ( - "context" - - "github.com/crunchydata/postgres-operator/internal/config" - crv1 "github.com/crunchydata/postgres-operator/pkg/apis/crunchydata.com/v1" - log "github.com/sirupsen/logrus" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/kubernetes" -) - -// RemoveBackups ... -func RemoveBackups(namespace string, clientset kubernetes.Interface, task *crv1.Pgtask) { - ctx := context.TODO() - - //delete any backup jobs for this cluster - //kubectl delete job --selector=pg-cluster=clustername - - log.Debugf("deleting backup jobs with selector=%s=%s", config.LABEL_PG_CLUSTER, task.Spec.Parameters[config.LABEL_PG_CLUSTER]) - deletePropagation := metav1.DeletePropagationForeground - clientset. - BatchV1().Jobs(namespace). - DeleteCollection(ctx, - metav1.DeleteOptions{PropagationPolicy: &deletePropagation}, - metav1.ListOptions{LabelSelector: config.LABEL_PG_CLUSTER + "=" + task.Spec.Parameters[config.LABEL_PG_CLUSTER]}) -} diff --git a/internal/operator/task/rmdata.go b/internal/operator/task/rmdata.go index b44c529b4b..643406848b 100644 --- a/internal/operator/task/rmdata.go +++ b/internal/operator/task/rmdata.go @@ -1,7 +1,7 @@ package task /* - Copyright 2018 - 2020 Crunchy Data Solutions, Inc. + Copyright 2018 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at @@ -47,13 +47,14 @@ type rmdatajobTemplateFields struct { RemoveBackup string IsBackup string IsReplica string + Tolerations string } // RemoveData ... func RemoveData(namespace string, clientset kubeapi.Interface, task *crv1.Pgtask) { ctx := context.TODO() - //create marker (clustername, namespace) + // create marker (clustername, namespace) patch, err := kubeapi.NewJSONPatch(). Add("spec", "parameters", config.LABEL_DELETE_DATA_STARTED)(time.Now().Format(time.RFC3339)). Bytes() @@ -67,8 +68,8 @@ func RemoveData(namespace string, clientset kubeapi.Interface, task *crv1.Pgtask return } - //create the Job to remove the data - //pvcName := task.Spec.Parameters[config.LABEL_PVC_NAME] + // create the Job to remove the data + // pvcName := task.Spec.Parameters[config.LABEL_PVC_NAME] clusterName := task.Spec.Parameters[config.LABEL_PG_CLUSTER] clusterPGHAScope := task.Spec.Parameters[config.LABEL_PGHA_SCOPE] replicaName := task.Spec.Parameters[config.LABEL_REPLICA_NAME] @@ -83,13 +84,6 @@ func RemoveData(namespace string, clientset kubeapi.Interface, task *crv1.Pgtask return } - // if the clustername is not empty, get the pgcluster - cluster, err := clientset.CrunchydataV1().Pgclusters(namespace).Get(ctx, clusterName, metav1.GetOptions{}) - if err != nil { - log.Error(err) - return - } - jobName := clusterName + "-rmdata-" + util.RandStringBytesRmndr(4) jobFields := rmdatajobTemplateFields{ @@ -102,46 +96,46 @@ func RemoveData(namespace string, clientset kubeapi.Interface, task *crv1.Pgtask RemoveBackup: removeBackup, IsReplica: isReplica, IsBackup: isBackup, - PGOImagePrefix: util.GetValueOrDefault(cluster.Spec.PGOImagePrefix, operator.Pgo.Pgo.PGOImagePrefix), + PGOImagePrefix: util.GetValueOrDefault(task.Spec.Parameters[config.LABEL_IMAGE_PREFIX], operator.Pgo.Pgo.PGOImagePrefix), PGOImageTag: operator.Pgo.Pgo.PGOImageTag, SecurityContext: operator.GetPodSecurityContext(task.Spec.StorageSpec.GetSupplementalGroups()), + Tolerations: task.Spec.Parameters[config.LABEL_RM_TOLERATIONS], } + log.Debugf("creating rmdata job %s for cluster %s ", jobName, task.Spec.Name) - var doc2 bytes.Buffer - err = config.RmdatajobTemplate.Execute(&doc2, jobFields) - if err != nil { - log.Error(err.Error()) - return + if operator.CRUNCHY_DEBUG { + _ = config.RmdatajobTemplate.Execute(os.Stdout, jobFields) } - if operator.CRUNCHY_DEBUG { - config.RmdatajobTemplate.Execute(os.Stdout, jobFields) + doc := bytes.Buffer{} + if err := config.RmdatajobTemplate.Execute(&doc, jobFields); err != nil { + log.Error(err) + return } - newjob := v1batch.Job{} - err = json.Unmarshal(doc2.Bytes(), &newjob) - if err != nil { + job := v1batch.Job{} + if err := json.Unmarshal(doc.Bytes(), &job); err != nil { log.Error("error unmarshalling json into Job " + err.Error()) return } // set the container image to an override value, if one exists operator.SetContainerImageOverride(config.CONTAINER_IMAGE_PGO_RMDATA, - &newjob.Spec.Template.Spec.Containers[0]) + &job.Spec.Template.Spec.Containers[0]) - j, err := clientset.BatchV1().Jobs(namespace).Create(ctx, &newjob, metav1.CreateOptions{}) - if err != nil { - log.Errorf("got error when creating rmdata job %s", newjob.Name) + if _, err := clientset.BatchV1().Jobs(namespace).Create(ctx, &job, metav1.CreateOptions{}); err != nil { + log.Error(err) return } - log.Debugf("successfully created rmdata job %s", j.Name) - publishDeleteCluster(task.Spec.Parameters[config.LABEL_PG_CLUSTER], task.ObjectMeta.Labels[config.LABEL_PG_CLUSTER_IDENTIFIER], + 
log.Debugf("successfully created rmdata job %s", job.Name) + + publishDeleteCluster(task.Spec.Parameters[config.LABEL_PG_CLUSTER], task.ObjectMeta.Labels[config.LABEL_PGOUSER], namespace) } -func publishDeleteCluster(clusterName, identifier, username, namespace string) { +func publishDeleteCluster(clusterName, username, namespace string) { topics := make([]string, 1) topics[0] = events.EventTopicCluster diff --git a/internal/operator/task/workflow.go b/internal/operator/task/workflow.go index 43c6e1100b..02f58369b4 100644 --- a/internal/operator/task/workflow.go +++ b/internal/operator/task/workflow.go @@ -1,7 +1,7 @@ package task /* - Copyright 2018 - 2020 Crunchy Data Solutions, Inc. + Copyright 2018 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -30,19 +30,15 @@ import ( // CompleteCreateClusterWorkflow ... update the pgtask for the // create cluster workflow for a given cluster func CompleteCreateClusterWorkflow(clusterName string, clientset pgo.Interface, ns string) { - taskName := clusterName + "-" + crv1.PgtaskWorkflowCreateClusterType completeWorkflow(clientset, ns, taskName) - } func CompleteBackupWorkflow(clusterName string, clientset pgo.Interface, ns string) { - taskName := clusterName + "-" + crv1.PgtaskWorkflowBackupType completeWorkflow(clientset, ns, taskName) - } func completeWorkflow(clientset pgo.Interface, taskNamespace, taskName string) { @@ -54,7 +50,7 @@ func completeWorkflow(clientset pgo.Interface, taskNamespace, taskName string) { return } - //mark this workflow as completed + // mark this workflow as completed id := task.Spec.Parameters[crv1.PgtaskWorkflowID] log.Debugf("completing workflow %s id %s", taskName, id) @@ -72,5 +68,4 @@ func completeWorkflow(clientset pgo.Interface, taskNamespace, taskName string) { if err != nil { log.Error(err) } - } diff --git a/internal/operator/wal.go b/internal/operator/wal.go index 1b679755fb..9005a27141 100644 --- a/internal/operator/wal.go +++ b/internal/operator/wal.go @@ -1,7 +1,7 @@ package operator /* - Copyright 2020 Crunchy Data Solutions, Inc. + Copyright 2020 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at diff --git a/internal/patroni/doc.go b/internal/patroni/doc.go index 63a42c84d3..bd7055962a 100644 --- a/internal/patroni/doc.go +++ b/internal/patroni/doc.go @@ -4,7 +4,7 @@ package patroni /* - Copyright 2020 Crunchy Data Solutions, Inc. + Copyright 2020 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at diff --git a/internal/patroni/patroni.go b/internal/patroni/patroni.go index 50d75bec33..3570c25b9f 100644 --- a/internal/patroni/patroni.go +++ b/internal/patroni/patroni.go @@ -1,7 +1,7 @@ package patroni /* - Copyright 2020 Crunchy Data Solutions, Inc. + Copyright 2020 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at @@ -35,14 +35,14 @@ import ( const dbContainerName = "database" var ( - // reloadCMD is the command for reloading a specific PG instance (primary or replica) within a - // PG cluster - reloadCMD = []string{"/bin/bash", "-c", - fmt.Sprintf("curl -X POST --silent http://127.0.0.1:%s/reload", config.DEFAULT_PATRONI_PORT)} - // restartCMD is the command for restart a specific PG database (primary or replica) within a - // PG cluster - restartCMD = []string{"/bin/bash", "-c", - fmt.Sprintf("curl -X POST --silent http://127.0.0.1:%s/restart", config.DEFAULT_PATRONI_PORT)} + // reloadCMD is the command for reloading a specific PG instance (primary or + // replica) within a Postgres cluster. It requires a cluster and instance name + // to be appended to it. + reloadCMD = []string{"patronictl", "reload", "--force"} + // restartCMD is the command for restarting a specific PG database (primary or + // replica) within a Postgres cluster. It requires a cluster and instance name + // to be appended to it. + restartCMD = []string{"patronictl", "restart", "--force"} // ErrInstanceNotFound is the error thrown when a target instance cannot be found in the cluster ErrInstanceNotFound = errors.New("The instance does not exist in the cluster") @@ -77,7 +77,6 @@ type RestartResult struct { // NewPatroniClient creates a new Patroni client func NewPatroniClient(restConfig *rest.Config, kubeclientset kubernetes.Interface, clusterName, namespace string) Client { - return &patroniClient{ restConfig: restConfig, kubeclientset: kubeclientset, @@ -112,7 +111,6 @@ func (p *patroniClient) getClusterInstances() (map[string]corev1.Pod, error) { // ReloadCluster reloads the configuration for a PostgreSQL cluster. Specifically, a Patroni // reload (which includes a PG reload) is executed on the primary and each replica within the cluster. func (p *patroniClient) ReloadCluster() error { - instanceMap, err := p.getClusterInstances() if err != nil { return err @@ -131,7 +129,6 @@ func (p *patroniClient) ReloadCluster() error { // Patroni restart is executed on the primary and each replica within the cluster. A slice is also // returned containing the names of all instances restarted within the cluster. func (p *patroniClient) RestartCluster() ([]RestartResult, error) { - var restartResult []RestartResult instanceMap, err := p.getClusterInstances() @@ -156,7 +153,6 @@ func (p *patroniClient) RestartCluster() ([]RestartResult, error) { // RestartInstances restarts the PostgreSQL databases for the instances specified. Specifically, a // Patroni restart is executed on the primary and each replica within the cluster.
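The patronictl-based commands above are shared base slices that each call extends with a cluster and instance name. A minimal, self-contained sketch of how the final argument vectors come out, assuming a hypothetical cluster "hippo" and pod "hippo-abc-123":

package main

import "fmt"

var (
	reloadCMD  = []string{"patronictl", "reload", "--force"}
	restartCMD = []string{"patronictl", "restart", "--force"}
)

func main() {
	// a literal slice is at full capacity, so append allocates a fresh
	// backing array and the shared base command is left untouched
	reload := append(reloadCMD, "hippo", "hippo-abc-123")
	restart := append(restartCMD, "hippo", "hippo-abc-123")

	fmt.Println(reload)  // [patronictl reload --force hippo hippo-abc-123]
	fmt.Println(restart) // [patronictl restart --force hippo hippo-abc-123]
}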
func (p *patroniClient) RestartInstances(instances ...string) ([]RestartResult, error) { - var restartResult []RestartResult instanceMap, err := p.getClusterInstances() @@ -195,25 +191,28 @@ func (p *patroniClient) RestartInstances(instances ...string) ([]RestartResult, // reload performs a Patroni reload (which includes a PG reload) on a specific instance (primary or // replica) within a PG cluster func (p *patroniClient) reload(podName string) error { + cmd := reloadCMD + cmd = append(cmd, p.clusterName, podName) + + stdout, stderr, err := kubeapi.ExecToPodThroughAPI(p.restConfig, p.kubeclientset, + cmd, dbContainerName, podName, p.namespace, nil) - stdout, stderr, err := kubeapi.ExecToPodThroughAPI(p.restConfig, p.kubeclientset, reloadCMD, - dbContainerName, podName, p.namespace, nil) if err != nil { - return err - } else if stderr != "" { return fmt.Errorf(stderr) } log.Debugf("Successfully reloaded PG on pod %s: %s", podName, stdout) - return err + return nil } // restart performs a Patroni restart on a specific instance (primary or replica) within a PG // cluster. func (p *patroniClient) restart(podName string) error { + cmd := restartCMD + cmd = append(cmd, p.clusterName, podName) - stdout, stderr, err := kubeapi.ExecToPodThroughAPI(p.restConfig, p.kubeclientset, restartCMD, + stdout, stderr, err := kubeapi.ExecToPodThroughAPI(p.restConfig, p.kubeclientset, cmd, dbContainerName, podName, p.namespace, nil) if err != nil { return err diff --git a/internal/pgadmin/backoff.go b/internal/pgadmin/backoff.go index d1df68c80d..ab9176ae4c 100644 --- a/internal/pgadmin/backoff.go +++ b/internal/pgadmin/backoff.go @@ -1,7 +1,7 @@ package pgadmin /* - Copyright 2020 Crunchy Data Solutions, Inc. + Copyright 2020 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -40,6 +40,7 @@ const ( ) // Apply provides a new time with respect to t based on the jitter mode +// #nosec: G404 func (jm Jitter) Apply(t time.Duration) time.Duration { switch jm { case JitterNone: // being explicit in case default case changes diff --git a/internal/pgadmin/backoff_test.go b/internal/pgadmin/backoff_test.go index aeae16f7a5..b87d8db6d7 100644 --- a/internal/pgadmin/backoff_test.go +++ b/internal/pgadmin/backoff_test.go @@ -1,7 +1,7 @@ package pgadmin /* - Copyright 2020 Crunchy Data Solutions, Inc. + Copyright 2020 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -108,7 +108,6 @@ func TestSubscripts(t *testing.T) { t.Fail() } } - } func TestUniformPolicy(t *testing.T) { diff --git a/internal/pgadmin/crypto.go b/internal/pgadmin/crypto.go index 55ebc8b771..f602e63dca 100644 --- a/internal/pgadmin/crypto.go +++ b/internal/pgadmin/crypto.go @@ -1,7 +1,7 @@ package pgadmin /* - Copyright 2018 - 2020 Crunchy Data Solutions, Inc. + Copyright 2018 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at diff --git a/internal/pgadmin/crypto_test.go b/internal/pgadmin/crypto_test.go index 36f8468379..6f11a665a5 100644 --- a/internal/pgadmin/crypto_test.go +++ b/internal/pgadmin/crypto_test.go @@ -1,7 +1,7 @@ package pgadmin /* - Copyright 2018 - 2020 Crunchy Data Solutions, Inc. + Copyright 2018 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -28,8 +28,10 @@ var testData = struct { clearPW: "w052H0UBM783B$x6N___", encPW: "5PN+lp8XXalwRzCptI21hmT5S9FvvEYpD8chWa39akY6Srwl", key: "$pbkdf2-sha512$19000$knLuvReC8H7v/T8n5JwTwg$OsVGpDa/zpCE2pKEOsZ4/SqdxcQZ0UU6v41ev/gkk4ROsrws/4I03oHqN37k.v1d25QckESs3NlPxIUv5gTf2Q", - iv: []byte{0xe4, 0xf3, 0x7e, 0x96, 0x9f, 0x17, 0x5d, 0xa9, - 0x70, 0x47, 0x30, 0xa9, 0xb4, 0x8d, 0xb5, 0x86}, + iv: []byte{ + 0xe4, 0xf3, 0x7e, 0x96, 0x9f, 0x17, 0x5d, 0xa9, + 0x70, 0x47, 0x30, 0xa9, 0xb4, 0x8d, 0xb5, 0x86, + }, } func TestSymmetry(t *testing.T) { diff --git a/internal/pgadmin/doc.go b/internal/pgadmin/doc.go index 97900b0227..f822a2051c 100644 --- a/internal/pgadmin/doc.go +++ b/internal/pgadmin/doc.go @@ -4,7 +4,7 @@ database which powers pgadmin */ package pgadmin /* - Copyright 2020 Crunchy Data Solutions, Inc. + Copyright 2020 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at diff --git a/internal/pgadmin/hash.go b/internal/pgadmin/hash.go index b73222fb8b..4f7ffdd5e7 100644 --- a/internal/pgadmin/hash.go +++ b/internal/pgadmin/hash.go @@ -1,7 +1,7 @@ package pgadmin /* - Copyright 2020 Crunchy Data Solutions, Inc. + Copyright 2020 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -54,7 +54,7 @@ func HashPassword(qr *queryRunner, pass string) (string, error) { // Generate a "new" password derived from the provided password // Satisfies OWASP sec. 2.4.5: 'provide additional iteration of a key derivation' mac := hmac.New(sha512.New, saltBytes) - mac.Write([]byte(pass)) + _, _ = mac.Write([]byte(pass)) macBytes := mac.Sum(nil) macBase64 := base64.StdEncoding.EncodeToString(macBytes) diff --git a/internal/pgadmin/logic.go b/internal/pgadmin/logic.go index 68426ae91b..3632a6fbb8 100644 --- a/internal/pgadmin/logic.go +++ b/internal/pgadmin/logic.go @@ -1,7 +1,7 @@ package pgadmin /* - Copyright 2020 Crunchy Data Solutions, Inc. + Copyright 2020 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at diff --git a/internal/pgadmin/runner.go b/internal/pgadmin/runner.go index 233dc092be..85f4586c76 100644 --- a/internal/pgadmin/runner.go +++ b/internal/pgadmin/runner.go @@ -1,7 +1,7 @@ package pgadmin /* - Copyright 2020 Crunchy Data Solutions, Inc. + Copyright 2020 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at @@ -102,7 +102,7 @@ func (qr *queryRunner) EnsureReady() error { cmd, qr.Pod.Spec.Containers[0].Name, qr.Pod.Name, qr.Namespace, nil) if err != nil && !strings.Contains(stderr, "no such table") { - lastError = fmt.Errorf("%v - %v", err, stderr) + lastError = fmt.Errorf("%w - %v", err, stderr) nextRoundIn := qr.BackoffPolicy.Duration(i) log.Debugf("[InitWait attempt %02d]: %v - retry in %v", i, err, nextRoundIn) time.Sleep(nextRoundIn) @@ -121,7 +121,7 @@ func (qr *queryRunner) EnsureReady() error { } } if lastError != nil && output == "" { - return fmt.Errorf("error executing query: %v", lastError) + return fmt.Errorf("error executing query: %w", lastError) } return nil @@ -141,7 +141,7 @@ func (qr *queryRunner) Exec(query string) error { _, stderr, err := kubeapi.ExecToPodThroughAPI(qr.apicfg, qr.clientset, cmd, qr.Pod.Spec.Containers[0].Name, qr.Pod.Name, qr.Namespace, nil) if err != nil { - lastError = fmt.Errorf("%v - %v", err, stderr) + lastError = fmt.Errorf("%w - %v", err, stderr) nextRoundIn := qr.BackoffPolicy.Duration(i) log.Debugf("[Exec attempt %02d]: %v - retry in %v", i, err, nextRoundIn) time.Sleep(nextRoundIn) @@ -151,7 +151,7 @@ func (qr *queryRunner) Exec(query string) error { } } if lastError != nil { - return fmt.Errorf("error executing query: %vv", lastError) + return fmt.Errorf("error executing query: %w", lastError) } return nil @@ -178,7 +178,7 @@ func (qr *queryRunner) Query(query string) (string, error) { stdout, stderr, err := kubeapi.ExecToPodThroughAPI(qr.apicfg, qr.clientset, cmd, qr.Pod.Spec.Containers[0].Name, qr.Pod.Name, qr.Namespace, nil) if err != nil { - lastError = fmt.Errorf("%v - %v", err, stderr) + lastError = fmt.Errorf("%w - %v", err, stderr) nextRoundIn := qr.BackoffPolicy.Duration(i) log.Debugf("[Query attempt %02d]: %v - retry in %v", i, err, nextRoundIn) time.Sleep(nextRoundIn) @@ -189,7 +189,7 @@ func (qr *queryRunner) Query(query string) (string, error) { } } if lastError != nil && output == "" { - return "", fmt.Errorf("error executing query: %v", lastError) + return "", fmt.Errorf("error executing query: %w", lastError) } return output, nil diff --git a/internal/pgadmin/server.go b/internal/pgadmin/server.go index 26568e8806..8ee8cd4650 100644 --- a/internal/pgadmin/server.go +++ b/internal/pgadmin/server.go @@ -1,7 +1,7 @@ package pgadmin /* - Copyright 2020 Crunchy Data Solutions, Inc. + Copyright 2020 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at diff --git a/internal/postgres/doc.go b/internal/postgres/doc.go index 974cb7c8df..5066f642d4 100644 --- a/internal/postgres/doc.go +++ b/internal/postgres/doc.go @@ -5,7 +5,7 @@ package postgres /* - Copyright 2020 Crunchy Data Solutions, Inc. + Copyright 2020 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at diff --git a/internal/postgres/password/doc.go b/internal/postgres/password/doc.go index 6ea6563873..4ac3b5fd1d 100644 --- a/internal/postgres/password/doc.go +++ b/internal/postgres/password/doc.go @@ -4,7 +4,7 @@ package password /* - Copyright 2020 Crunchy Data Solutions, Inc. + Copyright 2020 - 2023 Crunchy Data Solutions, Inc. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at diff --git a/internal/postgres/password/md5.go b/internal/postgres/password/md5.go index 030fd21765..4973140fae 100644 --- a/internal/postgres/password/md5.go +++ b/internal/postgres/password/md5.go @@ -1,7 +1,7 @@ package password /* - Copyright 2020 Crunchy Data Solutions, Inc. + Copyright 2020 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -16,15 +16,14 @@ package password */ import ( + // #nosec G501 "crypto/md5" "errors" "fmt" ) -var ( - // ErrMD5PasswordInvalid is returned when the password attributes are invalid - ErrMD5PasswordInvalid = errors.New(`invalid password attributes. must provide "username" and "password"`) -) +// ErrMD5PasswordInvalid is returned when the password attributes are invalid +var ErrMD5PasswordInvalid = errors.New(`invalid password attributes. must provide "username" and "password"`) // MD5Password implements the PostgresPassword interface for hashing passwords // using the PostgreSQL MD5 method @@ -42,6 +41,7 @@ func (m *MD5Password) Build() (string, error) { plaintext := []byte(m.password + m.username) // finish the transformation by getting the string value of the MD5 hash and // encoding it in hexadecimal for PostgreSQL, appending "md5" to the front + // #nosec G401 return fmt.Sprintf("md5%x", md5.Sum(plaintext)), nil } diff --git a/internal/postgres/password/md5_test.go b/internal/postgres/password/md5_test.go index c77c8abf43..5f55a8a222 100644 --- a/internal/postgres/password/md5_test.go +++ b/internal/postgres/password/md5_test.go @@ -1,7 +1,7 @@ package password /* - Copyright 2020 Crunchy Data Solutions, Inc. + Copyright 2020 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -38,7 +38,6 @@ func TestMD5Build(t *testing.T) { } hash, err := md5.Build() - if err != nil { t.Error(err) } diff --git a/internal/postgres/password/password.go b/internal/postgres/password/password.go index b70112a4c3..a667921707 100644 --- a/internal/postgres/password/password.go +++ b/internal/postgres/password/password.go @@ -1,7 +1,7 @@ package password /* - Copyright 2020 Crunchy Data Solutions, Inc. + Copyright 2020 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -31,10 +31,8 @@ const ( SCRAM ) -var ( - // ErrPasswordType is returned when a password type does not exist - ErrPasswordType = errors.New("password type does not exist") -) +// ErrPasswordType is returned when a password type does not exist +var ErrPasswordType = errors.New("password type does not exist") // PostgresPassword is the interface that defines the methods required to build // a password for PostgreSQL in a desired format (e.g. MD5) diff --git a/internal/postgres/password/password_test.go b/internal/postgres/password/password_test.go index b9b7094dbc..dda95e1dc4 100644 --- a/internal/postgres/password/password_test.go +++ b/internal/postgres/password/password_test.go @@ -1,7 +1,7 @@ package password /* - Copyright 2020 Crunchy Data Solutions, Inc. 
+ Copyright 2020 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -16,6 +16,7 @@ package password */ import ( + "errors" "testing" ) @@ -27,7 +28,6 @@ func TestNewPostgresPassword(t *testing.T) { passwordType := MD5 postgresPassword, err := NewPostgresPassword(passwordType, username, password) - if err != nil { t.Error(err) } @@ -49,7 +49,6 @@ func TestNewPostgresPassword(t *testing.T) { passwordType := SCRAM postgresPassword, err := NewPostgresPassword(passwordType, username, password) - if err != nil { t.Error(err) } @@ -66,7 +65,7 @@ func TestNewPostgresPassword(t *testing.T) { t.Run("invalid", func(t *testing.T) { passwordType := PasswordType(-1) - if _, err := NewPostgresPassword(passwordType, username, password); err != ErrPasswordType { + if _, err := NewPostgresPassword(passwordType, username, password); !errors.Is(err, ErrPasswordType) { t.Errorf("expected error: %q", err.Error()) } }) diff --git a/internal/postgres/password/scram.go b/internal/postgres/password/scram.go index aa6eee3df8..6e0f75e22b 100644 --- a/internal/postgres/password/scram.go +++ b/internal/postgres/password/scram.go @@ -1,7 +1,7 @@ package password /* - Copyright 2020 Crunchy Data Solutions, Inc. + Copyright 2020 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -96,7 +96,6 @@ type SCRAMPassword struct { func (s *SCRAMPassword) Build() (string, error) { // get a generated salt salt, err := s.generateSalt(s.SaltLength) - if err != nil { return "", err } diff --git a/internal/postgres/password/scram_test.go b/internal/postgres/password/scram_test.go index 6de92bb17c..b568688e37 100644 --- a/internal/postgres/password/scram_test.go +++ b/internal/postgres/password/scram_test.go @@ -1,7 +1,7 @@ package password /* - Copyright 2020 Crunchy Data Solutions, Inc. + Copyright 2020 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -54,7 +54,6 @@ func TestScramGenerateSalt(t *testing.T) { for _, saltLength := range saltLengths { t.Run(fmt.Sprintf("salt length %d", saltLength), func(t *testing.T) { salt, err := scramGenerateSalt(saltLength) - if err != nil { t.Error(err) } @@ -71,7 +70,6 @@ func TestScramGenerateSalt(t *testing.T) { for _, saltLength := range saltLengths { t.Run(fmt.Sprintf("salt length %d", saltLength), func(t *testing.T) { - if _, err := scramGenerateSalt(saltLength); err == nil { t.Errorf("error expected for salt length of %d", saltLength) } @@ -82,7 +80,6 @@ func TestScramGenerateSalt(t *testing.T) { func TestSCRAMBuild(t *testing.T) { t.Run("scram-sha-256", func(t *testing.T) { - t.Run("valid", func(t *testing.T) { // check a few different password combinations. 
note: the salt is kept the // same so we can get a reproducible result @@ -104,7 +101,6 @@ func TestSCRAMBuild(t *testing.T) { scram.generateSalt = mockGenerateSalt hash, err := scram.Build() - if err != nil { t.Error(err) } @@ -152,7 +148,7 @@ func TestSCRAMHash(t *testing.T) { expected, _ := hex.DecodeString("877cc977e7b033e10d6e0b0d666da1f463bc51b1de48869250a0347ec1b2b8b3") actual := scram.hash(sha256.New, []byte("hippo")) - if bytes.Compare(expected, actual) != 0 { + if !bytes.Equal(expected, actual) { t.Errorf("expected: %x actual %x", expected, actual) } }) @@ -164,7 +160,7 @@ func TestSCRAMHMAC(t *testing.T) { expected, _ := hex.DecodeString("ac9872eb21043142c3bf073c9fa4caf9553940750ef7b85116905aaa456a2d07") actual := scram.hmac(sha256.New, []byte("hippo"), []byte("datalake")) - if bytes.Compare(expected, actual) != 0 { + if !bytes.Equal(expected, actual) { t.Errorf("expected: %x actual %x", expected, actual) } }) diff --git a/internal/tlsutil/primitives.go b/internal/tlsutil/primitives.go index 03fb73f744..5059d93142 100644 --- a/internal/tlsutil/primitives.go +++ b/internal/tlsutil/primitives.go @@ -1,7 +1,7 @@ package tlsutil /* -Copyright 2019 - 2020 Crunchy Data Solutions, Inc. +Copyright 2019 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -16,8 +16,9 @@ limitations under the License. */ import ( + "crypto/ecdsa" + "crypto/elliptic" "crypto/rand" - "crypto/rsa" "crypto/x509" "encoding/pem" "errors" @@ -29,20 +30,20 @@ import ( ) const ( - rsaKeySize = 2048 duration365d = time.Hour * 24 * 365 ) // newPrivateKey returns randomly generated RSA private key. -func NewPrivateKey() (*rsa.PrivateKey, error) { - return rsa.GenerateKey(rand.Reader, rsaKeySize) +func NewPrivateKey() (*ecdsa.PrivateKey, error) { + return ecdsa.GenerateKey(elliptic.P256(), rand.Reader) } // encodePrivateKeyPEM encodes the given private key pem and returns bytes (base64). -func EncodePrivateKeyPEM(key *rsa.PrivateKey) []byte { +func EncodePrivateKeyPEM(key *ecdsa.PrivateKey) []byte { + raw, _ := x509.MarshalECPrivateKey(key) return pem.EncodeToMemory(&pem.Block{ - Type: "RSA PRIVATE KEY", - Bytes: x509.MarshalPKCS1PrivateKey(key), + Type: "EC PRIVATE KEY", + Bytes: raw, }) } @@ -64,17 +65,17 @@ func ParsePEMEncodedCert(pemdata []byte) (*x509.Certificate, error) { } // parsePEMEncodedPrivateKey parses a private key from given pemdata -func ParsePEMEncodedPrivateKey(pemdata []byte) (*rsa.PrivateKey, error) { +func ParsePEMEncodedPrivateKey(pemdata []byte) (*ecdsa.PrivateKey, error) { decoded, _ := pem.Decode(pemdata) if decoded == nil { return nil, errors.New("no PEM data found") } - return x509.ParsePKCS1PrivateKey(decoded.Bytes) + return x509.ParseECPrivateKey(decoded.Bytes) } // newSelfSignedCACertificate returns a self-signed CA certificate based on given configuration and private key. // The certificate has one-year lease. 
-func NewSelfSignedCACertificate(key *rsa.PrivateKey) (*x509.Certificate, error) { +func NewSelfSignedCACertificate(key *ecdsa.PrivateKey) (*x509.Certificate, error) { serial, err := rand.Int(rand.Reader, new(big.Int).SetInt64(math.MaxInt64)) if err != nil { return nil, err @@ -87,6 +88,7 @@ func NewSelfSignedCACertificate(key *rsa.PrivateKey) (*x509.Certificate, error) KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, BasicConstraintsValid: true, IsCA: true, + SignatureAlgorithm: x509.ECDSAWithSHA384, } certDERBytes, err := x509.CreateCertificate(rand.Reader, &tmpl, &tmpl, key.Public(), key) if err != nil { diff --git a/internal/tlsutil/primitives_test.go b/internal/tlsutil/primitives_test.go index 22676e9fbc..fc18100446 100644 --- a/internal/tlsutil/primitives_test.go +++ b/internal/tlsutil/primitives_test.go @@ -1,7 +1,7 @@ package tlsutil /* -Copyright 2019 - 2020 Crunchy Data Solutions, Inc. +Copyright 2019 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -17,7 +17,7 @@ limitations under the License. import ( "bytes" - "crypto/rsa" + "context" "crypto/tls" "crypto/x509" "encoding/base64" @@ -42,7 +42,7 @@ func TestKeyPEMSymmetry(t *testing.T) { t.Log(base64.StdEncoding.EncodeToString(pemKey)) - if !keysEq(oldKey, newKey) { + if !(oldKey.Equal(newKey) && oldKey.PublicKey.Equal(newKey.Public())) { t.Fatal("Decoded key did not match its input source") } } @@ -93,8 +93,9 @@ func TestExtendedTrust(t *testing.T) { defer srv.Close() caTrust := x509.NewCertPool() - ExtendTrust(caTrust, bytes.NewReader(pemCert)) + _ = ExtendTrust(caTrust, bytes.NewReader(pemCert)) + // #nosec G402 srv.TLS = &tls.Config{ ServerName: "Stom", ClientAuth: tls.RequireAndVerifyClientCert, @@ -111,6 +112,7 @@ func TestExtendedTrust(t *testing.T) { } client := srv.Client() + // #nosec G402 client.Transport = &http.Transport{ TLSClientConfig: &tls.Config{ Certificates: []tls.Certificate{ @@ -122,7 +124,12 @@ func TestExtendedTrust(t *testing.T) { } // Confirm server response - res, err := client.Get(srv.URL) + req, err := http.NewRequestWithContext(context.TODO(), http.MethodGet, srv.URL, nil) + if err != nil { + t.Fatalf("error getting request - %s\n", err) + } + + res, err := client.Do(req) if err != nil { t.Fatalf("error getting response - %s\n", err) } @@ -137,30 +144,3 @@ func TestExtendedTrust(t *testing.T) { t.Fatalf("expected [%s], got [%s] instead\n", expected, recv) } } - -func keysEq(a, b *rsa.PrivateKey) bool { - if a.E != b.E { - // PublicKey exponent different - return false - } - if a.N.Cmp(b.N) != 0 { - // PublicKey modulus different - return false - } - if a.D.Cmp(b.D) != 0 { - // PrivateKey exponent different - return false - } - if len(a.Primes) != len(b.Primes) { - // Prime factor difference (Tier 1) - return false - } - for i, aPrime := range a.Primes { - if aPrime.Cmp(b.Primes[i]) != 0 { - // Prime factor difference (Tier 2) - return false - } - } - - return true -} diff --git a/internal/util/backrest.go b/internal/util/backrest.go index 66e3a2dec6..123592c7f3 100644 --- a/internal/util/backrest.go +++ b/internal/util/backrest.go @@ -1,7 +1,7 @@ package util /* - Copyright 2019 - 2020 Crunchy Data Solutions, Inc. + Copyright 2019 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at @@ -15,82 +15,10 @@ package util limitations under the License. */ -import ( - "errors" - "fmt" - "strings" - - crv1 "github.com/crunchydata/postgres-operator/pkg/apis/crunchydata.com/v1" -) - const ( BackrestRepoDeploymentName = "%s-backrest-shared-repo" BackrestRepoServiceName = "%s-backrest-shared-repo" BackrestRepoPVCName = "%s-pgbr-repo" - BackrestRepoSecretName = "%s-backrest-repo-config" + // #nosec: G101 + BackrestRepoSecretName = "%s-backrest-repo-config" ) - -// defines the default repo1-path for pgBackRest for use when a specic path is not provided -// in the pgcluster CR. The '%s' format verb will be replaced with the cluster name when this -// variable is utilized -const defaultBackrestRepoPath = "/backrestrepo/%s-backrest-shared-repo" - -// ValidateBackrestStorageTypeOnBackupRestore checks to see if the pgbackrest storage type provided -// when performing either pgbackrest backup or restore is valid. This includes ensuring the value -// provided is a valid storage type (e.g. "s3" and/or "local"). This also includes ensuring the -// storage type specified (e.g. "s3" or "local") is enabled in the current cluster. And finally, -// validation is ocurring for a restore, the ensure only one storage type is selected. -func ValidateBackrestStorageTypeOnBackupRestore(newBackRestStorageType, - currentBackRestStorageType string, restore bool) error { - - if newBackRestStorageType != "" && !IsValidBackrestStorageType(newBackRestStorageType) { - return fmt.Errorf("Invalid value provided for pgBackRest storage type. The following "+ - "values are allowed: %s", "\""+strings.Join(crv1.BackrestStorageTypes, "\", \"")+"\"") - } else if newBackRestStorageType != "" && - strings.Contains(newBackRestStorageType, "s3") && - !strings.Contains(currentBackRestStorageType, "s3") { - return errors.New("Storage type 's3' not allowed. S3 storage is not enabled for " + - "pgBackRest in this cluster") - } else if (newBackRestStorageType == "" || - strings.Contains(newBackRestStorageType, "local")) && - (currentBackRestStorageType != "" && - !strings.Contains(currentBackRestStorageType, "local")) { - return errors.New("Storage type 'local' not allowed. Local storage is not enabled for " + - "pgBackRest in this cluster. If this cluster uses S3 storage only, specify 's3' " + - "for the pgBackRest storage type.") - } - - // storage type validation that is only applicable for restores - if restore && newBackRestStorageType != "" && - len(strings.Split(newBackRestStorageType, ",")) > 1 { - return fmt.Errorf("Multiple storage types cannot be selected cannot be select when "+ - "performing a restore. Please select one of the following: %s", - "\""+strings.Join(crv1.BackrestStorageTypes, "\", \"")+"\"") - } - - return nil -} - -// IsValidBackrestStorageType determines if the storage source string contains valid pgBackRest -// storage type values -func IsValidBackrestStorageType(storageType string) bool { - isValid := true - for _, storageType := range strings.Split(storageType, ",") { - if !IsStringOneOf(storageType, crv1.BackrestStorageTypes...) { - isValid = false - break - } - } - return isValid -} - -// GetPGBackRestRepoPath is responsible for determining the repo path setting (i.e. 'repo1-path' -// flag) for use by pgBackRest. If a specific repo path has been defined in the pgcluster CR, -// then that path will be returned. Otherwise a default path will be returned, which is generated -// using the 'defaultBackrestRepoPath' constant and the cluster name. 
-func GetPGBackRestRepoPath(cluster crv1.Pgcluster) string { - if cluster.Spec.BackrestRepoPath != "" { - return cluster.Spec.BackrestRepoPath - } - return fmt.Sprintf(defaultBackrestRepoPath, cluster.Name) -} diff --git a/internal/util/cluster.go b/internal/util/cluster.go index b0b72ea6dd..3cc68e7e41 100644 --- a/internal/util/cluster.go +++ b/internal/util/cluster.go @@ -1,7 +1,7 @@ package util /* - Copyright 2019 - 2020 Crunchy Data Solutions, Inc. + Copyright 2019 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -17,6 +17,7 @@ package util import ( "context" + "encoding/json" "errors" "fmt" "strconv" @@ -28,7 +29,9 @@ import ( log "github.com/sirupsen/logrus" v1 "k8s.io/api/core/v1" + kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/validation" "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" ) @@ -68,14 +71,20 @@ const ( const ( // three of these are exported, as they are used to help add the information // into the templates. Say the last one 10 times fast - BackRestRepoSecretKeyAWSS3KeyAWSS3CACert = "aws-s3-ca.crt" - BackRestRepoSecretKeyAWSS3KeyAWSS3Key = "aws-s3-key" + // #nosec: G101 + BackRestRepoSecretKeyAWSS3KeyAWSS3CACert = "aws-s3-ca.crt" + // #nosec: G101 + BackRestRepoSecretKeyAWSS3KeyAWSS3Key = "aws-s3-key" + // #nosec: G101 BackRestRepoSecretKeyAWSS3KeyAWSS3KeySecret = "aws-s3-key-secret" // the rest are private - backRestRepoSecretKeyAuthorizedKeys = "authorized_keys" - backRestRepoSecretKeySSHConfig = "config" - backRestRepoSecretKeySSHDConfig = "sshd_config" - backRestRepoSecretKeySSHPrivateKey = "id_ed25519" + backRestRepoSecretKeyAuthorizedKeys = "authorized_keys" + backRestRepoSecretKeySSHConfig = "config" + // #nosec: G101 + backRestRepoSecretKeySSHDConfig = "sshd_config" + // #nosec: G101 + backRestRepoSecretKeySSHPrivateKey = "id_ed25519" + // #nosec: G101 backRestRepoSecretKeySSHHostPrivateKey = "ssh_host_ed25519_key" ) @@ -93,108 +102,223 @@ const ( // // The escaping for SQL injections is handled in the SetPostgreSQLPassword // function + // #nosec: G101 sqlSetPasswordDefault = `ALTER ROLE %s PASSWORD %s;` ) var ( + // ErrLabelInvalid indicates that a label is invalid + ErrLabelInvalid = errors.New("invalid label") // ErrMissingConfigAnnotation represents an error thrown when the 'config' annotation is found // to be missing from the 'config' configMap created to store cluster-wide configuration ErrMissingConfigAnnotation error = errors.New("'config' annotation missing from cluster " + "configutation") ) -var ( - // CmdStopPostgreSQL is the command used to stop a PostgreSQL instance, which - // uses the "fast" shutdown mode. This needs a data directory appended to it - cmdStopPostgreSQL = []string{"pg_ctl", "stop", - "-m", "fast", "-D", - } -) +// CmdStopPostgreSQL is the command used to stop a PostgreSQL instance, which +// uses the "fast" shutdown mode. 
This needs a data directory appended to it +var cmdStopPostgreSQL = []string{ + "pg_ctl", "stop", + "-m", "fast", "-D", +} // CreateBackrestRepoSecrets creates the secrets required to manage the // pgBackRest repo container func CreateBackrestRepoSecrets(clientset kubernetes.Interface, - backrestRepoConfig BackrestRepoConfig) error { + backrestRepoConfig BackrestRepoConfig) (*v1.Secret, error) { ctx := context.TODO() - keys, err := NewPrivatePublicKeyPair() - if err != nil { - return err + // first: determine if a Secret already exists. If it does, we are going to + // work on modifying that Secret. + secretName := fmt.Sprintf("%s-%s", backrestRepoConfig.ClusterName, + config.LABEL_BACKREST_REPO_SECRET) + secret, secretErr := clientset.CoreV1().Secrets(backrestRepoConfig.ClusterNamespace).Get( + ctx, secretName, metav1.GetOptions{}) + + // only return an error if this is **not** a "not found" error + if secretErr != nil && !kerrors.IsNotFound(secretErr) { + log.Error(secretErr) + return nil, secretErr } - // Retrieve the S3/SSHD configuration files from secret - configs, err := clientset. + // determine if we need to create a new secret, i.e. this is a not found error + newSecret := secretErr != nil + if newSecret { + // set up the secret for the cluster that contains the pgBackRest information + secret = &v1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: secretName, + Labels: map[string]string{ + config.LABEL_VENDOR: config.LABEL_CRUNCHY, + config.LABEL_PG_CLUSTER: backrestRepoConfig.ClusterName, + config.LABEL_PGO_BACKREST_REPO: "true", + }, + }, + Data: map[string][]byte{}, + } + } + + // next, load the Operator level pgBackRest secret templates, which contain + // the SSHD configuration and possible S3 credentials + configs, configErr := clientset. CoreV1().Secrets(backrestRepoConfig.OperatorNamespace). - Get(ctx, "pgo-backrest-repo-config", metav1.GetOptions{}) + Get(ctx, config.SecretOperatorBackrestRepoConfig, metav1.GetOptions{}) - if err != nil { - log.Error(err) - return err + if configErr != nil { + log.Error(configErr) + return nil, configErr } - // if an S3 key has been provided via the request, then use key and key secret - // included in the request instead of the default credentials that are - // available in the Operator pgBackRest secret - backrestS3Key := []byte(backrestRepoConfig.BackrestS3Key) + // set the SSH/SSHD configuration, if it is not presently set + for _, key := range []string{backRestRepoSecretKeySSHConfig, backRestRepoSecretKeySSHDConfig} { + if len(secret.Data[key]) == 0 { + secret.Data[key] = configs.Data[key] + } + } - if backrestRepoConfig.BackrestS3Key == "" { - backrestS3Key = configs.Data[BackRestRepoSecretKeyAWSS3KeyAWSS3Key] + // set the SSH keys if any appear to be unset + if len(secret.Data[backRestRepoSecretKeyAuthorizedKeys]) == 0 || + len(secret.Data[backRestRepoSecretKeySSHPrivateKey]) == 0 || + len(secret.Data[backRestRepoSecretKeySSHHostPrivateKey]) == 0 { + // generate the keypair and then assign it to the values in the Secret + keys, keyErr := NewPrivatePublicKeyPair() + + if keyErr != nil { + log.Error(keyErr) + return nil, keyErr + } + + secret.Data[backRestRepoSecretKeyAuthorizedKeys] = keys.Public + secret.Data[backRestRepoSecretKeySSHPrivateKey] = keys.Private + secret.Data[backRestRepoSecretKeySSHHostPrivateKey] = keys.Private } - backrestS3KeySecret := []byte(backrestRepoConfig.BackrestS3KeySecret) + // Set the S3 credentials + // If explicit S3 credentials are passed in, use those. + // If the Secret already has S3 credentials, use those.
+ // Otherwise, try to load in the default credentials from the Operator Secret. + if len(backrestRepoConfig.BackrestS3CA) != 0 { + secret.Data[BackRestRepoSecretKeyAWSS3KeyAWSS3CACert] = backrestRepoConfig.BackrestS3CA + } - if backrestRepoConfig.BackrestS3KeySecret == "" { - backrestS3KeySecret = configs.Data[BackRestRepoSecretKeyAWSS3KeyAWSS3KeySecret] + if len(secret.Data[BackRestRepoSecretKeyAWSS3KeyAWSS3CACert]) == 0 && + len(configs.Data[BackRestRepoSecretKeyAWSS3KeyAWSS3CACert]) != 0 { + secret.Data[BackRestRepoSecretKeyAWSS3KeyAWSS3CACert] = configs.Data[BackRestRepoSecretKeyAWSS3KeyAWSS3CACert] } - // determine if there is a CA override provided, and if not, use the default - // from the configuration - caCert := backrestRepoConfig.BackrestS3CA - if len(caCert) == 0 { - caCert = configs.Data[BackRestRepoSecretKeyAWSS3KeyAWSS3CACert] + if backrestRepoConfig.BackrestS3Key != "" { + secret.Data[BackRestRepoSecretKeyAWSS3KeyAWSS3Key] = []byte(backrestRepoConfig.BackrestS3Key) } - // set up the secret for the cluster that contains the pgBackRest information - secret := v1.Secret{ + if len(secret.Data[BackRestRepoSecretKeyAWSS3KeyAWSS3Key]) == 0 && + len(configs.Data[BackRestRepoSecretKeyAWSS3KeyAWSS3Key]) != 0 { + secret.Data[BackRestRepoSecretKeyAWSS3KeyAWSS3Key] = configs.Data[BackRestRepoSecretKeyAWSS3KeyAWSS3Key] + } + + if backrestRepoConfig.BackrestS3KeySecret != "" { + secret.Data[BackRestRepoSecretKeyAWSS3KeyAWSS3KeySecret] = []byte(backrestRepoConfig.BackrestS3KeySecret) + } + + if len(secret.Data[BackRestRepoSecretKeyAWSS3KeyAWSS3KeySecret]) == 0 && + len(configs.Data[BackRestRepoSecretKeyAWSS3KeyAWSS3KeySecret]) != 0 { + secret.Data[BackRestRepoSecretKeyAWSS3KeyAWSS3KeySecret] = configs.Data[BackRestRepoSecretKeyAWSS3KeyAWSS3KeySecret] + } + + // time to create or update the secret! + var repoSecret *v1.Secret + var err error + if newSecret { + repoSecret, err = clientset.CoreV1().Secrets(backrestRepoConfig.ClusterNamespace).Create( + ctx, secret, metav1.CreateOptions{}) + } else { + repoSecret, err = clientset.CoreV1().Secrets(backrestRepoConfig.ClusterNamespace).Update( + ctx, secret, metav1.UpdateOptions{}) + } + + return repoSecret, err +} + +// CreateRMDataTask is a legacy method that was moved into this file. This +// spawns the "pgo-rmdata" task which cleans up assets related to removing an +// individual instance or a cluster. I cleaned up the code slightly. 
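The Secret handling above follows the usual get-then-create-or-update flow against the Kubernetes API. A condensed sketch of just that skeleton, exercised against the client-go fake clientset; ensureSecret is a hypothetical helper and the pgBackRest-specific Data handling is elided:

package main

import (
	"context"
	"fmt"

	v1 "k8s.io/api/core/v1"
	kerrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/fake"
)

// ensureSecret shows the Get / IsNotFound / Create-or-Update skeleton used by
// CreateBackrestRepoSecrets, with the Secret's Data reconciliation elided.
func ensureSecret(clientset kubernetes.Interface, ns, name string) (*v1.Secret, error) {
	ctx := context.TODO()

	secret, err := clientset.CoreV1().Secrets(ns).Get(ctx, name, metav1.GetOptions{})
	if err != nil && !kerrors.IsNotFound(err) {
		return nil, err // a real API error; "not found" just means create below
	}

	if err != nil { // not found: build a fresh Secret and create it
		secret = &v1.Secret{
			ObjectMeta: metav1.ObjectMeta{Name: name},
			Data:       map[string][]byte{},
		}
		return clientset.CoreV1().Secrets(ns).Create(ctx, secret, metav1.CreateOptions{})
	}

	// found: reconcile the existing Secret in place and update it
	return clientset.CoreV1().Secrets(ns).Update(ctx, secret, metav1.UpdateOptions{})
}

func main() {
	clientset := fake.NewSimpleClientset()
	s, err := ensureSecret(clientset, "pgo", "hippo-backrest-repo-config")
	fmt.Println(s.Name, err) // hippo-backrest-repo-config <nil>
}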
+func CreateRMDataTask(clientset kubeapi.Interface, cluster *crv1.Pgcluster, replicaName string, deleteBackups, deleteData, isReplica, isBackup bool) error { + ctx := context.TODO() + taskName := cluster.Name + "-rmdata" + if replicaName != "" { + taskName = replicaName + "-rmdata" + } + + // create pgtask CRD + task := &crv1.Pgtask{ ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf("%s-%s", backrestRepoConfig.ClusterName, - config.LABEL_BACKREST_REPO_SECRET), + Name: taskName, Labels: map[string]string{ - config.LABEL_VENDOR: config.LABEL_CRUNCHY, - config.LABEL_PG_CLUSTER: backrestRepoConfig.ClusterName, - config.LABEL_PGO_BACKREST_REPO: "true", + config.LABEL_PG_CLUSTER: cluster.Name, + config.LABEL_RMDATA: "true", }, }, - Data: map[string][]byte{ - BackRestRepoSecretKeyAWSS3KeyAWSS3CACert: caCert, - BackRestRepoSecretKeyAWSS3KeyAWSS3Key: backrestS3Key, - BackRestRepoSecretKeyAWSS3KeyAWSS3KeySecret: backrestS3KeySecret, - backRestRepoSecretKeyAuthorizedKeys: keys.Public, - backRestRepoSecretKeySSHConfig: configs.Data[backRestRepoSecretKeySSHConfig], - backRestRepoSecretKeySSHDConfig: configs.Data[backRestRepoSecretKeySSHDConfig], - backRestRepoSecretKeySSHPrivateKey: keys.Private, - backRestRepoSecretKeySSHHostPrivateKey: keys.Private, + Spec: crv1.PgtaskSpec{ + Name: taskName, + Namespace: cluster.Namespace, + Parameters: map[string]string{ + config.LABEL_DELETE_DATA: strconv.FormatBool(deleteData), + config.LABEL_DELETE_BACKUPS: strconv.FormatBool(deleteBackups), + config.LABEL_IMAGE_PREFIX: cluster.Spec.PGOImagePrefix, + config.LABEL_IS_REPLICA: strconv.FormatBool(isReplica), + config.LABEL_IS_BACKUP: strconv.FormatBool(isBackup), + config.LABEL_PG_CLUSTER: cluster.Name, + config.LABEL_REPLICA_NAME: replicaName, + config.LABEL_PGHA_SCOPE: cluster.ObjectMeta.GetLabels()[config.LABEL_PGHA_SCOPE], + config.LABEL_RM_TOLERATIONS: GetTolerations(cluster.Spec.Tolerations), + }, + TaskType: crv1.PgtaskDeleteData, }, } - _, err = clientset.CoreV1().Secrets(backrestRepoConfig.ClusterNamespace). - Create(ctx, &secret, metav1.CreateOptions{}) - if kubeapi.IsAlreadyExists(err) { - _, err = clientset.CoreV1().Secrets(backrestRepoConfig.ClusterNamespace). - Update(ctx, &secret, metav1.UpdateOptions{}) + if _, err := clientset.CrunchydataV1().Pgtasks(cluster.Namespace).Create(ctx, task, metav1.CreateOptions{}); err != nil { + log.Error(err) + return err } - return err -} -// IsAutofailEnabled - returns true if autofail label is set to true, false if not. -func IsAutofailEnabled(cluster *crv1.Pgcluster) bool { + return nil +} - labels := cluster.ObjectMeta.Labels - failLabel := labels[config.LABEL_AUTOFAIL] +// GenerateNodeAffinity creates a Kubernetes node affinity object suitable for +// storage on our custom resource. It supports both required and preferred +// node affinity, and can be expanded to support more complex rules +func GenerateNodeAffinity(affinityType crv1.NodeAffinityType, key string, values []string) *v1.NodeAffinity { + nodeAffinity := &v1.NodeAffinity{} + // generate the selector requirement, which at this point is just the + // "node label is in" requirement + requirement := v1.NodeSelectorRequirement{ + Key: key, + Values: values, + Operator: v1.NodeSelectorOpIn, + } - log.Debugf("IsAutoFailEnabled: %s", failLabel) + // build out the node affinity based on whether or not this is required or + // preferred (the default) + if affinityType == crv1.NodeAffinityTypeRequired { + // build the required affinity term.
+		nodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution = &v1.NodeSelector{
+			NodeSelectorTerms: make([]v1.NodeSelectorTerm, 1),
+		}
+		nodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0] = v1.NodeSelectorTerm{
+			MatchExpressions: []v1.NodeSelectorRequirement{requirement},
+		}
+	} else {
+		// build the preferred affinity term.
+		term := v1.PreferredSchedulingTerm{
+			Weight: crv1.NodeAffinityDefaultWeight,
+			Preference: v1.NodeSelectorTerm{
+				MatchExpressions: []v1.NodeSelectorRequirement{requirement},
+			},
+		}
+		nodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution = []v1.PreferredSchedulingTerm{term}
+	}
 
-	return failLabel == "true"
+	// return the node affinity rule
+	return nodeAffinity
 }
 
 // GeneratedPasswordValidUntilDays returns the value for the number of days that
@@ -207,7 +331,6 @@ func GeneratedPasswordValidUntilDays(configuredValidUntilDays string) int {
 	// note that "configuredValidUntilDays" may be an empty string, and as such
 	// the below line could fail. That's ok though! as we have a default set up
 	validUntilDays, err := strconv.Atoi(configuredValidUntilDays)
-
 	// if there is an error...set it to a default
 	if err != nil {
 		validUntilDays = DefaultPasswordValidUntilDays
@@ -228,7 +351,6 @@ func GetPrimaryPod(clientset kubernetes.Interface, cluster *crv1.Pgcluster) (*v1
 	// query the pods
 	pods, err := clientset.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{LabelSelector: selector})
-
 	// if there is an error, log it and abort
 	if err != nil {
 		return nil, err
@@ -236,8 +358,7 @@ func GetPrimaryPod(clientset kubernetes.Interface, cluster *crv1.Pgcluster) (*v1
 	// if no pods are returned, then also raise an error
 	if len(pods.Items) == 0 {
-		err := errors.New(fmt.Sprintf("primary pod not found for selector [%s]", selector))
-		return nil, err
+		return nil, fmt.Errorf("primary pod not found for selector %q", selector)
 	}
 
 	// Grab the first pod from the list as this is presumably the primary pod
@@ -253,7 +374,6 @@ func GetS3CredsFromBackrestRepoSecret(clientset kubernetes.Interface, namespace,
 	s3Secret := AWSS3Secret{}
 
 	secret, err := clientset.CoreV1().Secrets(namespace).Get(ctx, secretName, metav1.GetOptions{})
-
 	if err != nil {
 		log.Error(err)
 		return s3Secret, err
@@ -267,6 +387,25 @@ func GetS3CredsFromBackrestRepoSecret(clientset kubernetes.Interface, namespace,
 	return s3Secret, nil
 }
 
+// GetTolerations returns any tolerations defined for the cluster as a
+// JSON-formatted string. Otherwise, it returns an empty string
+func GetTolerations(tolerations []v1.Toleration) string {
+	// if no tolerations, exit early
+	if len(tolerations) == 0 {
+		return ""
+	}
+
+	// turn into a JSON string
+	s, err := json.MarshalIndent(tolerations, "", " ")
+
+	if err != nil {
+		log.Errorf("%s: returning empty string", err.Error())
+		return ""
+	}
+
+	return string(s)
+}
+
 // SetPostgreSQLPassword updates the password for a PostgreSQL role in the
 // PostgreSQL cluster by executing into the primary Pod and changing it
 //
@@ -328,3 +467,47 @@ func StopPostgreSQLInstance(clientset kubernetes.Interface, restconfig *rest.Con
 
 	return nil
 }
+
+// ValidateLabels validates if the input is a valid Kubernetes label.
+//
+// A label is composed of a key and value.
+//
+// The key can either be a name or have an optional prefix that is
+// terminated by a "/", e.g. "prefix/name"
+//
+// The name must be a valid DNS 1123 value
+// The prefix must be a valid DNS 1123 subdomain
+//
+// The value can be validated by machinery provided by Kubernetes
+//
+// Ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
+func ValidateLabels(labels map[string]string) error {
+	for k, v := range labels {
+		// first handle the key
+		keyParts := strings.Split(k, "/")
+
+		switch len(keyParts) {
+		default:
+			return fmt.Errorf("%w: invalid key %s", ErrLabelInvalid, k)
+		case 2:
+			if errs := validation.IsDNS1123Subdomain(keyParts[0]); len(errs) > 0 {
+				return fmt.Errorf("%w: invalid key %s: %s", ErrLabelInvalid, k, strings.Join(errs, ","))
+			}
+
+			if errs := validation.IsDNS1123Label(keyParts[1]); len(errs) > 0 {
+				return fmt.Errorf("%w: invalid key %s: %s", ErrLabelInvalid, k, strings.Join(errs, ","))
+			}
+		case 1:
+			if errs := validation.IsDNS1123Label(keyParts[0]); len(errs) > 0 {
+				return fmt.Errorf("%w: invalid key %s: %s", ErrLabelInvalid, k, strings.Join(errs, ","))
+			}
+		}
+
+		// handle the value
+		if errs := validation.IsValidLabelValue(v); len(errs) > 0 {
+			return fmt.Errorf("%w: invalid value %s: %s", ErrLabelInvalid, v, strings.Join(errs, ","))
+		}
+	}
+
+	return nil
+}
diff --git a/internal/util/cluster_test.go b/internal/util/cluster_test.go
new file mode 100644
index 0000000000..b12f441b7f
--- /dev/null
+++ b/internal/util/cluster_test.go
@@ -0,0 +1,160 @@
+package util
+
+/*
+Copyright 2020 - 2023 Crunchy Data Solutions, Inc.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+import (
+	"errors"
+	"reflect"
+	"testing"
+
+	crv1 "github.com/crunchydata/postgres-operator/pkg/apis/crunchydata.com/v1"
+
+	v1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/labels"
+)
+
+func TestGenerateNodeAffinity(t *testing.T) {
+	// presently only one rule is allowed, so we are testing for that.
future + // tests may need to expand upon that + t.Run("preferred", func(t *testing.T) { + affinityType := crv1.NodeAffinityTypePreferred + key := "foo" + values := []string{"bar", "baz"} + + affinity := GenerateNodeAffinity(affinityType, key, values) + + if affinity.RequiredDuringSchedulingIgnoredDuringExecution != nil { + t.Fatalf("expected required node affinity to not be set") + } + + if len(affinity.PreferredDuringSchedulingIgnoredDuringExecution) == 0 { + t.Fatalf("expected preferred node affinity to be set") + } else if len(affinity.PreferredDuringSchedulingIgnoredDuringExecution) > 1 { + t.Fatalf("only expected one rule to be set") + } + + term := affinity.PreferredDuringSchedulingIgnoredDuringExecution[0] + + if term.Weight != crv1.NodeAffinityDefaultWeight { + t.Fatalf("expected weight %d actual %d", crv1.NodeAffinityDefaultWeight, term.Weight) + } + + if len(term.Preference.MatchExpressions) == 0 { + t.Fatalf("expected a match expression to be set") + } else if len(term.Preference.MatchExpressions) > 1 { + t.Fatalf("expected only one match expression to be set") + } + + rule := term.Preference.MatchExpressions[0] + + if rule.Operator != v1.NodeSelectorOpIn { + t.Fatalf("operator expected %s actual %s", v1.NodeSelectorOpIn, rule.Operator) + } + + if rule.Key != key { + t.Fatalf("key expected %s actual %s", key, rule.Key) + } + + if !reflect.DeepEqual(rule.Values, values) { + t.Fatalf("values expected %v actual %v", values, rule.Values) + } + }) + + t.Run("required", func(t *testing.T) { + affinityType := crv1.NodeAffinityTypeRequired + key := "foo" + values := []string{"bar", "baz"} + + affinity := GenerateNodeAffinity(affinityType, key, values) + + if len(affinity.PreferredDuringSchedulingIgnoredDuringExecution) != 0 { + t.Fatalf("expected preferred node affinity to not be set") + } + + if affinity.RequiredDuringSchedulingIgnoredDuringExecution == nil { + t.Fatalf("expected required node affinity to be set") + } + + if len(affinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms) == 0 { + t.Fatalf("expected required node affinity to have at least one rule.") + } else if len(affinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms) > 1 { + t.Fatalf("expected required node affinity to have only one rule.") + } + + term := affinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0] + + if len(term.MatchExpressions) == 0 { + t.Fatalf("expected a match expression to be set") + } else if len(term.MatchExpressions) > 1 { + t.Fatalf("expected only one match expression to be set") + } + + rule := term.MatchExpressions[0] + + if rule.Operator != v1.NodeSelectorOpIn { + t.Fatalf("operator expected %s actual %s", v1.NodeSelectorOpIn, rule.Operator) + } + + if rule.Key != key { + t.Fatalf("key expected %s actual %s", key, rule.Key) + } + + if !reflect.DeepEqual(rule.Values, values) { + t.Fatalf("values expected %v actual %v", values, rule.Values) + } + }) +} + +func TestValidateLabels(t *testing.T) { + t.Run("valid", func(t *testing.T) { + inputs := []map[string]string{ + {"key": "value"}, + {"example.com/key": "value"}, + {"key1": "value1", "key2": "value2"}, + } + + for _, input := range inputs { + t.Run(labels.FormatLabels(input), func(*testing.T) { + err := ValidateLabels(input) + + if err != nil { + t.Fatalf("expected no error, got: %s", err.Error()) + } + }) + } + }) + + t.Run("invalid", func(t *testing.T) { + inputs := []map[string]string{ + {"key=value": "value"}, + {"key": "value", "": ""}, + {"b@d": "value"}, + {"b@d-prefix/key": 
"value"}, + {"really/bad/prefix/key": "value"}, + {"key": "v\\alue"}, + } + + for _, input := range inputs { + t.Run(labels.FormatLabels(input), func(t *testing.T) { + err := ValidateLabels(input) + + if !errors.Is(err, ErrLabelInvalid) { + t.Fatalf("expected an ErrLabelInvalid error, got %T: %v", err, err) + } + }) + } + }) +} diff --git a/internal/util/exporter.go b/internal/util/exporter.go new file mode 100644 index 0000000000..f8d1a42447 --- /dev/null +++ b/internal/util/exporter.go @@ -0,0 +1,29 @@ +package util + +/* + Copyright 2020 - 2023 Crunchy Data Solutions, Inc. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +import "fmt" + +// exporterSecretFormat is the format of the name of the exporter secret, i.e. +// "-exporter-secret" +// #nosec G101 +const exporterSecretFormat = "%s-exporter-secret" + +// GenerateExporterSecretName returns the name of the secret that contains +// information around a monitoring user +func GenerateExporterSecretName(clusterName string) string { + return fmt.Sprintf(exporterSecretFormat, clusterName) +} diff --git a/internal/util/exporter_test.go b/internal/util/exporter_test.go new file mode 100644 index 0000000000..9a80754093 --- /dev/null +++ b/internal/util/exporter_test.go @@ -0,0 +1,32 @@ +package util + +/* + Copyright 2020 - 2023 Crunchy Data Solutions, Inc. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +import ( + "testing" +) + +func TestGenerateExporterSecretName(t *testing.T) { + t.Run("success", func(t *testing.T) { + clusterName := "hippo" + expected := clusterName + "-exporter-secret" + actual := GenerateExporterSecretName(clusterName) + + if expected != actual { + t.Fatalf("expected %q actual %q", expected, actual) + } + }) +} diff --git a/internal/util/failover.go b/internal/util/failover.go index c17cd556ca..7e0a4165ea 100644 --- a/internal/util/failover.go +++ b/internal/util/failover.go @@ -1,7 +1,7 @@ package util /* - Copyright 2018 - 2020 Crunchy Data Solutions, Inc. + Copyright 2018 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
 	You may obtain a copy of the License at
@@ -18,7 +18,6 @@ package util
 import (
 	"context"
 	"encoding/json"
-	"errors"
 	"fmt"
 
 	"github.com/crunchydata/postgres-operator/internal/config"
@@ -40,6 +39,7 @@ type InstanceReplicationInfo struct {
 	Status         string
 	Timeline       int
 	PendingRestart bool
+	PodName        string
 	Role           string
 }
 
@@ -80,59 +80,20 @@ const (
 	// instanceReplicationInfoTypePrimaryStandby is the label used by Patroni to indicate that an
 	// instance is indeed a primary PostgreSQL instance, specifically within a standby cluster
 	instanceReplicationInfoTypePrimaryStandby = "Standby Leader"
-	// instanceRolePrimary indicates that an instance is a primary
-	instanceRolePrimary = "primary"
-	// instanceRoleReplica indicates that an instance is a replica
-	instanceRoleReplica = "replica"
+	// InstanceRolePrimary indicates that an instance is a primary
+	InstanceRolePrimary = "primary"
+	// InstanceRoleReplica indicates that an instance is a replica
+	InstanceRoleReplica = "replica"
 	// instanceRoleUnknown indicates that an instance is of an unknown type
 	instanceRoleUnknown = "unknown"
 	// instanceStatusUnavailable indicates an instance is unavailable
 	instanceStatusUnavailable = "unavailable"
 )
 
-var (
-	// instanceInfoCommand is the command used to get information about the status
-	// and other statistics about the instances in a PostgreSQL cluster, e.g.
-	// replication lag
-	instanceInfoCommand = []string{"patronictl", "list", "-f", "json"}
-)
-
-// GetPod determines the best target to fail to
-func GetPod(clientset kubernetes.Interface, deploymentName, namespace string) (*v1.Pod, error) {
-	ctx := context.TODO()
-
-	var err error
-	var pod *v1.Pod
-	var pods *v1.PodList
-
-	selector := config.LABEL_DEPLOYMENT_NAME + "=" + deploymentName + "," + config.LABEL_PGHA_ROLE + "=replica"
-	pods, err = clientset.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{LabelSelector: selector})
-	if err != nil {
-		return pod, err
-	}
-	if len(pods.Items) != 1 {
-		return pod, errors.New("could not determine which pod to failover to")
-	}
-
-	for _, v := range pods.Items {
-		pod = &v
-	}
-
-	found := false
-
-	//make sure the pod has a database container it it
-	for _, c := range pod.Spec.Containers {
-		if c.Name == "database" {
-			found = true
-		}
-	}
-
-	if !found {
-		return pod, errors.New("could not find a database container in the pod")
-	}
-
-	return pod, err
-}
+// instanceInfoCommand is the command used to get information about the status
+// and other statistics about the instances in a PostgreSQL cluster, e.g.
+// replication lag
+var instanceInfoCommand = []string{"patronictl", "list", "-f", "json"}
 
 // ReplicationStatus is responsible for retrieving and returning the replication
 // information about the status of the replicas in a PostgreSQL cluster. It
@@ -178,7 +139,6 @@ func ReplicationStatus(request ReplicationStatusRequest, includePrimary, include
 	log.Debugf(`searching for pods with "%s"`, selector)
 	pods, err := request.Clientset.CoreV1().Pods(request.Namespace).List(ctx, metav1.ListOptions{LabelSelector: selector})
-
 	// If there is an error trying to get the pods, return here. Allow the caller
 	// to handle the error
 	if err != nil {
@@ -203,9 +163,9 @@
 	// From executing and running a command in the first active pod
 	var pod *v1.Pod
-	for _, p := range pods.Items {
-		if p.Status.Phase == v1.PodRunning {
-			pod = &p
+	for i := range pods.Items {
+		if pods.Items[i].Status.Phase == v1.PodRunning {
+			pod = &pods.Items[i]
 			break
 		}
 	}
@@ -235,7 +195,6 @@
 	commandStdOut, _, err := kubeapi.ExecToPodThroughAPI(
 		request.RESTConfig, request.Clientset, instanceInfoCommand,
 		pod.Spec.Containers[0].Name, pod.Name, request.Namespace, nil)
-
 	// if there is an error, return. We will log the error at a higher level
 	if err != nil {
 		return response, err
@@ -243,7 +202,7 @@
 	// parse the JSON and place it into instanceInfoList
 	var rawInstances []instanceReplicationInfoJSON
-	json.Unmarshal([]byte(commandStdOut), &rawInstances)
+	_ = json.Unmarshal([]byte(commandStdOut), &rawInstances)
 
 	log.Debugf("patroni instance info: %v", rawInstances)
 
@@ -266,9 +225,9 @@
 		// determine the role of the instance
 		switch rawInstance.Type {
 		default:
-			role = instanceRoleReplica
+			role = InstanceRoleReplica
 		case instanceReplicationInfoTypePrimary, instanceReplicationInfoTypePrimaryStandby:
-			role = instanceRolePrimary
+			role = InstanceRolePrimary
 		}
 
 		// set up the instance that will be returned
@@ -280,6 +239,7 @@
 			Name:           instanceInfoMap[rawInstance.PodName].name,
 			Node:           instanceInfoMap[rawInstance.PodName].node,
 			PendingRestart: rawInstance.PendingRestart == "*",
+			PodName:        rawInstance.PodName,
 		}
 
 		// update the instance info if the instance is busted
@@ -325,14 +285,14 @@ func ToggleAutoFailover(clientset kubernetes.Interface, enable bool, pghaScope,
 	configJSONStr := configMap.ObjectMeta.Annotations["config"]
 	var configJSON map[string]interface{}
-	json.Unmarshal([]byte(configJSONStr), &configJSON)
+	_ = json.Unmarshal([]byte(configJSONStr), &configJSON)
 
 	if !enable {
 		// disable autofail condition
-		disableFailover(clientset, configMap, configJSON, namespace)
+		_ = disableFailover(clientset, configMap, configJSON, namespace)
 	} else {
 		// enable autofail
-		enableFailover(clientset, configMap, configJSON, namespace)
+		_ = enableFailover(clientset, configMap, configJSON, namespace)
 	}
 
 	return nil
@@ -342,7 +302,6 @@
 // pods in a cluster to a struct containing the associated instance name and the
 // Nodes that it runs on, all based upon the output from a Kubernetes API query
 func createInstanceInfoMap(pods *v1.PodList) map[string]instanceInfo {
-
 	instanceInfoMap := make(map[string]instanceInfo)
 
 	// Iterate through each pod that is returned and get the mapping between the
diff --git a/internal/util/pgbouncer.go b/internal/util/pgbouncer.go
index 2fdd645126..3039ac1abb 100644
--- a/internal/util/pgbouncer.go
+++ b/internal/util/pgbouncer.go
@@ -1,7 +1,7 @@
 package util
 
 /*
-	Copyright 2017 - 2020 Crunchy Data Solutions, Inc.
+	Copyright 2017 - 2023 Crunchy Data Solutions, Inc.
 	Licensed under the Apache License, Version 2.0 (the "License");
 	you may not use this file except in compliance with the License.
 	You may obtain a copy of the License at
@@ -29,6 +29,7 @@ const pgBouncerConfigMapFormat = "%s-pgbouncer-cm"
 // pgBouncerSecretFormat is the name of the Kubernetes Secret that pgBouncer
 // uses that stores configuration and pgbouncer user information, and follows
 // the format "<clusterName>-pgbouncer-secret"
+// #nosec: G101
 const pgBouncerSecretFormat = "%s-pgbouncer-secret"
 
 // pgBouncerUserFileFormat is the format of what the pgBouncer user management
diff --git a/internal/util/policy.go b/internal/util/policy.go
index 2a0e2fcf83..0be895137e 100644
--- a/internal/util/policy.go
+++ b/internal/util/policy.go
@@ -1,7 +1,7 @@
 package util
 
 /*
-	Copyright 2017 - 2020 Crunchy Data Solutions, Inc.
+	Copyright 2017 - 2023 Crunchy Data Solutions, Inc.
 	Licensed under the Apache License, Version 2.0 (the "License");
 	you may not use this file except in compliance with the License.
 	You may obtain a copy of the License at
@@ -19,15 +19,12 @@ import (
 	"context"
 	"errors"
 	"fmt"
-	"net/http"
 	"strings"
 
 	"github.com/crunchydata/postgres-operator/internal/config"
 	"github.com/crunchydata/postgres-operator/internal/kubeapi"
 	pgo "github.com/crunchydata/postgres-operator/pkg/generated/clientset/versioned"
 
-	"io/ioutil"
-
 	log "github.com/sirupsen/logrus"
 	kerrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -35,12 +32,11 @@ import (
 )
 
 // ExecPolicy executes a sql policy against a cluster
-func ExecPolicy(clientset kubeapi.Interface, restconfig *rest.Config, namespace, policyName, serviceName, port string) error {
+func ExecPolicy(clientset kubeapi.Interface, restconfig *rest.Config, namespace, policyName, clusterName, port string) error {
 	ctx := context.TODO()
 
-	//fetch the policy sql
+	// fetch the policy sql
 	sql, err := GetPolicySQL(clientset, namespace, policyName)
-
 	if err != nil {
 		return err
 	}
@@ -50,11 +46,10 @@ func ExecPolicy(clientset kubeapi.Interface, restconfig *rest.Config, namespace,
 	stdin := strings.NewReader(sql)
 
 	// now, we need to ensure we can get the Pod name of the primary PostgreSQL
-	// instance. Thname being passed in is actually the "serviceName" of the Pod
-	// We can isolate the exact Pod we want by using this (LABEL_SERVICE_NAME) and
-	// the LABEL_PGHA_ROLE labels
+	// instance. We can isolate the exact Pod we want by using the
+	// LABEL_PG_CLUSTER and LABEL_PGHA_ROLE labels
 	selector := fmt.Sprintf("%s=%s,%s=%s",
-		config.LABEL_SERVICE_NAME, serviceName,
+		config.LABEL_PG_CLUSTER, clusterName,
 		config.LABEL_PGHA_ROLE, config.LABEL_PGHA_ROLE_PRIMARY)
 
 	podList, err := clientset.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{LabelSelector: selector})
@@ -108,10 +103,7 @@ func GetPolicySQL(clientset pgo.Interface, namespace, policyName string) (string
 	ctx := context.TODO()
 	p, err := clientset.CrunchydataV1().Pgpolicies(namespace).Get(ctx, policyName, metav1.GetOptions{})
 	if err == nil {
-		if p.Spec.URL != "" {
-			return readSQLFromURL(p.Spec.URL)
-		}
-		return p.Spec.SQL, err
+		return p.Spec.SQL, nil
 	}
 
 	if kerrors.IsNotFound(err) {
@@ -121,24 +113,6 @@ func GetPolicySQL(clientset pgo.Interface, namespace, policyName string) (string
 	return "", err
 }
 
-// readSQLFromURL returns the SQL string from a URL
-func readSQLFromURL(urlstring string) (string, error) {
-	var bodyBytes []byte
-	response, err := http.Get(urlstring)
-	if err == nil {
-		bodyBytes, err = ioutil.ReadAll(response.Body)
-		defer response.Body.Close()
-	}
-
-	if err != nil {
-		log.Error(err)
-		return "", err
-	}
-
-	return string(bodyBytes), err
-
-}
-
 // ValidatePolicy tests to see if a policy exists
 func ValidatePolicy(clientset pgo.Interface, namespace string, policyName string) error {
 	ctx := context.TODO()
diff --git a/internal/util/secrets.go b/internal/util/secrets.go
index eed3348d31..d908eba1aa 100644
--- a/internal/util/secrets.go
+++ b/internal/util/secrets.go
@@ -1,7 +1,7 @@
 package util
 
 /*
-	Copyright 2017 - 2020 Crunchy Data Solutions, Inc.
+	Copyright 2017 - 2023 Crunchy Data Solutions, Inc.
 	Licensed under the Apache License, Version 2.0 (the "License");
 	you may not use this file except in compliance with the License.
 	You may obtain a copy of the License at
@@ -18,7 +18,6 @@ package util
 import (
 	"context"
 	"crypto/rand"
-	"fmt"
 	"math/big"
 	"strconv"
 	"strings"
@@ -32,10 +31,6 @@ import (
 	"k8s.io/client-go/kubernetes"
 )
 
-// UserSecretFormat follows the pattern of how the user information is stored,
-// which is "<clustername>-<username>-secret"
-const UserSecretFormat = "%s-%s" + crv1.UserSecretSuffix
-
 // The following constants are used as a part of password generation. For more
 // information on these selections, please consult the ASCII man page
 // (`man ascii`)
@@ -46,6 +41,10 @@ const (
 	// passwordCharUpper is the highest ASCII character to use for generating a
 	// password, which is 126
 	passwordCharUpper = 126
+	// passwordCharExclude is a set of characters that we choose to exclude from
+	// the password to simplify usage in the shell. There is still enough entropy
+	// that exclusion of these characters is OK.
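+	//
+	// As an illustrative sketch (not shipped code), callers can rely on
+	// generated passwords never containing the excluded characters:
+	//
+	//	pw, _ := GeneratePassword(24)
+	//	strings.ContainsAny(pw, passwordCharExclude) // always false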
+ passwordCharExclude = "`\\" ) // passwordCharSelector is a "big int" that we need to select the random ASCII @@ -69,23 +68,30 @@ func CreateSecret(clientset kubernetes.Interface, db, secretName, username, pass _, err := clientset.CoreV1().Secrets(namespace).Create(ctx, &secret, metav1.CreateOptions{}) return err - } // GeneratePassword generates a password of a given length out of the acceptable // ASCII characters suitable for a password func GeneratePassword(length int) (string, error) { password := make([]byte, length) + i := 0 - for i := 0; i < length; i++ { - char, err := rand.Int(rand.Reader, passwordCharSelector) - + for i < length { + val, err := rand.Int(rand.Reader, passwordCharSelector) // if there is an error generating the random integer, return if err != nil { return "", err } - password[i] = byte(passwordCharLower + char.Int64()) + char := byte(passwordCharLower + val.Int64()) + + // if the character is in the exclusion list, continue + if idx := strings.IndexAny(string(char), passwordCharExclude); idx > -1 { + continue + } + + password[i] = char + i++ } return string(password), nil @@ -100,7 +106,6 @@ func GeneratedPasswordLength(configuredPasswordLength string) int { // note that "configuredPasswordLength" may be an empty string, and as such // the below line could fail. That's ok though! as we have a default set up generatedPasswordLength, err := strconv.Atoi(configuredPasswordLength) - // if there is an error...set it to a default if err != nil { generatedPasswordLength = DefaultGeneratedPasswordLength @@ -113,7 +118,6 @@ func GeneratedPasswordLength(configuredPasswordLength string) int { func GetPasswordFromSecret(clientset kubernetes.Interface, namespace, secretName string) (string, error) { ctx := context.TODO() secret, err := clientset.CoreV1().Secrets(namespace).Get(ctx, secretName, metav1.GetOptions{}) - if err != nil { return "", err } @@ -132,10 +136,10 @@ func IsPostgreSQLUserSystemAccount(username string) bool { } // CreateUserSecret will create a new secret holding a user credential -func CreateUserSecret(clientset kubernetes.Interface, clustername, username, password, namespace string) error { - secretName := fmt.Sprintf(UserSecretFormat, clustername, username) +func CreateUserSecret(clientset kubernetes.Interface, cluster *crv1.Pgcluster, username, password string) error { + secretName := crv1.UserSecretName(cluster, username) - if err := CreateSecret(clientset, clustername, secretName, username, password, namespace); err != nil { + if err := CreateSecret(clientset, cluster.Name, secretName, username, password, cluster.Namespace); err != nil { log.Error(err) return err } @@ -148,13 +152,12 @@ func CreateUserSecret(clientset kubernetes.Interface, clustername, username, pas // // 1. If the Secret exists, it updates the value of the Secret // 2. 
If the Secret does not exist, it creates the secret -func UpdateUserSecret(clientset kubernetes.Interface, clustername, username, password, namespace string) error { +func UpdateUserSecret(clientset kubernetes.Interface, cluster *crv1.Pgcluster, username, password string) error { ctx := context.TODO() - secretName := fmt.Sprintf(UserSecretFormat, clustername, username) + secretName := crv1.UserSecretName(cluster, username) // see if the secret already exists - secret, err := clientset.CoreV1().Secrets(namespace).Get(ctx, secretName, metav1.GetOptions{}) - + secret, err := clientset.CoreV1().Secrets(cluster.Namespace).Get(ctx, secretName, metav1.GetOptions{}) // if this returns an error and it's not the "not found" error, return // However, if it is the "not found" error, treat this as creating the user // secret @@ -163,7 +166,7 @@ func UpdateUserSecret(clientset kubernetes.Interface, clustername, username, pas return err } - return CreateUserSecret(clientset, clustername, username, password, namespace) + return CreateUserSecret(clientset, cluster, username, password) } // update the value of "password" diff --git a/internal/util/secrets_test.go b/internal/util/secrets_test.go index 89cbcebac9..4b8676e7ca 100644 --- a/internal/util/secrets_test.go +++ b/internal/util/secrets_test.go @@ -1,7 +1,7 @@ package util /* - Copyright 2020 Crunchy Data Solutions, Inc. + Copyright 2020 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -23,7 +23,7 @@ import ( func TestGeneratePassword(t *testing.T) { // different lengths - for _, length := range []int{1, 2, 3, 5, 20} { + for _, length := range []int{1, 2, 3, 5, 20, 200} { password, err := GeneratePassword(length) if err != nil { t.Fatalf("expected no error, got %v", err) @@ -31,9 +31,12 @@ func TestGeneratePassword(t *testing.T) { if expected, actual := length, len(password); expected != actual { t.Fatalf("expected length %v, got %v", expected, actual) } - if i := strings.IndexFunc(password, unicode.IsPrint); i > 0 { + if i := strings.IndexFunc(password, func(r rune) bool { return !unicode.IsPrint(r) }); i > -1 { t.Fatalf("expected only printable characters, got %q in %q", password[i], password) } + if i := strings.IndexAny(password, passwordCharExclude); i > -1 { + t.Fatalf("expected no exclude characters, got %q in %q", password[i], password) + } } // random contents @@ -44,9 +47,12 @@ func TestGeneratePassword(t *testing.T) { if err != nil { t.Fatalf("expected no error, got %v", err) } - if i := strings.IndexFunc(password, unicode.IsPrint); i > 0 { + if i := strings.IndexFunc(password, func(r rune) bool { return !unicode.IsPrint(r) }); i > -1 { t.Fatalf("expected only printable characters, got %q in %q", password[i], password) } + if i := strings.IndexAny(password, passwordCharExclude); i > -1 { + t.Fatalf("expected no exclude characters, got %q in %q", password[i], password) + } for i := range previous { if password == previous[i] { diff --git a/internal/util/ssh.go b/internal/util/ssh.go index aa886bbca7..c916abcd28 100644 --- a/internal/util/ssh.go +++ b/internal/util/ssh.go @@ -1,7 +1,7 @@ package util /* - Copyright 2019 - 2020 Crunchy Data Solutions, Inc. + Copyright 2019 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at @@ -91,6 +91,7 @@ func newPrivateKey(key ed25519.PrivateKey) ([]byte, error) { // check fields should match to easily verify // that a decryption was successful + // #nosec: G404 private.Check1 = rand.Uint32() private.Check2 = private.Check1 diff --git a/internal/util/util.go b/internal/util/util.go index 95a8310742..ceb20cef80 100644 --- a/internal/util/util.go +++ b/internal/util/util.go @@ -1,7 +1,7 @@ package util /* - Copyright 2017 - 2020 Crunchy Data Solutions, Inc. + Copyright 2017 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -44,7 +44,6 @@ var gisImageTagRegex = regexp.MustCompile(`(.+-[\d|\.]+)-[\d|\.]+?(-[\d|\.]+.*)` func init() { rand.Seed(time.Now().UnixNano()) - } // GetLabels ... @@ -58,19 +57,19 @@ func GetLabels(name, clustername string, replica bool) string { return output } -//CurrentPrimaryUpdate prepares the needed data structures with the correct current primary value -//before passing them along to be patched into the current pgcluster CRD's annotations +// CurrentPrimaryUpdate prepares the needed data structures with the correct current primary value +// before passing them along to be patched into the current pgcluster CRD's annotations func CurrentPrimaryUpdate(clientset pgo.Interface, cluster *crv1.Pgcluster, currentPrimary, namespace string) error { - //create a new map + // create a new map metaLabels := make(map[string]string) - //copy the relevant values into the new map + // copy the relevant values into the new map for k, v := range cluster.ObjectMeta.Labels { metaLabels[k] = v } - //update this map with the new deployment label + // update this map with the new deployment label metaLabels[config.LABEL_DEPLOYMENT_NAME] = currentPrimary - //Update CRD with the current primary name and the new deployment to point to after the failover + // Update CRD with the current primary name and the new deployment to point to after the failover if err := PatchClusterCRD(clientset, metaLabels, cluster, currentPrimary, namespace); err != nil { log.Errorf("failoverlogic: could not patch pgcluster %s with the current primary", currentPrimary) } @@ -112,7 +111,6 @@ func PatchClusterCRD(clientset pgo.Interface, labelMap map[string]string, oldCrd Patch(ctx, oldCrd.Spec.Name, types.MergePatchType, patchBytes, metav1.PatchOptions{}) return err6 - } // GetValueOrDefault checks whether the first value given is set. If it is, @@ -149,7 +147,6 @@ func GetSecretPassword(clientset kubernetes.Interface, db, suffix, Namespace str log.Error("primary secret not found for " + db) return "", errors.New("primary secret not found for " + db) - } // GetStandardImageTag takes the current image name and the image tag value @@ -158,7 +155,6 @@ func GetSecretPassword(clientset kubernetes.Interface, db, suffix, Namespace str // the tag without the addition of the GIS version. This tag value can then // be used when provisioning containers using the standard containers tag. 
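+// For example (mapping taken from the unit tests below):
+//
+//	GetStandardImageTag("crunchy-postgres-gis-ha", "ubi8-12.4-3.0-4.5.0")
+//	// returns "ubi8-12.4-4.5.0"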
func GetStandardImageTag(imageName, imageTag string) string { - if imageName == "crunchy-postgres-gis-ha" && strings.Count(imageTag, "-") > 2 { return gisImageTagRegex.ReplaceAllString(imageTag, "$1$2") } @@ -170,6 +166,7 @@ func GetStandardImageTag(imageName, imageTag string) string { func RandStringBytesRmndr(n int) string { b := make([]byte, n) for i := range b { + // #nosec: G404 b[i] = letterBytes[rand.Int63()%int64(len(letterBytes))] } return string(b) diff --git a/internal/util/util_test.go b/internal/util/util_test.go index 30d6d8b65d..283c88b916 100644 --- a/internal/util/util_test.go +++ b/internal/util/util_test.go @@ -3,7 +3,6 @@ package util import "testing" func TestGetStandardImageTag(t *testing.T) { - assertCorrectMessage := func(t testing.TB, got, want string) { t.Helper() if got != want { @@ -18,50 +17,50 @@ func TestGetStandardImageTag(t *testing.T) { expected string }{ { - "image: crunchy-postgres-ha, tag: centos7-12.4-4.5.0", + "image: crunchy-postgres-ha, tag: ubi8-12.4-4.5.0", "crunchy-postgres-ha", - "centos7-12.4-4.5.0", - "centos7-12.4-4.5.0", + "ubi8-12.4-4.5.0", + "ubi8-12.4-4.5.0", }, { - "image: crunchy-postgres-gis-ha, tag: centos7-12.4-3.0-4.5.0", + "image: crunchy-postgres-gis-ha, tag: ubi8-12.4-3.0-4.5.0", "crunchy-postgres-gis-ha", - "centos7-12.4-3.0-4.5.0", - "centos7-12.4-4.5.0", + "ubi8-12.4-3.0-4.5.0", + "ubi8-12.4-4.5.0", }, { - "image: crunchy-postgres-ha, tag: centos7-12.4-4.5.0-beta.1", + "image: crunchy-postgres-ha, tag: ubi8-12.4-4.5.0-beta.1", "crunchy-postgres-ha", - "centos7-12.4-4.5.0-beta.1", - "centos7-12.4-4.5.0-beta.1", + "ubi8-12.4-4.5.0-beta.1", + "ubi8-12.4-4.5.0-beta.1", }, { - "image: crunchy-postgres-gis-ha, tag: centos7-12.4-3.0-4.5.0-beta.2", + "image: crunchy-postgres-gis-ha, tag: ubi8-12.4-3.0-4.5.0-beta.2", "crunchy-postgres-gis-ha", - "centos7-12.4-3.0-4.5.0-beta.2", - "centos7-12.4-4.5.0-beta.2", + "ubi8-12.4-3.0-4.5.0-beta.2", + "ubi8-12.4-4.5.0-beta.2", }, { - "image: crunchy-postgres-ha, tag: centos8-9.5.23-4.5.0-rc.1", + "image: crunchy-postgres-ha, tag: ubi8-9.5.23-4.5.0-rc.1", "crunchy-postgres-ha", - "centos8-9.5.23-4.5.0-rc.1", - "centos8-9.5.23-4.5.0-rc.1", + "ubi8-9.5.23-4.5.0-rc.1", + "ubi8-9.5.23-4.5.0-rc.1", }, { - "image: crunchy-postgres-gis-ha, tag: centos8-9.5.23-2.4-4.5.0-rc.1", + "image: crunchy-postgres-gis-ha, tag: ubi8-9.5.23-2.4-4.5.0-rc.1", "crunchy-postgres-gis-ha", - "centos8-9.5.23-2.4-4.5.0-rc.1", - "centos8-9.5.23-4.5.0-rc.1", + "ubi8-9.5.23-2.4-4.5.0-rc.1", + "ubi8-9.5.23-4.5.0-rc.1", }, { - "image: crunchy-postgres-gis-ha, tag: centos8-13.0-3.0-4.5.0-rc.1", + "image: crunchy-postgres-gis-ha, tag: ubi8-13.0-3.0-4.5.0-rc.1", "crunchy-postgres-gis-ha", - "centos8-13.0-3.0-4.5.0-rc.1", - "centos8-13.0-4.5.0-rc.1", + "ubi8-13.0-3.0-4.5.0-rc.1", + "ubi8-13.0-4.5.0-rc.1", }, { - "image: crunchy-postgres-gis-ha, tag: centos8-custom123", + "image: crunchy-postgres-gis-ha, tag: ubi8-custom123", "crunchy-postgres-gis-ha", - "centos8-custom123", - "centos8-custom123", + "ubi8-custom123", + "ubi8-custom123", }, { - "image: crunchy-postgres-gis-ha, tag: centos8-custom123-moreinfo-789", + "image: crunchy-postgres-gis-ha, tag: ubi8-custom123-moreinfo-789", "crunchy-postgres-gis-ha", - "centos8-custom123-moreinfo-789", - "centos8-custom123-moreinfo-789", + "ubi8-custom123-moreinfo-789", + "ubi8-custom123-moreinfo-789", }, } diff --git a/redhat/licenses/LICENSE.txt b/licenses/LICENSE.txt similarity index 100% rename from redhat/licenses/LICENSE.txt rename to licenses/LICENSE.txt diff --git 
a/licenses/github.com/PuerkitoBio/purell/LICENSE b/licenses/github.com/PuerkitoBio/purell/LICENSE deleted file mode 100644 index 4b9986dea7..0000000000 --- a/licenses/github.com/PuerkitoBio/purell/LICENSE +++ /dev/null @@ -1,12 +0,0 @@ -Copyright (c) 2012, Martin Angers -All rights reserved. - -Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: - -* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. - -* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - -* Neither the name of the author nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/licenses/github.com/PuerkitoBio/urlesc/LICENSE b/licenses/github.com/PuerkitoBio/urlesc/LICENSE deleted file mode 100644 index 7448756763..0000000000 --- a/licenses/github.com/PuerkitoBio/urlesc/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2012 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/licenses/github.com/cpuguy83/go-md2man/LICENSE.md b/licenses/github.com/cpuguy83/go-md2man/LICENSE.md deleted file mode 100644 index 1cade6cef6..0000000000 --- a/licenses/github.com/cpuguy83/go-md2man/LICENSE.md +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2014 Brian Goff - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/licenses/github.com/davecgh/go-spew/LICENSE b/licenses/github.com/davecgh/go-spew/LICENSE deleted file mode 100644 index bc52e96f2b..0000000000 --- a/licenses/github.com/davecgh/go-spew/LICENSE +++ /dev/null @@ -1,15 +0,0 @@ -ISC License - -Copyright (c) 2012-2016 Dave Collins - -Permission to use, copy, modify, and/or distribute this software for any -purpose with or without fee is hereby granted, provided that the above -copyright notice and this permission notice appear in all copies. - -THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF -OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. diff --git a/licenses/github.com/docker/spdystream/LICENSE b/licenses/github.com/docker/spdystream/LICENSE deleted file mode 100644 index 9e4bd4dbee..0000000000 --- a/licenses/github.com/docker/spdystream/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. 
- - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright 2014-2015 Docker, Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/licenses/github.com/emicklei/go-restful/LICENSE b/licenses/github.com/emicklei/go-restful/LICENSE deleted file mode 100644 index ece7ec61ef..0000000000 --- a/licenses/github.com/emicklei/go-restful/LICENSE +++ /dev/null @@ -1,22 +0,0 @@ -Copyright (c) 2012,2013 Ernest Micklei - -MIT License - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. \ No newline at end of file diff --git a/licenses/github.com/evanphx/json-patch/LICENSE b/licenses/github.com/evanphx/json-patch/LICENSE deleted file mode 100644 index 0eb9b72d84..0000000000 --- a/licenses/github.com/evanphx/json-patch/LICENSE +++ /dev/null @@ -1,25 +0,0 @@ -Copyright (c) 2014, Evan Phoenix -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -* Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. -* Redistributions in binary form must reproduce the above copyright notice - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. -* Neither the name of the Evan Phoenix nor the names of its contributors - may be used to endorse or promote products derived from this software - without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/licenses/github.com/fatih/color/LICENSE.md b/licenses/github.com/fatih/color/LICENSE.md deleted file mode 100644 index 25fdaf639d..0000000000 --- a/licenses/github.com/fatih/color/LICENSE.md +++ /dev/null @@ -1,20 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2013 Fatih Arslan - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/licenses/github.com/ghodss/yaml/LICENSE b/licenses/github.com/ghodss/yaml/LICENSE deleted file mode 100644 index 7805d36de7..0000000000 --- a/licenses/github.com/ghodss/yaml/LICENSE +++ /dev/null @@ -1,50 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2014 Sam Ghods - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - - -Copyright (c) 2012 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/licenses/github.com/go-openapi/jsonpointer/LICENSE b/licenses/github.com/go-openapi/jsonpointer/LICENSE deleted file mode 100644 index d645695673..0000000000 --- a/licenses/github.com/go-openapi/jsonpointer/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. 
- - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/licenses/github.com/go-openapi/jsonreference/LICENSE b/licenses/github.com/go-openapi/jsonreference/LICENSE deleted file mode 100644 index d645695673..0000000000 --- a/licenses/github.com/go-openapi/jsonreference/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. 
Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/licenses/github.com/go-openapi/spec/LICENSE b/licenses/github.com/go-openapi/spec/LICENSE deleted file mode 100644 index d645695673..0000000000 --- a/licenses/github.com/go-openapi/spec/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/licenses/github.com/go-openapi/swag/LICENSE b/licenses/github.com/go-openapi/swag/LICENSE deleted file mode 100644 index d645695673..0000000000 --- a/licenses/github.com/go-openapi/swag/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. 
For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/licenses/github.com/gogo/protobuf/LICENSE b/licenses/github.com/gogo/protobuf/LICENSE deleted file mode 100644 index 7be0cc7b62..0000000000 --- a/licenses/github.com/gogo/protobuf/LICENSE +++ /dev/null @@ -1,36 +0,0 @@ -Protocol Buffers for Go with Gadgets - -Copyright (c) 2013, The GoGo Authors. All rights reserved. -http://github.com/gogo/protobuf - -Go support for Protocol Buffers - Google's data interchange format - -Copyright 2010 The Go Authors. All rights reserved. -https://github.com/golang/protobuf - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. 
- * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - diff --git a/licenses/github.com/golang/glog/LICENSE b/licenses/github.com/golang/glog/LICENSE deleted file mode 100644 index 37ec93a14f..0000000000 --- a/licenses/github.com/golang/glog/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ -Apache License -Version 2.0, January 2004 -http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - -"License" shall mean the terms and conditions for use, reproduction, and -distribution as defined by Sections 1 through 9 of this document. - -"Licensor" shall mean the copyright owner or entity authorized by the copyright -owner that is granting the License. - -"Legal Entity" shall mean the union of the acting entity and all other entities -that control, are controlled by, or are under common control with that entity. -For the purposes of this definition, "control" means (i) the power, direct or -indirect, to cause the direction or management of such entity, whether by -contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the -outstanding shares, or (iii) beneficial ownership of such entity. - -"You" (or "Your") shall mean an individual or Legal Entity exercising -permissions granted by this License. - -"Source" form shall mean the preferred form for making modifications, including -but not limited to software source code, documentation source, and configuration -files. - -"Object" form shall mean any form resulting from mechanical transformation or -translation of a Source form, including but not limited to compiled object code, -generated documentation, and conversions to other media types. - -"Work" shall mean the work of authorship, whether in Source or Object form, made -available under the License, as indicated by a copyright notice that is included -in or attached to the work (an example is provided in the Appendix below). - -"Derivative Works" shall mean any work, whether in Source or Object form, that -is based on (or derived from) the Work and for which the editorial revisions, -annotations, elaborations, or other modifications represent, as a whole, an -original work of authorship. For the purposes of this License, Derivative Works -shall not include works that remain separable from, or merely link (or bind by -name) to the interfaces of, the Work and Derivative Works thereof. 
- -"Contribution" shall mean any work of authorship, including the original version -of the Work and any modifications or additions to that Work or Derivative Works -thereof, that is intentionally submitted to Licensor for inclusion in the Work -by the copyright owner or by an individual or Legal Entity authorized to submit -on behalf of the copyright owner. For the purposes of this definition, -"submitted" means any form of electronic, verbal, or written communication sent -to the Licensor or its representatives, including but not limited to -communication on electronic mailing lists, source code control systems, and -issue tracking systems that are managed by, or on behalf of, the Licensor for -the purpose of discussing and improving the Work, but excluding communication -that is conspicuously marked or otherwise designated in writing by the copyright -owner as "Not a Contribution." - -"Contributor" shall mean Licensor and any individual or Legal Entity on behalf -of whom a Contribution has been received by Licensor and subsequently -incorporated within the Work. - -2. Grant of Copyright License. - -Subject to the terms and conditions of this License, each Contributor hereby -grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, -irrevocable copyright license to reproduce, prepare Derivative Works of, -publicly display, publicly perform, sublicense, and distribute the Work and such -Derivative Works in Source or Object form. - -3. Grant of Patent License. - -Subject to the terms and conditions of this License, each Contributor hereby -grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, -irrevocable (except as stated in this section) patent license to make, have -made, use, offer to sell, sell, import, and otherwise transfer the Work, where -such license applies only to those patent claims licensable by such Contributor -that are necessarily infringed by their Contribution(s) alone or by combination -of their Contribution(s) with the Work to which such Contribution(s) was -submitted. If You institute patent litigation against any entity (including a -cross-claim or counterclaim in a lawsuit) alleging that the Work or a -Contribution incorporated within the Work constitutes direct or contributory -patent infringement, then any patent licenses granted to You under this License -for that Work shall terminate as of the date such litigation is filed. - -4. Redistribution. 
- -You may reproduce and distribute copies of the Work or Derivative Works thereof -in any medium, with or without modifications, and in Source or Object form, -provided that You meet the following conditions: - -You must give any other recipients of the Work or Derivative Works a copy of -this License; and -You must cause any modified files to carry prominent notices stating that You -changed the files; and -You must retain, in the Source form of any Derivative Works that You distribute, -all copyright, patent, trademark, and attribution notices from the Source form -of the Work, excluding those notices that do not pertain to any part of the -Derivative Works; and -If the Work includes a "NOTICE" text file as part of its distribution, then any -Derivative Works that You distribute must include a readable copy of the -attribution notices contained within such NOTICE file, excluding those notices -that do not pertain to any part of the Derivative Works, in at least one of the -following places: within a NOTICE text file distributed as part of the -Derivative Works; within the Source form or documentation, if provided along -with the Derivative Works; or, within a display generated by the Derivative -Works, if and wherever such third-party notices normally appear. The contents of -the NOTICE file are for informational purposes only and do not modify the -License. You may add Your own attribution notices within Derivative Works that -You distribute, alongside or as an addendum to the NOTICE text from the Work, -provided that such additional attribution notices cannot be construed as -modifying the License. -You may add Your own copyright statement to Your modifications and may provide -additional or different license terms and conditions for use, reproduction, or -distribution of Your modifications, or for any such Derivative Works as a whole, -provided Your use, reproduction, and distribution of the Work otherwise complies -with the conditions stated in this License. - -5. Submission of Contributions. - -Unless You explicitly state otherwise, any Contribution intentionally submitted -for inclusion in the Work by You to the Licensor shall be under the terms and -conditions of this License, without any additional terms or conditions. -Notwithstanding the above, nothing herein shall supersede or modify the terms of -any separate license agreement you may have executed with Licensor regarding -such Contributions. - -6. Trademarks. - -This License does not grant permission to use the trade names, trademarks, -service marks, or product names of the Licensor, except as required for -reasonable and customary use in describing the origin of the Work and -reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. - -Unless required by applicable law or agreed to in writing, Licensor provides the -Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, -including, without limitation, any warranties or conditions of TITLE, -NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are -solely responsible for determining the appropriateness of using or -redistributing the Work and assume any risks associated with Your exercise of -permissions under this License. - -8. Limitation of Liability. 
- -In no event and under no legal theory, whether in tort (including negligence), -contract, or otherwise, unless required by applicable law (such as deliberate -and grossly negligent acts) or agreed to in writing, shall any Contributor be -liable to You for damages, including any direct, indirect, special, incidental, -or consequential damages of any character arising as a result of this License or -out of the use or inability to use the Work (including but not limited to -damages for loss of goodwill, work stoppage, computer failure or malfunction, or -any and all other commercial damages or losses), even if such Contributor has -been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. - -While redistributing the Work or Derivative Works thereof, You may choose to -offer, and charge a fee for, acceptance of support, warranty, indemnity, or -other liability obligations and/or rights consistent with this License. However, -in accepting such obligations, You may act only on Your own behalf and on Your -sole responsibility, not on behalf of any other Contributor, and only if You -agree to indemnify, defend, and hold each Contributor harmless for any liability -incurred by, or claims asserted against, such Contributor by reason of your -accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work - -To apply the Apache License to your work, attach the following boilerplate -notice, with the fields enclosed by brackets "[]" replaced with your own -identifying information. (Don't include the brackets!) The text should be -enclosed in the appropriate comment syntax for the file format. We also -recommend that a file or class name and description of purpose be included on -the same "printed page" as the copyright notice for easier identification within -third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/licenses/github.com/golang/protobuf/LICENSE b/licenses/github.com/golang/protobuf/LICENSE deleted file mode 100644 index 0f646931a4..0000000000 --- a/licenses/github.com/golang/protobuf/LICENSE +++ /dev/null @@ -1,28 +0,0 @@ -Copyright 2010 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. 
- -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - diff --git a/licenses/github.com/google/btree/LICENSE b/licenses/github.com/google/btree/LICENSE deleted file mode 100644 index d645695673..0000000000 --- a/licenses/github.com/google/btree/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/licenses/github.com/google/gofuzz/LICENSE b/licenses/github.com/google/gofuzz/LICENSE deleted file mode 100644 index d645695673..0000000000 --- a/licenses/github.com/google/gofuzz/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/licenses/github.com/googleapis/gnostic/LICENSE b/licenses/github.com/googleapis/gnostic/LICENSE deleted file mode 100644 index 6b0b1270ff..0000000000 --- a/licenses/github.com/googleapis/gnostic/LICENSE +++ /dev/null @@ -1,203 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. 
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
- diff --git a/licenses/github.com/gorilla/context/LICENSE b/licenses/github.com/gorilla/context/LICENSE deleted file mode 100644 index 0e5fb87280..0000000000 --- a/licenses/github.com/gorilla/context/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2012 Rodrigo Moraes. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/licenses/github.com/gorilla/mux/LICENSE b/licenses/github.com/gorilla/mux/LICENSE deleted file mode 100644 index 0e5fb87280..0000000000 --- a/licenses/github.com/gorilla/mux/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2012 Rodrigo Moraes. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/licenses/github.com/gregjones/httpcache/LICENSE.txt b/licenses/github.com/gregjones/httpcache/LICENSE.txt deleted file mode 100644 index 81316beb0c..0000000000 --- a/licenses/github.com/gregjones/httpcache/LICENSE.txt +++ /dev/null @@ -1,7 +0,0 @@ -Copyright © 2012 Greg Jones (greg.jones@gmail.com) - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. \ No newline at end of file diff --git a/licenses/github.com/hashicorp/golang-lru/LICENSE b/licenses/github.com/hashicorp/golang-lru/LICENSE deleted file mode 100644 index be2cc4dfb6..0000000000 --- a/licenses/github.com/hashicorp/golang-lru/LICENSE +++ /dev/null @@ -1,362 +0,0 @@ -Mozilla Public License, version 2.0 - -1. Definitions - -1.1. "Contributor" - - means each individual or legal entity that creates, contributes to the - creation of, or owns Covered Software. - -1.2. "Contributor Version" - - means the combination of the Contributions of others (if any) used by a - Contributor and that particular Contributor's Contribution. - -1.3. "Contribution" - - means Covered Software of a particular Contributor. - -1.4. "Covered Software" - - means Source Code Form to which the initial Contributor has attached the - notice in Exhibit A, the Executable Form of such Source Code Form, and - Modifications of such Source Code Form, in each case including portions - thereof. - -1.5. "Incompatible With Secondary Licenses" - means - - a. that the initial Contributor has attached the notice described in - Exhibit B to the Covered Software; or - - b. that the Covered Software was made available under the terms of - version 1.1 or earlier of the License, but not also under the terms of - a Secondary License. - -1.6. "Executable Form" - - means any form of the work other than Source Code Form. - -1.7. "Larger Work" - - means a work that combines Covered Software with other material, in a - separate file or files, that is not Covered Software. - -1.8. "License" - - means this document. - -1.9. "Licensable" - - means having the right to grant, to the maximum extent possible, whether - at the time of the initial grant or subsequently, any and all of the - rights conveyed by this License. - -1.10. "Modifications" - - means any of the following: - - a. any file in Source Code Form that results from an addition to, - deletion from, or modification of the contents of Covered Software; or - - b. any new file in Source Code Form that contains any Covered Software. - -1.11. 
"Patent Claims" of a Contributor - - means any patent claim(s), including without limitation, method, - process, and apparatus claims, in any patent Licensable by such - Contributor that would be infringed, but for the grant of the License, - by the making, using, selling, offering for sale, having made, import, - or transfer of either its Contributions or its Contributor Version. - -1.12. "Secondary License" - - means either the GNU General Public License, Version 2.0, the GNU Lesser - General Public License, Version 2.1, the GNU Affero General Public - License, Version 3.0, or any later versions of those licenses. - -1.13. "Source Code Form" - - means the form of the work preferred for making modifications. - -1.14. "You" (or "Your") - - means an individual or a legal entity exercising rights under this - License. For legal entities, "You" includes any entity that controls, is - controlled by, or is under common control with You. For purposes of this - definition, "control" means (a) the power, direct or indirect, to cause - the direction or management of such entity, whether by contract or - otherwise, or (b) ownership of more than fifty percent (50%) of the - outstanding shares or beneficial ownership of such entity. - - -2. License Grants and Conditions - -2.1. Grants - - Each Contributor hereby grants You a world-wide, royalty-free, - non-exclusive license: - - a. under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or - as part of a Larger Work; and - - b. under Patent Claims of such Contributor to make, use, sell, offer for - sale, have made, import, and otherwise transfer either its - Contributions or its Contributor Version. - -2.2. Effective Date - - The licenses granted in Section 2.1 with respect to any Contribution - become effective for each Contribution on the date the Contributor first - distributes such Contribution. - -2.3. Limitations on Grant Scope - - The licenses granted in this Section 2 are the only rights granted under - this License. No additional rights or licenses will be implied from the - distribution or licensing of Covered Software under this License. - Notwithstanding Section 2.1(b) above, no patent license is granted by a - Contributor: - - a. for any code that a Contributor has removed from Covered Software; or - - b. for infringements caused by: (i) Your and any other third party's - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or - - c. under Patent Claims infringed by Covered Software in the absence of - its Contributions. - - This License does not grant any rights in the trademarks, service marks, - or logos of any Contributor (except as may be necessary to comply with - the notice requirements in Section 3.4). - -2.4. Subsequent Licenses - - No Contributor makes additional grants as a result of Your choice to - distribute the Covered Software under a subsequent version of this - License (see Section 10.2) or under the terms of a Secondary License (if - permitted under the terms of Section 3.3). - -2.5. Representation - - Each Contributor represents that the Contributor believes its - Contributions are its original creation(s) or it has sufficient rights to - grant the rights to its Contributions conveyed by this License. - -2.6. 
Fair Use - - This License is not intended to limit any rights You have under - applicable copyright doctrines of fair use, fair dealing, or other - equivalents. - -2.7. Conditions - - Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in - Section 2.1. - - -3. Responsibilities - -3.1. Distribution of Source Form - - All distribution of Covered Software in Source Code Form, including any - Modifications that You create or to which You contribute, must be under - the terms of this License. You must inform recipients that the Source - Code Form of the Covered Software is governed by the terms of this - License, and how they can obtain a copy of this License. You may not - attempt to alter or restrict the recipients' rights in the Source Code - Form. - -3.2. Distribution of Executable Form - - If You distribute Covered Software in Executable Form then: - - a. such Covered Software must also be made available in Source Code Form, - as described in Section 3.1, and You must inform recipients of the - Executable Form how they can obtain a copy of such Source Code Form by - reasonable means in a timely manner, at a charge no more than the cost - of distribution to the recipient; and - - b. You may distribute such Executable Form under the terms of this - License, or sublicense it under different terms, provided that the - license for the Executable Form does not attempt to limit or alter the - recipients' rights in the Source Code Form under this License. - -3.3. Distribution of a Larger Work - - You may create and distribute a Larger Work under terms of Your choice, - provided that You also comply with the requirements of this License for - the Covered Software. If the Larger Work is a combination of Covered - Software with a work governed by one or more Secondary Licenses, and the - Covered Software is not Incompatible With Secondary Licenses, this - License permits You to additionally distribute such Covered Software - under the terms of such Secondary License(s), so that the recipient of - the Larger Work may, at their option, further distribute the Covered - Software under the terms of either this License or such Secondary - License(s). - -3.4. Notices - - You may not remove or alter the substance of any license notices - (including copyright notices, patent notices, disclaimers of warranty, or - limitations of liability) contained within the Source Code Form of the - Covered Software, except that You may alter any license notices to the - extent required to remedy known factual inaccuracies. - -3.5. Application of Additional Terms - - You may choose to offer, and to charge a fee for, warranty, support, - indemnity or liability obligations to one or more recipients of Covered - Software. However, You may do so only on Your own behalf, and not on - behalf of any Contributor. You must make it absolutely clear that any - such warranty, support, indemnity, or liability obligation is offered by - You alone, and You hereby agree to indemnify every Contributor for any - liability incurred by such Contributor as a result of warranty, support, - indemnity or liability terms You offer. You may include additional - disclaimers of warranty and limitations of liability specific to any - jurisdiction. - -4. 
Inability to Comply Due to Statute or Regulation - - If it is impossible for You to comply with any of the terms of this License - with respect to some or all of the Covered Software due to statute, - judicial order, or regulation then You must: (a) comply with the terms of - this License to the maximum extent possible; and (b) describe the - limitations and the code they affect. Such description must be placed in a - text file included with all distributions of the Covered Software under - this License. Except to the extent prohibited by statute or regulation, - such description must be sufficiently detailed for a recipient of ordinary - skill to be able to understand it. - -5. Termination - -5.1. The rights granted under this License will terminate automatically if You - fail to comply with any of its terms. However, if You become compliant, - then the rights granted under this License from a particular Contributor - are reinstated (a) provisionally, unless and until such Contributor - explicitly and finally terminates Your grants, and (b) on an ongoing - basis, if such Contributor fails to notify You of the non-compliance by - some reasonable means prior to 60 days after You have come back into - compliance. Moreover, Your grants from a particular Contributor are - reinstated on an ongoing basis if such Contributor notifies You of the - non-compliance by some reasonable means, this is the first time You have - received notice of non-compliance with this License from such - Contributor, and You become compliant prior to 30 days after Your receipt - of the notice. - -5.2. If You initiate litigation against any entity by asserting a patent - infringement claim (excluding declaratory judgment actions, - counter-claims, and cross-claims) alleging that a Contributor Version - directly or indirectly infringes any patent, then the rights granted to - You by any and all Contributors for the Covered Software under Section - 2.1 of this License shall terminate. - -5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user - license agreements (excluding distributors and resellers) which have been - validly granted by You or Your distributors under this License prior to - termination shall survive termination. - -6. Disclaimer of Warranty - - Covered Software is provided under this License on an "as is" basis, - without warranty of any kind, either expressed, implied, or statutory, - including, without limitation, warranties that the Covered Software is free - of defects, merchantable, fit for a particular purpose or non-infringing. - The entire risk as to the quality and performance of the Covered Software - is with You. Should any Covered Software prove defective in any respect, - You (not any Contributor) assume the cost of any necessary servicing, - repair, or correction. This disclaimer of warranty constitutes an essential - part of this License. No use of any Covered Software is authorized under - this License except under this disclaimer. - -7. 
Limitation of Liability - - Under no circumstances and under no legal theory, whether tort (including - negligence), contract, or otherwise, shall any Contributor, or anyone who - distributes Covered Software as permitted above, be liable to You for any - direct, indirect, special, incidental, or consequential damages of any - character including, without limitation, damages for lost profits, loss of - goodwill, work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses, even if such party shall have been - informed of the possibility of such damages. This limitation of liability - shall not apply to liability for death or personal injury resulting from - such party's negligence to the extent applicable law prohibits such - limitation. Some jurisdictions do not allow the exclusion or limitation of - incidental or consequential damages, so this exclusion and limitation may - not apply to You. - -8. Litigation - - Any litigation relating to this License may be brought only in the courts - of a jurisdiction where the defendant maintains its principal place of - business and such litigation shall be governed by laws of that - jurisdiction, without reference to its conflict-of-law provisions. Nothing - in this Section shall prevent a party's ability to bring cross-claims or - counter-claims. - -9. Miscellaneous - - This License represents the complete agreement concerning the subject - matter hereof. If any provision of this License is held to be - unenforceable, such provision shall be reformed only to the extent - necessary to make it enforceable. Any law or regulation which provides that - the language of a contract shall be construed against the drafter shall not - be used to construe this License against a Contributor. - - -10. Versions of the License - -10.1. New Versions - - Mozilla Foundation is the license steward. Except as provided in Section - 10.3, no one other than the license steward has the right to modify or - publish new versions of this License. Each version will be given a - distinguishing version number. - -10.2. Effect of New Versions - - You may distribute the Covered Software under the terms of the version - of the License under which You originally received the Covered Software, - or under the terms of any subsequent version published by the license - steward. - -10.3. Modified Versions - - If you create software not governed by this License, and you want to - create a new license for such software, you may create and use a - modified version of this License if you rename the license and remove - any references to the name of the license steward (except to note that - such modified license differs from this License). - -10.4. Distributing Source Code Form that is Incompatible With Secondary - Licenses If You choose to distribute Source Code Form that is - Incompatible With Secondary Licenses under the terms of this version of - the License, the notice described in Exhibit B of this License must be - attached. - -Exhibit A - Source Code Form License Notice - - This Source Code Form is subject to the - terms of the Mozilla Public License, v. - 2.0. If a copy of the MPL was not - distributed with this file, You can - obtain one at - http://mozilla.org/MPL/2.0/. - -If it is not possible or desirable to put the notice in a particular file, -then You may include the notice in a location (such as a LICENSE file in a -relevant directory) where a recipient would be likely to look for such a -notice. 
- -You may add additional accurate notices of copyright ownership. - -Exhibit B - "Incompatible With Secondary Licenses" Notice - - This Source Code Form is "Incompatible - With Secondary Licenses", as defined by - the Mozilla Public License, v. 2.0. diff --git a/licenses/github.com/howeyc/gopass/LICENSE.txt b/licenses/github.com/howeyc/gopass/LICENSE.txt deleted file mode 100644 index 14f74708a4..0000000000 --- a/licenses/github.com/howeyc/gopass/LICENSE.txt +++ /dev/null @@ -1,15 +0,0 @@ -ISC License - -Copyright (c) 2012 Chris Howey - -Permission to use, copy, modify, and distribute this software for any -purpose with or without fee is hereby granted, provided that the above -copyright notice and this permission notice appear in all copies. - -THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF -OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. diff --git a/licenses/github.com/howeyc/gopass/OPENSOLARIS.LICENSE b/licenses/github.com/howeyc/gopass/OPENSOLARIS.LICENSE deleted file mode 100644 index da23621dc8..0000000000 --- a/licenses/github.com/howeyc/gopass/OPENSOLARIS.LICENSE +++ /dev/null @@ -1,384 +0,0 @@ -Unless otherwise noted, all files in this distribution are released -under the Common Development and Distribution License (CDDL). -Exceptions are noted within the associated source files. - --------------------------------------------------------------------- - - -COMMON DEVELOPMENT AND DISTRIBUTION LICENSE Version 1.0 - -1. Definitions. - - 1.1. "Contributor" means each individual or entity that creates - or contributes to the creation of Modifications. - - 1.2. "Contributor Version" means the combination of the Original - Software, prior Modifications used by a Contributor (if any), - and the Modifications made by that particular Contributor. - - 1.3. "Covered Software" means (a) the Original Software, or (b) - Modifications, or (c) the combination of files containing - Original Software with files containing Modifications, in - each case including portions thereof. - - 1.4. "Executable" means the Covered Software in any form other - than Source Code. - - 1.5. "Initial Developer" means the individual or entity that first - makes Original Software available under this License. - - 1.6. "Larger Work" means a work which combines Covered Software or - portions thereof with code not governed by the terms of this - License. - - 1.7. "License" means this document. - - 1.8. "Licensable" means having the right to grant, to the maximum - extent possible, whether at the time of the initial grant or - subsequently acquired, any and all of the rights conveyed - herein. - - 1.9. "Modifications" means the Source Code and Executable form of - any of the following: - - A. Any file that results from an addition to, deletion from or - modification of the contents of a file containing Original - Software or previous Modifications; - - B. Any new file that contains any part of the Original - Software or previous Modifications; or - - C. Any new file that is contributed or otherwise made - available under the terms of this License. - - 1.10. 
"Original Software" means the Source Code and Executable - form of computer software code that is originally released - under this License. - - 1.11. "Patent Claims" means any patent claim(s), now owned or - hereafter acquired, including without limitation, method, - process, and apparatus claims, in any patent Licensable by - grantor. - - 1.12. "Source Code" means (a) the common form of computer software - code in which modifications are made and (b) associated - documentation included in or with such code. - - 1.13. "You" (or "Your") means an individual or a legal entity - exercising rights under, and complying with all of the terms - of, this License. For legal entities, "You" includes any - entity which controls, is controlled by, or is under common - control with You. For purposes of this definition, - "control" means (a) the power, direct or indirect, to cause - the direction or management of such entity, whether by - contract or otherwise, or (b) ownership of more than fifty - percent (50%) of the outstanding shares or beneficial - ownership of such entity. - -2. License Grants. - - 2.1. The Initial Developer Grant. - - Conditioned upon Your compliance with Section 3.1 below and - subject to third party intellectual property claims, the Initial - Developer hereby grants You a world-wide, royalty-free, - non-exclusive license: - - (a) under intellectual property rights (other than patent or - trademark) Licensable by Initial Developer, to use, - reproduce, modify, display, perform, sublicense and - distribute the Original Software (or portions thereof), - with or without Modifications, and/or as part of a Larger - Work; and - - (b) under Patent Claims infringed by the making, using or - selling of Original Software, to make, have made, use, - practice, sell, and offer for sale, and/or otherwise - dispose of the Original Software (or portions thereof). - - (c) The licenses granted in Sections 2.1(a) and (b) are - effective on the date Initial Developer first distributes - or otherwise makes the Original Software available to a - third party under the terms of this License. - - (d) Notwithstanding Section 2.1(b) above, no patent license is - granted: (1) for code that You delete from the Original - Software, or (2) for infringements caused by: (i) the - modification of the Original Software, or (ii) the - combination of the Original Software with other software - or devices. - - 2.2. Contributor Grant. - - Conditioned upon Your compliance with Section 3.1 below and - subject to third party intellectual property claims, each - Contributor hereby grants You a world-wide, royalty-free, - non-exclusive license: - - (a) under intellectual property rights (other than patent or - trademark) Licensable by Contributor to use, reproduce, - modify, display, perform, sublicense and distribute the - Modifications created by such Contributor (or portions - thereof), either on an unmodified basis, with other - Modifications, as Covered Software and/or as part of a - Larger Work; and - - (b) under Patent Claims infringed by the making, using, or - selling of Modifications made by that Contributor either - alone and/or in combination with its Contributor Version - (or portions of such combination), to make, use, sell, - offer for sale, have made, and/or otherwise dispose of: - (1) Modifications made by that Contributor (or portions - thereof); and (2) the combination of Modifications made by - that Contributor with its Contributor Version (or portions - of such combination). 
- - (c) The licenses granted in Sections 2.2(a) and 2.2(b) are - effective on the date Contributor first distributes or - otherwise makes the Modifications available to a third - party. - - (d) Notwithstanding Section 2.2(b) above, no patent license is - granted: (1) for any code that Contributor has deleted - from the Contributor Version; (2) for infringements caused - by: (i) third party modifications of Contributor Version, - or (ii) the combination of Modifications made by that - Contributor with other software (except as part of the - Contributor Version) or other devices; or (3) under Patent - Claims infringed by Covered Software in the absence of - Modifications made by that Contributor. - -3. Distribution Obligations. - - 3.1. Availability of Source Code. - - Any Covered Software that You distribute or otherwise make - available in Executable form must also be made available in Source - Code form and that Source Code form must be distributed only under - the terms of this License. You must include a copy of this - License with every copy of the Source Code form of the Covered - Software You distribute or otherwise make available. You must - inform recipients of any such Covered Software in Executable form - as to how they can obtain such Covered Software in Source Code - form in a reasonable manner on or through a medium customarily - used for software exchange. - - 3.2. Modifications. - - The Modifications that You create or to which You contribute are - governed by the terms of this License. You represent that You - believe Your Modifications are Your original creation(s) and/or - You have sufficient rights to grant the rights conveyed by this - License. - - 3.3. Required Notices. - - You must include a notice in each of Your Modifications that - identifies You as the Contributor of the Modification. You may - not remove or alter any copyright, patent or trademark notices - contained within the Covered Software, or any notices of licensing - or any descriptive text giving attribution to any Contributor or - the Initial Developer. - - 3.4. Application of Additional Terms. - - You may not offer or impose any terms on any Covered Software in - Source Code form that alters or restricts the applicable version - of this License or the recipients' rights hereunder. You may - choose to offer, and to charge a fee for, warranty, support, - indemnity or liability obligations to one or more recipients of - Covered Software. However, you may do so only on Your own behalf, - and not on behalf of the Initial Developer or any Contributor. - You must make it absolutely clear that any such warranty, support, - indemnity or liability obligation is offered by You alone, and You - hereby agree to indemnify the Initial Developer and every - Contributor for any liability incurred by the Initial Developer or - such Contributor as a result of warranty, support, indemnity or - liability terms You offer. - - 3.5. Distribution of Executable Versions. - - You may distribute the Executable form of the Covered Software - under the terms of this License or under the terms of a license of - Your choice, which may contain terms different from this License, - provided that You are in compliance with the terms of this License - and that the license for the Executable form does not attempt to - limit or alter the recipient's rights in the Source Code form from - the rights set forth in this License. 
If You distribute the - Covered Software in Executable form under a different license, You - must make it absolutely clear that any terms which differ from - this License are offered by You alone, not by the Initial - Developer or Contributor. You hereby agree to indemnify the - Initial Developer and every Contributor for any liability incurred - by the Initial Developer or such Contributor as a result of any - such terms You offer. - - 3.6. Larger Works. - - You may create a Larger Work by combining Covered Software with - other code not governed by the terms of this License and - distribute the Larger Work as a single product. In such a case, - You must make sure the requirements of this License are fulfilled - for the Covered Software. - -4. Versions of the License. - - 4.1. New Versions. - - Sun Microsystems, Inc. is the initial license steward and may - publish revised and/or new versions of this License from time to - time. Each version will be given a distinguishing version number. - Except as provided in Section 4.3, no one other than the license - steward has the right to modify this License. - - 4.2. Effect of New Versions. - - You may always continue to use, distribute or otherwise make the - Covered Software available under the terms of the version of the - License under which You originally received the Covered Software. - If the Initial Developer includes a notice in the Original - Software prohibiting it from being distributed or otherwise made - available under any subsequent version of the License, You must - distribute and make the Covered Software available under the terms - of the version of the License under which You originally received - the Covered Software. Otherwise, You may also choose to use, - distribute or otherwise make the Covered Software available under - the terms of any subsequent version of the License published by - the license steward. - - 4.3. Modified Versions. - - When You are an Initial Developer and You want to create a new - license for Your Original Software, You may create and use a - modified version of this License if You: (a) rename the license - and remove any references to the name of the license steward - (except to note that the license differs from this License); and - (b) otherwise make it clear that the license contains terms which - differ from this License. - -5. DISCLAIMER OF WARRANTY. - - COVERED SOFTWARE IS PROVIDED UNDER THIS LICENSE ON AN "AS IS" - BASIS, WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, - INCLUDING, WITHOUT LIMITATION, WARRANTIES THAT THE COVERED - SOFTWARE IS FREE OF DEFECTS, MERCHANTABLE, FIT FOR A PARTICULAR - PURPOSE OR NON-INFRINGING. THE ENTIRE RISK AS TO THE QUALITY AND - PERFORMANCE OF THE COVERED SOFTWARE IS WITH YOU. SHOULD ANY - COVERED SOFTWARE PROVE DEFECTIVE IN ANY RESPECT, YOU (NOT THE - INITIAL DEVELOPER OR ANY OTHER CONTRIBUTOR) ASSUME THE COST OF ANY - NECESSARY SERVICING, REPAIR OR CORRECTION. THIS DISCLAIMER OF - WARRANTY CONSTITUTES AN ESSENTIAL PART OF THIS LICENSE. NO USE OF - ANY COVERED SOFTWARE IS AUTHORIZED HEREUNDER EXCEPT UNDER THIS - DISCLAIMER. - -6. TERMINATION. - - 6.1. This License and the rights granted hereunder will terminate - automatically if You fail to comply with terms herein and fail to - cure such breach within 30 days of becoming aware of the breach. - Provisions which, by their nature, must remain in effect beyond - the termination of this License shall survive. - - 6.2. 
If You assert a patent infringement claim (excluding - declaratory judgment actions) against Initial Developer or a - Contributor (the Initial Developer or Contributor against whom You - assert such claim is referred to as "Participant") alleging that - the Participant Software (meaning the Contributor Version where - the Participant is a Contributor or the Original Software where - the Participant is the Initial Developer) directly or indirectly - infringes any patent, then any and all rights granted directly or - indirectly to You by such Participant, the Initial Developer (if - the Initial Developer is not the Participant) and all Contributors - under Sections 2.1 and/or 2.2 of this License shall, upon 60 days - notice from Participant terminate prospectively and automatically - at the expiration of such 60 day notice period, unless if within - such 60 day period You withdraw Your claim with respect to the - Participant Software against such Participant either unilaterally - or pursuant to a written agreement with Participant. - - 6.3. In the event of termination under Sections 6.1 or 6.2 above, - all end user licenses that have been validly granted by You or any - distributor hereunder prior to termination (excluding licenses - granted to You by any distributor) shall survive termination. - -7. LIMITATION OF LIABILITY. - - UNDER NO CIRCUMSTANCES AND UNDER NO LEGAL THEORY, WHETHER TORT - (INCLUDING NEGLIGENCE), CONTRACT, OR OTHERWISE, SHALL YOU, THE - INITIAL DEVELOPER, ANY OTHER CONTRIBUTOR, OR ANY DISTRIBUTOR OF - COVERED SOFTWARE, OR ANY SUPPLIER OF ANY OF SUCH PARTIES, BE - LIABLE TO ANY PERSON FOR ANY INDIRECT, SPECIAL, INCIDENTAL, OR - CONSEQUENTIAL DAMAGES OF ANY CHARACTER INCLUDING, WITHOUT - LIMITATION, DAMAGES FOR LOST PROFITS, LOSS OF GOODWILL, WORK - STOPPAGE, COMPUTER FAILURE OR MALFUNCTION, OR ANY AND ALL OTHER - COMMERCIAL DAMAGES OR LOSSES, EVEN IF SUCH PARTY SHALL HAVE BEEN - INFORMED OF THE POSSIBILITY OF SUCH DAMAGES. THIS LIMITATION OF - LIABILITY SHALL NOT APPLY TO LIABILITY FOR DEATH OR PERSONAL - INJURY RESULTING FROM SUCH PARTY'S NEGLIGENCE TO THE EXTENT - APPLICABLE LAW PROHIBITS SUCH LIMITATION. SOME JURISDICTIONS DO - NOT ALLOW THE EXCLUSION OR LIMITATION OF INCIDENTAL OR - CONSEQUENTIAL DAMAGES, SO THIS EXCLUSION AND LIMITATION MAY NOT - APPLY TO YOU. - -8. U.S. GOVERNMENT END USERS. - - The Covered Software is a "commercial item," as that term is - defined in 48 C.F.R. 2.101 (Oct. 1995), consisting of "commercial - computer software" (as that term is defined at 48 - C.F.R. 252.227-7014(a)(1)) and "commercial computer software - documentation" as such terms are used in 48 C.F.R. 12.212 - (Sept. 1995). Consistent with 48 C.F.R. 12.212 and 48 - C.F.R. 227.7202-1 through 227.7202-4 (June 1995), all - U.S. Government End Users acquire Covered Software with only those - rights set forth herein. This U.S. Government Rights clause is in - lieu of, and supersedes, any other FAR, DFAR, or other clause or - provision that addresses Government rights in computer software - under this License. - -9. MISCELLANEOUS. - - This License represents the complete agreement concerning subject - matter hereof. If any provision of this License is held to be - unenforceable, such provision shall be reformed only to the extent - necessary to make it enforceable. 
This License shall be governed - by the law of the jurisdiction specified in a notice contained - within the Original Software (except to the extent applicable law, - if any, provides otherwise), excluding such jurisdiction's - conflict-of-law provisions. Any litigation relating to this - License shall be subject to the jurisdiction of the courts located - in the jurisdiction and venue specified in a notice contained - within the Original Software, with the losing party responsible - for costs, including, without limitation, court costs and - reasonable attorneys' fees and expenses. The application of the - United Nations Convention on Contracts for the International Sale - of Goods is expressly excluded. Any law or regulation which - provides that the language of a contract shall be construed - against the drafter shall not apply to this License. You agree - that You alone are responsible for compliance with the United - States export administration regulations (and the export control - laws and regulation of any other countries) when You use, - distribute or otherwise make available any Covered Software. - -10. RESPONSIBILITY FOR CLAIMS. - - As between Initial Developer and the Contributors, each party is - responsible for claims and damages arising, directly or - indirectly, out of its utilization of rights under this License - and You agree to work with Initial Developer and Contributors to - distribute such responsibility on an equitable basis. Nothing - herein is intended or shall be deemed to constitute any admission - of liability. - --------------------------------------------------------------------- - -NOTICE PURSUANT TO SECTION 9 OF THE COMMON DEVELOPMENT AND -DISTRIBUTION LICENSE (CDDL) - -For Covered Software in this distribution, this License shall -be governed by the laws of the State of California (excluding -conflict-of-law provisions). - -Any litigation relating to this License shall be subject to the -jurisdiction of the Federal Courts of the Northern District of -California and the state courts of the State of California, with -venue lying in Santa Clara County, California. diff --git a/licenses/github.com/imdario/mergo/LICENSE b/licenses/github.com/imdario/mergo/LICENSE deleted file mode 100644 index 686680298d..0000000000 --- a/licenses/github.com/imdario/mergo/LICENSE +++ /dev/null @@ -1,28 +0,0 @@ -Copyright (c) 2013 Dario Castañé. All rights reserved. -Copyright (c) 2012 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/licenses/github.com/inconshreveable/mousetrap/LICENSE b/licenses/github.com/inconshreveable/mousetrap/LICENSE deleted file mode 100644 index 5f0d1fb6a7..0000000000 --- a/licenses/github.com/inconshreveable/mousetrap/LICENSE +++ /dev/null @@ -1,13 +0,0 @@ -Copyright 2014 Alan Shreve - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. diff --git a/licenses/github.com/json-iterator/go/LICENSE b/licenses/github.com/json-iterator/go/LICENSE deleted file mode 100644 index 2cf4f5ab28..0000000000 --- a/licenses/github.com/json-iterator/go/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2016 json-iterator - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/licenses/github.com/juju/ratelimit/LICENSE b/licenses/github.com/juju/ratelimit/LICENSE deleted file mode 100644 index ade9307b39..0000000000 --- a/licenses/github.com/juju/ratelimit/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ -All files in this repository are licensed as follows. If you contribute -to this repository, it is assumed that you license your contribution -under the same license unless you state otherwise. - -All files Copyright (C) 2015 Canonical Ltd. unless otherwise specified in the file. - -This software is licensed under the LGPLv3, included below. 
- -As a special exception to the GNU Lesser General Public License version 3 -("LGPL3"), the copyright holders of this Library give you permission to -convey to a third party a Combined Work that links statically or dynamically -to this Library without providing any Minimal Corresponding Source or -Minimal Application Code as set out in 4d or providing the installation -information set out in section 4e, provided that you comply with the other -provisions of LGPL3 and provided that you meet, for the Application the -terms and conditions of the license(s) which apply to the Application. - -Except as stated in this special exception, the provisions of LGPL3 will -continue to comply in full to this Library. If you modify this Library, you -may apply this exception to your version of this Library, but you are not -obliged to do so. If you do not wish to do so, delete this exception -statement from your version. This exception does not (and cannot) modify any -license terms which apply to the Application, with which you must still -comply. - - - GNU LESSER GENERAL PUBLIC LICENSE - Version 3, 29 June 2007 - - Copyright (C) 2007 Free Software Foundation, Inc. - Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. - - - This version of the GNU Lesser General Public License incorporates -the terms and conditions of version 3 of the GNU General Public -License, supplemented by the additional permissions listed below. - - 0. Additional Definitions. - - As used herein, "this License" refers to version 3 of the GNU Lesser -General Public License, and the "GNU GPL" refers to version 3 of the GNU -General Public License. - - "The Library" refers to a covered work governed by this License, -other than an Application or a Combined Work as defined below. - - An "Application" is any work that makes use of an interface provided -by the Library, but which is not otherwise based on the Library. -Defining a subclass of a class defined by the Library is deemed a mode -of using an interface provided by the Library. - - A "Combined Work" is a work produced by combining or linking an -Application with the Library. The particular version of the Library -with which the Combined Work was made is also called the "Linked -Version". - - The "Minimal Corresponding Source" for a Combined Work means the -Corresponding Source for the Combined Work, excluding any source code -for portions of the Combined Work that, considered in isolation, are -based on the Application, and not on the Linked Version. - - The "Corresponding Application Code" for a Combined Work means the -object code and/or source code for the Application, including any data -and utility programs needed for reproducing the Combined Work from the -Application, but excluding the System Libraries of the Combined Work. - - 1. Exception to Section 3 of the GNU GPL. - - You may convey a covered work under sections 3 and 4 of this License -without being bound by section 3 of the GNU GPL. - - 2. Conveying Modified Versions. 
- - If you modify a copy of the Library, and, in your modifications, a -facility refers to a function or data to be supplied by an Application -that uses the facility (other than as an argument passed when the -facility is invoked), then you may convey a copy of the modified -version: - - a) under this License, provided that you make a good faith effort to - ensure that, in the event an Application does not supply the - function or data, the facility still operates, and performs - whatever part of its purpose remains meaningful, or - - b) under the GNU GPL, with none of the additional permissions of - this License applicable to that copy. - - 3. Object Code Incorporating Material from Library Header Files. - - The object code form of an Application may incorporate material from -a header file that is part of the Library. You may convey such object -code under terms of your choice, provided that, if the incorporated -material is not limited to numerical parameters, data structure -layouts and accessors, or small macros, inline functions and templates -(ten or fewer lines in length), you do both of the following: - - a) Give prominent notice with each copy of the object code that the - Library is used in it and that the Library and its use are - covered by this License. - - b) Accompany the object code with a copy of the GNU GPL and this license - document. - - 4. Combined Works. - - You may convey a Combined Work under terms of your choice that, -taken together, effectively do not restrict modification of the -portions of the Library contained in the Combined Work and reverse -engineering for debugging such modifications, if you also do each of -the following: - - a) Give prominent notice with each copy of the Combined Work that - the Library is used in it and that the Library and its use are - covered by this License. - - b) Accompany the Combined Work with a copy of the GNU GPL and this license - document. - - c) For a Combined Work that displays copyright notices during - execution, include the copyright notice for the Library among - these notices, as well as a reference directing the user to the - copies of the GNU GPL and this license document. - - d) Do one of the following: - - 0) Convey the Minimal Corresponding Source under the terms of this - License, and the Corresponding Application Code in a form - suitable for, and under terms that permit, the user to - recombine or relink the Application with a modified version of - the Linked Version to produce a modified Combined Work, in the - manner specified by section 6 of the GNU GPL for conveying - Corresponding Source. - - 1) Use a suitable shared library mechanism for linking with the - Library. A suitable mechanism is one that (a) uses at run time - a copy of the Library already present on the user's computer - system, and (b) will operate properly with a modified version - of the Library that is interface-compatible with the Linked - Version. - - e) Provide Installation Information, but only if you would otherwise - be required to provide such information under section 6 of the - GNU GPL, and only to the extent that such information is - necessary to install and execute a modified version of the - Combined Work produced by recombining or relinking the - Application with a modified version of the Linked Version. (If - you use option 4d0, the Installation Information must accompany - the Minimal Corresponding Source and Corresponding Application - Code. 
If you use option 4d1, you must provide the Installation - Information in the manner specified by section 6 of the GNU GPL - for conveying Corresponding Source.) - - 5. Combined Libraries. - - You may place library facilities that are a work based on the -Library side by side in a single library together with other library -facilities that are not Applications and are not covered by this -License, and convey such a combined library under terms of your -choice, if you do both of the following: - - a) Accompany the combined library with a copy of the same work based - on the Library, uncombined with any other library facilities, - conveyed under the terms of this License. - - b) Give prominent notice with the combined library that part of it - is a work based on the Library, and explaining where to find the - accompanying uncombined form of the same work. - - 6. Revised Versions of the GNU Lesser General Public License. - - The Free Software Foundation may publish revised and/or new versions -of the GNU Lesser General Public License from time to time. Such new -versions will be similar in spirit to the present version, but may -differ in detail to address new problems or concerns. - - Each version is given a distinguishing version number. If the -Library as you received it specifies that a certain numbered version -of the GNU Lesser General Public License "or any later version" -applies to it, you have the option of following the terms and -conditions either of that published version or of any later version -published by the Free Software Foundation. If the Library as you -received it does not specify a version number of the GNU Lesser -General Public License, you may choose any version of the GNU Lesser -General Public License ever published by the Free Software Foundation. - - If the Library as you received it specifies that a proxy can decide -whether future versions of the GNU Lesser General Public License shall -apply, that proxy's public statement of acceptance of any version is -permanent authorization for you to choose that version for the -Library. diff --git a/licenses/github.com/lib/pq/LICENSE.md b/licenses/github.com/lib/pq/LICENSE.md deleted file mode 100644 index 5773904a30..0000000000 --- a/licenses/github.com/lib/pq/LICENSE.md +++ /dev/null @@ -1,8 +0,0 @@ -Copyright (c) 2011-2013, 'pq' Contributors -Portions Copyright (C) 2011 Blake Mizerany - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/licenses/github.com/mailru/easyjson/LICENSE b/licenses/github.com/mailru/easyjson/LICENSE deleted file mode 100644 index fbff658f70..0000000000 --- a/licenses/github.com/mailru/easyjson/LICENSE +++ /dev/null @@ -1,7 +0,0 @@ -Copyright (c) 2016 Mail.Ru Group - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/licenses/github.com/mattn/go-colorable/LICENSE b/licenses/github.com/mattn/go-colorable/LICENSE deleted file mode 100644 index 91b5cef30e..0000000000 --- a/licenses/github.com/mattn/go-colorable/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2016 Yasuhiro Matsumoto - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/licenses/github.com/mattn/go-isatty/LICENSE b/licenses/github.com/mattn/go-isatty/LICENSE deleted file mode 100644 index 65dc692b6b..0000000000 --- a/licenses/github.com/mattn/go-isatty/LICENSE +++ /dev/null @@ -1,9 +0,0 @@ -Copyright (c) Yasuhiro MATSUMOTO - -MIT License (Expat) - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/licenses/github.com/modern-go/concurrent/LICENSE b/licenses/github.com/modern-go/concurrent/LICENSE deleted file mode 100644 index 261eeb9e9f..0000000000 --- a/licenses/github.com/modern-go/concurrent/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/licenses/github.com/modern-go/reflect2/LICENSE b/licenses/github.com/modern-go/reflect2/LICENSE deleted file mode 100644 index 261eeb9e9f..0000000000 --- a/licenses/github.com/modern-go/reflect2/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. 
For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/licenses/github.com/petar/GoLLRB/LICENSE b/licenses/github.com/petar/GoLLRB/LICENSE deleted file mode 100644 index b75312c787..0000000000 --- a/licenses/github.com/petar/GoLLRB/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2010, Petar Maymounkov -All rights reserved. - -Redistribution and use in source and binary forms, with or without modification, -are permitted provided that the following conditions are met: - -(*) Redistributions of source code must retain the above copyright notice, this list -of conditions and the following disclaimer. - -(*) Redistributions in binary form must reproduce the above copyright notice, this -list of conditions and the following disclaimer in the documentation and/or -other materials provided with the distribution. - -(*) Neither the name of Petar Maymounkov nor the names of its contributors may be -used to endorse or promote products derived from this software without specific -prior written permission. 
- -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR -ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON -ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/licenses/github.com/peterbourgon/diskv/LICENSE b/licenses/github.com/peterbourgon/diskv/LICENSE deleted file mode 100644 index 41ce7f16e1..0000000000 --- a/licenses/github.com/peterbourgon/diskv/LICENSE +++ /dev/null @@ -1,19 +0,0 @@ -Copyright (c) 2011-2012 Peter Bourgon - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/licenses/github.com/russross/blackfriday/LICENSE.txt b/licenses/github.com/russross/blackfriday/LICENSE.txt deleted file mode 100644 index 2885af3602..0000000000 --- a/licenses/github.com/russross/blackfriday/LICENSE.txt +++ /dev/null @@ -1,29 +0,0 @@ -Blackfriday is distributed under the Simplified BSD License: - -> Copyright © 2011 Russ Ross -> All rights reserved. -> -> Redistribution and use in source and binary forms, with or without -> modification, are permitted provided that the following conditions -> are met: -> -> 1. Redistributions of source code must retain the above copyright -> notice, this list of conditions and the following disclaimer. -> -> 2. Redistributions in binary form must reproduce the above -> copyright notice, this list of conditions and the following -> disclaimer in the documentation and/or other materials provided with -> the distribution. -> -> THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -> "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -> LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS -> FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE -> COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, -> INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, -> BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -> LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -> CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT -> LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN -> ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -> POSSIBILITY OF SUCH DAMAGE. diff --git a/licenses/github.com/sirupsen/logrus/LICENSE b/licenses/github.com/sirupsen/logrus/LICENSE deleted file mode 100644 index f090cb42f3..0000000000 --- a/licenses/github.com/sirupsen/logrus/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2014 Simon Eskildsen - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/licenses/github.com/spf13/cobra/LICENSE.txt b/licenses/github.com/spf13/cobra/LICENSE.txt deleted file mode 100644 index 298f0e2665..0000000000 --- a/licenses/github.com/spf13/cobra/LICENSE.txt +++ /dev/null @@ -1,174 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. 
- - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. diff --git a/licenses/github.com/spf13/pflag/LICENSE b/licenses/github.com/spf13/pflag/LICENSE deleted file mode 100644 index 63ed1cfea1..0000000000 --- a/licenses/github.com/spf13/pflag/LICENSE +++ /dev/null @@ -1,28 +0,0 @@ -Copyright (c) 2012 Alex Ogier. All rights reserved. -Copyright (c) 2012 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/licenses/golang.org/x/crypto/LICENSE b/licenses/golang.org/x/crypto/LICENSE deleted file mode 100644 index 6a66aea5ea..0000000000 --- a/licenses/golang.org/x/crypto/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. 
- -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/licenses/golang.org/x/net/LICENSE b/licenses/golang.org/x/net/LICENSE deleted file mode 100644 index 6a66aea5ea..0000000000 --- a/licenses/golang.org/x/net/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/licenses/golang.org/x/sys/LICENSE b/licenses/golang.org/x/sys/LICENSE deleted file mode 100644 index 6a66aea5ea..0000000000 --- a/licenses/golang.org/x/sys/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. 
- -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/licenses/golang.org/x/text/LICENSE b/licenses/golang.org/x/text/LICENSE deleted file mode 100644 index 6a66aea5ea..0000000000 --- a/licenses/golang.org/x/text/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/licenses/gopkg.in/inf.v0/LICENSE b/licenses/gopkg.in/inf.v0/LICENSE deleted file mode 100644 index 87a5cede33..0000000000 --- a/licenses/gopkg.in/inf.v0/LICENSE +++ /dev/null @@ -1,28 +0,0 @@ -Copyright (c) 2012 Péter Surányi. Portions Copyright (c) 2009 The Go -Authors. All rights reserved. 
- -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/licenses/gopkg.in/robfig/cron.v2/LICENSE b/licenses/gopkg.in/robfig/cron.v2/LICENSE deleted file mode 100644 index 3a0f627ffe..0000000000 --- a/licenses/gopkg.in/robfig/cron.v2/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -Copyright (C) 2012 Rob Figueiredo -All Rights Reserved. - -MIT LICENSE - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/licenses/gopkg.in/yaml.v2/LICENSE b/licenses/gopkg.in/yaml.v2/LICENSE deleted file mode 100644 index 8dada3edaf..0000000000 --- a/licenses/gopkg.in/yaml.v2/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. 
- - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright {yyyy} {name of copyright owner} - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/licenses/k8s.io/api/LICENSE b/licenses/k8s.io/api/LICENSE deleted file mode 100644 index d645695673..0000000000 --- a/licenses/k8s.io/api/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. 
Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/licenses/k8s.io/apimachinery/LICENSE b/licenses/k8s.io/apimachinery/LICENSE deleted file mode 100644 index d645695673..0000000000 --- a/licenses/k8s.io/apimachinery/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/licenses/k8s.io/client-go/LICENSE b/licenses/k8s.io/client-go/LICENSE deleted file mode 100644 index d645695673..0000000000 --- a/licenses/k8s.io/client-go/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/licenses/k8s.io/kube-openapi/LICENSE b/licenses/k8s.io/kube-openapi/LICENSE deleted file mode 100644 index d645695673..0000000000 --- a/licenses/k8s.io/kube-openapi/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. 
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/pkg/apis/crunchydata.com/v1/cluster.go b/pkg/apis/crunchydata.com/v1/cluster.go
index bdc02406da..6626f28b13 100644
--- a/pkg/apis/crunchydata.com/v1/cluster.go
+++ b/pkg/apis/crunchydata.com/v1/cluster.go
@@ -1,7 +1,7 @@
 package v1

 /*
- Copyright 2017 - 2020 Crunchy Data Solutions, Inc.
+ Copyright 2017 - 2023 Crunchy Data Solutions, Inc.
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
 You may obtain a copy of the License at
@@ -17,6 +17,7 @@ package v1

 import (
 	"fmt"
+	"strings"

 	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -49,12 +50,19 @@ type PgclusterSpec struct {
 	CCPImagePrefix string `json:"ccpimageprefix"`
 	PGOImagePrefix string `json:"pgoimageprefix"`
 	Port string `json:"port"`
-	PGBadgerPort string `json:"pgbadgerport"`
-	ExporterPort string `json:"exporterport"`
+	// DisableAutofail, if set to true, disables the autofail/HA capabilities.
+	// We phrase this in the negative so that autofail defaults to being on,
+	// given we are working with a legacy CRD here
+	DisableAutofail bool `json:"disableAutofail"`
+	// PGBadger, if set to true, enables the pgBadger sidecar
+	PGBadger bool `json:"pgBadger"`
+	PGBadgerPort string `json:"pgbadgerport"`
+	// Exporter, if set to true, enables the exporter sidecar
+	Exporter bool `json:"exporter"`
+	ExporterPort string `json:"exporterport"`
 	PrimaryStorage PgStorageSpec
 	WALStorage PgStorageSpec
-	ArchiveStorage PgStorageSpec
 	ReplicaStorage PgStorageSpec
 	BackrestStorage PgStorageSpec
@@ -102,36 +110,68 @@ type PgclusterSpec struct {
 	// PgBouncer contains all of the settings to properly maintain a pgBouncer
 	// implementation
-	PgBouncer PgBouncerSpec `json:"pgBouncer"`
-	User string `json:"user"`
-	Database string `json:"database"`
-	Replicas string `json:"replicas"`
-	UserSecretName string `json:"usersecretname"`
-	RootSecretName string `json:"rootsecretname"`
-	PrimarySecretName string `json:"primarysecretname"`
-	CollectSecretName string `json:"collectSecretName"`
-	Status string `json:"status"`
-	CustomConfig string `json:"customconfig"`
-	UserLabels map[string]string `json:"userlabels"`
-	PodAntiAffinity PodAntiAffinitySpec `json:"podAntiAffinity"`
-	SyncReplication *bool `json:"syncReplication"`
-	BackrestConfig []v1.VolumeProjection `json:"backrestConfig"`
-	BackrestS3Bucket string `json:"backrestS3Bucket"`
-	BackrestS3Region string `json:"backrestS3Region"`
-	BackrestS3Endpoint string `json:"backrestS3Endpoint"`
-	BackrestS3URIStyle string `json:"backrestS3URIStyle"`
-	BackrestS3VerifyTLS string `json:"backrestS3VerifyTLS"`
-	BackrestRepoPath string `json:"backrestRepoPath"`
-	TablespaceMounts map[string]PgStorageSpec `json:"tablespaceMounts"`
-	TLS TLSSpec `json:"tls"`
-	TLSOnly bool `json:"tlsOnly"`
-	Standby bool `json:"standby"`
-	Shutdown bool `json:"shutdown"`
-	PGDataSource PGDataSourceSpec `json:"pgDataSource"`
+	PgBouncer PgBouncerSpec `json:"pgBouncer"`
+	User string `json:"user"`
+	Database string `json:"database"`
+	Replicas string `json:"replicas"`
+	Status string `json:"status"`
+	CustomConfig string `json:"customconfig"`
+	UserLabels map[string]string `json:"userlabels"`
+	NodeAffinity NodeAffinitySpec `json:"nodeAffinity"`
+	PodAntiAffinity PodAntiAffinitySpec `json:"podAntiAffinity"`
+	SyncReplication *bool `json:"syncReplication"`
+	BackrestConfig []v1.VolumeProjection `json:"backrestConfig"`
+	BackrestS3Bucket string `json:"backrestS3Bucket"`
+	BackrestS3Region string `json:"backrestS3Region"`
+	BackrestS3Endpoint string `json:"backrestS3Endpoint"`
+	BackrestS3URIStyle string `json:"backrestS3URIStyle"`
+	BackrestS3VerifyTLS string `json:"backrestS3VerifyTLS"`
+	BackrestRepoPath string `json:"backrestRepoPath"`
+	// BackrestStorageTypes is a list of the different pgBackRest storage types
+	// to be used for this cluster. Presently it only accepts "local" ("posix")
+	// and "s3", but it is structured to support other repository types in the
+	// future. If the array is empty, "local" ("posix") is presumed.
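+	// For example (illustrative): the comma-delimited form "posix,s3", parsed
+	// by ParseBackrestStorageTypes below, enables both a local (posix) and an
+	// S3-based repository.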
+	BackrestStorageTypes []BackrestStorageType `json:"backrestStorageTypes"`
+	TablespaceMounts map[string]PgStorageSpec `json:"tablespaceMounts"`
+	TLS TLSSpec `json:"tls"`
+	TLSOnly bool `json:"tlsOnly"`
+	Standby bool `json:"standby"`
+	Shutdown bool `json:"shutdown"`
+	PGDataSource PGDataSourceSpec `json:"pgDataSource"`

 	// Annotations contains a set of Deployment (and by association, Pod)
 	// annotations that are propagated to all managed Deployments
 	Annotations ClusterAnnotations `json:"annotations"`
+
+	// ServiceType references the type of Service that should be used when
+	// deploying PostgreSQL instances
+	ServiceType v1.ServiceType `json:"serviceType"`
+
+	// Tolerations are an optional list of Pod toleration rules that are applied
+	// to the PostgreSQL instance.
+	Tolerations []v1.Toleration `json:"tolerations"`
+}
+
+// BackrestStorageType refers to the types of storage accepted by pgBackRest
+type BackrestStorageType string
+
+const (
+	// BackrestStorageTypeLocal is DEPRECATED. It is equivalent to "posix"
+	// storage, which is the default. It is retained for legacy purposes and
+	// maps directly to "posix"
+	BackrestStorageTypeLocal BackrestStorageType = "local"
+	// BackrestStorageTypePosix is the "posix" storage type and in the fullness
+	// of time should supersede local
+	BackrestStorageTypePosix BackrestStorageType = "posix"
+	// BackrestStorageTypeS3 is the S3 storage type for using S3 or S3-equivalent
+	// storage
+	BackrestStorageTypeS3 BackrestStorageType = "s3"
+)
+
+var BackrestStorageTypes = []BackrestStorageType{
+	BackrestStorageTypeLocal,
+	BackrestStorageTypePosix,
+	BackrestStorageTypeS3,
 }

 // ClusterAnnotations provides a set of annotations that can be propagated to
@@ -209,6 +249,32 @@ type PgclusterStatus struct {
 // swagger:ignore
 type PgclusterState string

+// NodeAffinityDefaultWeight is the default weighting for the preferred node
+// affinity. This was taken from our legacy template for handling this, so there
+// may be some logic to this, or this could be an arbitrary weight. Either way,
+// the number needs to be somewhere between [1, 100].
+const NodeAffinityDefaultWeight int32 = 10
+
+// NodeAffinitySpec contains optional NodeAffinity rules for the different
+// deployment types managed by the Operator. While similar to how the Operator
+// handles pod anti-affinity, it references the supported Kubernetes objects
+// to maintain more familiarity and consistency.
+//
+// All of these are optional, so one must ensure they check for nils.
+type NodeAffinitySpec struct {
+	Default *v1.NodeAffinity `json:"default"`
+}
+
+// NodeAffinityType indicates the type of node affinity that the request seeks
+// to use.
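+// (For reference: in the native core/v1 NodeAffinity type, "preferred" and
+// "required" map to preferredDuringSchedulingIgnoredDuringExecution and
+// requiredDuringSchedulingIgnoredDuringExecution, respectively.)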
+// Given the custom resource uses the native Kubernetes types to set node
+// affinity, this is just for convenience for the API
+type NodeAffinityType int
+
+const (
+	NodeAffinityTypePreferred NodeAffinityType = iota
+	NodeAffinityTypeRequired
+)
+
 // PodAntiAffinityDeployment distinguishes between the different types of
 // Deployments that can leverage PodAntiAffinity
 type PodAntiAffinityDeployment int
@@ -254,6 +320,15 @@ type PgBouncerSpec struct {
 	// Limits, if specified, contains the container resource limits
 	// for any pgBouncer Deployments that are part of a PostgreSQL cluster
 	Limits v1.ResourceList `json:"limits"`
+	// ServiceType references the type of Service that should be used when
+	// deploying the pgBouncer instances. If unset, it defaults to the value of
+	// the PostgreSQL cluster.
+	ServiceType v1.ServiceType `json:"serviceType"`
+	// TLSSecret contains the name of the secret to use that contains the TLS
+	// keypair for pgBouncer.
+	// This follows the Kubernetes secret format ("kubernetes.io/tls") which has
+	// two keys: tls.crt and tls.key
+	TLSSecret string `json:"tlsSecret"`
 }

 // Enabled returns true if the pgBouncer is enabled for the cluster, i.e. there
@@ -339,3 +414,54 @@ func (p PodAntiAffinityType) Validate() error {
 	return fmt.Errorf("Invalid pod anti-affinity type. Valid values are '%s', '%s' or '%s'",
 		PodAntiAffinityRequired, PodAntiAffinityPreffered, PodAntiAffinityDisabled)
 }
+
+// ParseBackrestStorageTypes takes a comma-delimited string of potential
+// pgBackRest storage types and attempts to parse it into a recognizable array.
+// If an invalid type is passed in, an error is returned.
+func ParseBackrestStorageTypes(storageTypeStr string) ([]BackrestStorageType, error) {
+	storageTypes := make([]BackrestStorageType, 0)
+
+	parsed := strings.Split(storageTypeStr, ",")
+
+	// if no storage types found in the string, return
+	if len(parsed) == 1 && parsed[0] == "" {
+		return nil, ErrStorageTypesEmpty
+	}
+
+	// iterate through the list and determine if there are valid storage types.
+	// map all "local" into "posix"
+	for _, s := range parsed {
+		storageType := BackrestStorageType(s)
+
+		switch storageType {
+		default:
+			return nil, fmt.Errorf("%w: %s", ErrInvalidStorageType, storageType)
+		case BackrestStorageTypePosix, BackrestStorageTypeLocal:
+			storageTypes = append(storageTypes, BackrestStorageTypePosix)
+		case BackrestStorageTypeS3:
+			storageTypes = append(storageTypes, storageType)
+		}
+	}
+
+	return storageTypes, nil
+}
+
+// UserSecretName returns the name of a Kubernetes Secret representing the user.
+// It delegates to UserSecretNameFromClusterName. This is the preferred method,
+// as it requires less work from the caller, but in rare cases
+// UserSecretNameFromClusterName must be called directly because the cluster
+// object is unavailable.
+func UserSecretName(cluster *Pgcluster, username string) string {
+	return UserSecretNameFromClusterName(cluster.Name, username)
+}
+
+// UserSecretNameFromClusterName returns the name of a Kubernetes Secret
+// representing a user.
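+// For example (illustrative names): for a cluster "hippo", user "app" maps to
+// "hippo-app-secret", while the PGUserMonitor user ("ccp_monitoring") maps to
+// "hippo-exporter-secret".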
+func UserSecretNameFromClusterName(clusterName, username string) string {
+	switch username {
+	default: // standard format
+		return fmt.Sprintf("%s-%s-secret", clusterName, username)
+	case PGUserMonitor:
+		return fmt.Sprintf("%s-exporter-secret", clusterName)
+	}
+}
diff --git a/pkg/apis/crunchydata.com/v1/cluster_test.go b/pkg/apis/crunchydata.com/v1/cluster_test.go
new file mode 100644
index 0000000000..9bc23ef8d9
--- /dev/null
+++ b/pkg/apis/crunchydata.com/v1/cluster_test.go
@@ -0,0 +1,194 @@
+package v1
+
+/*
+ Copyright 2020 - 2023 Crunchy Data Solutions, Inc.
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+import (
+	"errors"
+	"fmt"
+	"testing"
+
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+func TestParseBackrestStorageTypes(t *testing.T) {
+	t.Run("empty", func(t *testing.T) {
+		_, err := ParseBackrestStorageTypes("")
+
+		if !errors.Is(err, ErrStorageTypesEmpty) {
+			t.Fatalf("expected ErrStorageTypesEmpty actual %q", err.Error())
+		}
+	})
+
+	t.Run("invalid", func(t *testing.T) {
+		_, err := ParseBackrestStorageTypes("bad bad bad")
+
+		if !errors.Is(err, ErrInvalidStorageType) {
+			t.Fatalf("expected ErrInvalidStorageType actual %q", err.Error())
+		}
+
+		_, err = ParseBackrestStorageTypes("posix,bad")
+
+		if !errors.Is(err, ErrInvalidStorageType) {
+			t.Fatalf("expected ErrInvalidStorageType actual %q", err.Error())
+		}
+	})
+
+	t.Run("local should be posix", func(t *testing.T) {
+		storageTypes, err := ParseBackrestStorageTypes("local")
+
+		if err != nil {
+			t.Fatalf("expected no error actual %q", err.Error())
+		}
+
+		if len(storageTypes) != 1 {
+			t.Fatalf("expected 1 storage type, got %d", len(storageTypes))
+		}
+
+		if storageTypes[0] != BackrestStorageTypePosix {
+			t.Fatalf("posix expected but not found")
+		}
+	})
+
+	t.Run("posix", func(t *testing.T) {
+		storageTypes, err := ParseBackrestStorageTypes("posix")
+
+		if err != nil {
+			t.Fatalf("expected no error actual %q", err.Error())
+		}
+
+		if len(storageTypes) != 1 {
+			t.Fatalf("expected 1 storage type, got %d", len(storageTypes))
+		}
+
+		if storageTypes[0] != BackrestStorageTypePosix {
+			t.Fatalf("posix expected but not found")
+		}
+	})
+
+	t.Run("s3", func(t *testing.T) {
+		storageTypes, err := ParseBackrestStorageTypes("s3")
+
+		if err != nil {
+			t.Fatalf("expected no error actual %q", err.Error())
+		}
+
+		if len(storageTypes) != 1 {
+			t.Fatalf("expected 1 storage type, got %d", len(storageTypes))
+		}
+
+		if storageTypes[0] != BackrestStorageTypeS3 {
+			t.Fatalf("s3 expected but not found")
+		}
+	})
+
+	t.Run("posix and s3", func(t *testing.T) {
+		storageTypes, err := ParseBackrestStorageTypes("posix,s3")
+
+		if err != nil {
+			t.Fatalf("expected no error actual %q", err.Error())
+		}
+
+		if len(storageTypes) != 2 {
+			t.Fatalf("expected 2 storage types, actual %d", len(storageTypes))
+		}
+
+		posix := false
+		s3 := false
+		for _, storageType := range storageTypes {
+			posix = posix || (storageType == BackrestStorageTypePosix)
+			s3 = s3 || (storageType == BackrestStorageTypeS3)
+		}
+
+		if !(posix && s3) {
+			t.Fatalf("posix and s3 expected but not 
found") + } + }) + + t.Run("local and s3", func(t *testing.T) { + storageTypes, err := ParseBackrestStorageTypes("local,s3") + + if err != nil { + t.Fatalf("expected no error actual %q", err.Error()) + } + + if len(storageTypes) != 2 { + t.Fatalf("expected 2 storage types, actual %d", len(storageTypes)) + } + + posix := false + s3 := false + for _, storageType := range storageTypes { + posix = posix || (storageType == BackrestStorageTypePosix) + s3 = s3 || (storageType == BackrestStorageTypeS3) + } + + if !(posix && s3) { + t.Fatalf("posix and s3 expected but not found") + } + }) +} + +func TestUserSecretName(t *testing.T) { + cluster := &Pgcluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "second-pick", + }, + Spec: PgclusterSpec{ + ClusterName: "second-pick", + User: "puppy", + }, + } + + t.Run(PGUserMonitor, func(t *testing.T) { + expected := fmt.Sprintf("%s-%s-secret", cluster.Name, "exporter") + actual := UserSecretName(cluster, PGUserMonitor) + if expected != actual { + t.Fatalf("expected %q, got %q", expected, actual) + } + }) + + t.Run("any other user", func(t *testing.T) { + for _, user := range []string{PGUserSuperuser, PGUserReplication, cluster.Spec.User} { + expected := fmt.Sprintf("%s-%s-secret", cluster.Name, user) + actual := UserSecretName(cluster, user) + if expected != actual { + t.Fatalf("expected %q, got %q", expected, actual) + } + } + }) +} + +func TestUserSecretNameFromClusterName(t *testing.T) { + clusterName := "second-pick" + + t.Run(PGUserMonitor, func(t *testing.T) { + expected := fmt.Sprintf("%s-%s-secret", clusterName, "exporter") + actual := UserSecretNameFromClusterName(clusterName, PGUserMonitor) + if expected != actual { + t.Fatalf("expected %q, got %q", expected, actual) + } + }) + + t.Run("any other user", func(t *testing.T) { + for _, user := range []string{PGUserSuperuser, PGUserReplication, "puppy"} { + expected := fmt.Sprintf("%s-%s-secret", clusterName, user) + actual := UserSecretNameFromClusterName(clusterName, user) + if expected != actual { + t.Fatalf("expected %q, got %q", expected, actual) + } + } + }) +} diff --git a/pkg/apis/crunchydata.com/v1/common.go b/pkg/apis/crunchydata.com/v1/common.go index 723c2a0a60..07bfd11934 100644 --- a/pkg/apis/crunchydata.com/v1/common.go +++ b/pkg/apis/crunchydata.com/v1/common.go @@ -1,7 +1,7 @@ package v1 /* -Copyright 2017 - 2020 Crunchy Data Solutions, Inc. +Copyright 2017 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -22,18 +22,6 @@ import ( log "github.com/sirupsen/logrus" ) -// RootSecretSuffix ... -const RootSecretSuffix = "-postgres-secret" - -// UserSecretSuffix ... -const UserSecretSuffix = "-secret" - -// PrimarySecretSuffix ... -const PrimarySecretSuffix = "-primaryuser-secret" - -// ExporterSecretSuffix ... -const ExporterSecretSuffix = "-exporter-secret" - // StorageExisting ... 
 const StorageExisting = "existing"
@@ -49,8 +37,8 @@ const StorageDynamic = "dynamic"
 // the following are standard PostgreSQL user service accounts that are created
 // as part of managing the PostgreSQL cluster environment via the Operator
 const (
-	// PGUserAdmin is a special user that can perform administrative actions
-	// without being a superuser itself
+	// PGUserAdmin is DEPRECATED and is retained only so that it can be filtered
+	// out as a system user in older systems
 	PGUserAdmin = "crunchyadm"
 	// PGUserMonitor is the monitoring user that can access metric data
 	PGUserMonitor = "ccp_monitoring"
@@ -114,7 +102,6 @@ func (s PgStorageSpec) GetSupplementalGroups() []int64 {
 		}
 		supplementalGroup, err := strconv.Atoi(result)
-		// if there is an error, only warn about it and continue through the loop
 		if err != nil {
 			log.Warnf("malformed storage supplemental group: %v", err)
diff --git a/pkg/apis/crunchydata.com/v1/common_test.go b/pkg/apis/crunchydata.com/v1/common_test.go
index 8ad909e64f..353d92f3e5 100644
--- a/pkg/apis/crunchydata.com/v1/common_test.go
+++ b/pkg/apis/crunchydata.com/v1/common_test.go
@@ -1,7 +1,7 @@
 package v1

 /*
- Copyright 2020 Crunchy Data Solutions, Inc.
+ Copyright 2020 - 2023 Crunchy Data Solutions, Inc.
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
 You may obtain a copy of the License at
diff --git a/pkg/apis/crunchydata.com/v1/doc.go b/pkg/apis/crunchydata.com/v1/doc.go
index 3d5c49cd25..9482c53360 100644
--- a/pkg/apis/crunchydata.com/v1/doc.go
+++ b/pkg/apis/crunchydata.com/v1/doc.go
@@ -3,7 +3,6 @@ Crunchy PostgreSQL Operator API
 The Crunchy PostgreSQL Operator API defines HTTP(S) interactions with the
 Crunchy PostgreSQL Operator.
-
 ## Direct API Calls

 The API can also be accessed by interacting directly with the API server. This
@@ -15,7 +14,6 @@ that includes the content type and the `--insecure` flag. These flags will be
 the same for all of your interactions with the API server and can be seen in
 the following examples.
-
 ###### Get API Server Version

 The most basic example of this interaction is getting the version of the API
@@ -52,14 +50,14 @@ cluster.

 ```
 curl --cacert $PGO_CA_CERT --key $PGO_CLIENT_KEY --cert $PGO_CA_CERT -u \
 admin:examplepassword -H "Content-Type:application/json" --insecure -X \
-POST --data \
-  '{"ClientVersion":"4.5.0",
-  "Namespace":"pgouser1",
-  "Name":"mycluster",
-$PGO_APISERVER_URL/clusters
-```
+  POST --data \
+  '{"ClientVersion":"4.6.10",
+  "Namespace":"pgouser1",
+  "Name":"mycluster",
+$PGO_APISERVER_URL/clusters
+```

 ###### Show and Delete Cluster

 The last two examples show you how to `show` and `delete` a cluster. Notice
@@ -71,35 +69,39 @@ show all of the clusters that are in the given namespace.
``` curl --cacert $PGO_CA_CERT --key $PGO_CLIENT_KEY --cert $PGO_CA_CERT -u \ admin:examplepassword -H "Content-Type:application/json" --insecure -X \ -POST --data \ - '{"ClientVersion":"4.5.0", - "Namespace":"pgouser1", - "Clustername":"mycluster"}' \ + + POST --data \ + '{"ClientVersion":"4.6.10", + "Namespace":"pgouser1", + "Clustername":"mycluster"}' \ + $PGO_APISERVER_URL/showclusters ``` ``` curl --cacert $PGO_CA_CERT --key $PGO_CLIENT_KEY --cert $PGO_CA_CERT -u \ admin:examplepassword -H "Content-Type:application/json" --insecure -X \ -POST --data \ - '{"ClientVersion":"4.5.0", - "Namespace":"pgouser1", - "Clustername":"mycluster"}' \ + + POST --data \ + '{"ClientVersion":"4.6.10", + "Namespace":"pgouser1", + "Clustername":"mycluster"}' \ + $PGO_APISERVER_URL/clustersdelete ``` - Schemes: http, https - BasePath: / - Version: 4.5.0 - License: Apache 2.0 http://www.apache.org/licenses/LICENSE-2.0 - Contact: Crunchy Data https://www.crunchydata.com/ + Schemes: http, https + BasePath: / + Version: 4.6.10 + License: Apache 2.0 http://www.apache.org/licenses/LICENSE-2.0 + Contact: Crunchy Data https://www.crunchydata.com/ - Consumes: - - application/json + Consumes: + - application/json - Produces: - - application/json + Produces: + - application/json swagger:meta */ @@ -108,7 +110,7 @@ package v1 // +k8s:deepcopy-gen=package,register /* - Copyright 2017 - 2020 Crunchy Data Solutions, Inc. + Copyright 2017 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at diff --git a/pkg/apis/crunchydata.com/v1/errors.go b/pkg/apis/crunchydata.com/v1/errors.go new file mode 100644 index 0000000000..ec2c8dfc16 --- /dev/null +++ b/pkg/apis/crunchydata.com/v1/errors.go @@ -0,0 +1,23 @@ +package v1 + +import "errors" + +/* + Copyright 2020 - 2023 Crunchy Data Solutions, Inc. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +var ( + ErrStorageTypesEmpty = errors.New("no storage types detected") + ErrInvalidStorageType = errors.New("invalid storage type") +) diff --git a/pkg/apis/crunchydata.com/v1/policy.go b/pkg/apis/crunchydata.com/v1/policy.go index 28347f9950..57370ec932 100644 --- a/pkg/apis/crunchydata.com/v1/policy.go +++ b/pkg/apis/crunchydata.com/v1/policy.go @@ -1,7 +1,7 @@ package v1 /* - Copyright 2017 - 2020 Crunchy Data Solutions, Inc. + Copyright 2017 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at @@ -27,7 +27,6 @@ const PgpolicyResourcePlural = "pgpolicies" type PgpolicySpec struct { Namespace string `json:"namespace"` Name string `json:"name"` - URL string `json:"url"` SQL string `json:"sql"` Status string `json:"status"` } diff --git a/pkg/apis/crunchydata.com/v1/register.go b/pkg/apis/crunchydata.com/v1/register.go index 00db119bda..6b5926e412 100644 --- a/pkg/apis/crunchydata.com/v1/register.go +++ b/pkg/apis/crunchydata.com/v1/register.go @@ -1,7 +1,7 @@ package v1 /* -Copyright 2017 - 2020 Crunchy Data Solutions, Inc. +Copyright 2017 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -30,7 +30,7 @@ var ( ) // GroupName is the group name used in this package. -//const GroupName = "cr.client-go.k8s.io" +// const GroupName = "cr.client-go.k8s.io" const GroupName = "crunchydata.com" // SchemeGroupVersion is the group version used to register these objects. diff --git a/pkg/apis/crunchydata.com/v1/replica.go b/pkg/apis/crunchydata.com/v1/replica.go index 386fa033d0..990bb2d050 100644 --- a/pkg/apis/crunchydata.com/v1/replica.go +++ b/pkg/apis/crunchydata.com/v1/replica.go @@ -1,7 +1,7 @@ package v1 /* - Copyright 2018 - 2020 Crunchy Data Solutions, Inc. + Copyright 2018 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -16,6 +16,7 @@ package v1 */ import ( + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -36,12 +37,21 @@ type Pgreplica struct { // PgreplicaSpec ... // swagger:ignore type PgreplicaSpec struct { - Namespace string `json:"namespace"` - Name string `json:"name"` - ClusterName string `json:"clustername"` - ReplicaStorage PgStorageSpec `json:"replicastorage"` - Status string `json:"status"` - UserLabels map[string]string `json:"userlabels"` + Namespace string `json:"namespace"` + Name string `json:"name"` + ClusterName string `json:"clustername"` + ReplicaStorage PgStorageSpec `json:"replicastorage"` + // ServiceType references the type of Service that should be used when + // deploying PostgreSQL instances + ServiceType v1.ServiceType `json:"serviceType"` + Status string `json:"status"` + UserLabels map[string]string `json:"userlabels"` + // NodeAffinity is an optional structure that dictates how an instance should + // be deployed in an environment + NodeAffinity *v1.NodeAffinity `json:"nodeAffinity"` + // Tolerations are an optional list of Pod toleration rules that are applied + // to the PostgreSQL instance. + Tolerations []v1.Toleration `json:"tolerations"` } // PgreplicaList ... diff --git a/pkg/apis/crunchydata.com/v1/task.go b/pkg/apis/crunchydata.com/v1/task.go index 1475b61ad7..1f5bbfa29b 100644 --- a/pkg/apis/crunchydata.com/v1/task.go +++ b/pkg/apis/crunchydata.com/v1/task.go @@ -1,7 +1,7 @@ package v1 /* - Copyright 2017 - 2020 Crunchy Data Solutions, Inc. + Copyright 2017 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -22,41 +22,53 @@ import ( // PgtaskResourcePlural ... 
const PgtaskResourcePlural = "pgtasks" -const PgtaskDeleteBackups = "delete-backups" -const PgtaskDeleteData = "delete-data" -const PgtaskFailover = "failover" -const PgtaskAutoFailover = "autofailover" -const PgtaskAddPolicies = "addpolicies" - -const PgtaskUpgrade = "clusterupgrade" -const PgtaskUpgradeCreated = "cluster upgrade - task created" -const PgtaskUpgradeInProgress = "cluster upgrade - in progress" - -const PgtaskPgAdminAdd = "add-pgadmin" -const PgtaskPgAdminDelete = "delete-pgadmin" - -const PgtaskWorkflow = "workflow" -const PgtaskWorkflowCreateClusterType = "createcluster" -const PgtaskWorkflowBackrestRestoreType = "pgbackrestrestore" -const PgtaskWorkflowBackupType = "backupworkflow" -const PgtaskWorkflowSubmittedStatus = "task submitted" -const PgtaskWorkflowCompletedStatus = "task completed" -const PgtaskWorkflowID = "workflowid" - -const PgtaskWorkflowBackrestRestorePVCCreatedStatus = "restored PVC created" -const PgtaskWorkflowBackrestRestorePrimaryCreatedStatus = "restored Primary created" -const PgtaskWorkflowBackrestRestoreJobCreatedStatus = "restore job created" - -const PgtaskBackrest = "backrest" -const PgtaskBackrestBackup = "backup" -const PgtaskBackrestInfo = "info" -const PgtaskBackrestRestore = "restore" -const PgtaskBackrestStanzaCreate = "stanza-create" - -const PgtaskpgDump = "pgdump" -const PgtaskpgDumpBackup = "pgdumpbackup" -const PgtaskpgDumpInfo = "pgdumpinfo" -const PgtaskpgRestore = "pgrestore" +const ( + PgtaskDeleteData = "delete-data" + PgtaskAddPolicies = "addpolicies" + PgtaskRollingUpdate = "rolling update" +) + +const ( + PgtaskUpgrade = "clusterupgrade" + PgtaskUpgradeCreated = "cluster upgrade - task created" + PgtaskUpgradeInProgress = "cluster upgrade - in progress" +) + +const ( + PgtaskPgAdminAdd = "add-pgadmin" + PgtaskPgAdminDelete = "delete-pgadmin" +) + +const ( + PgtaskWorkflow = "workflow" + PgtaskWorkflowCreateClusterType = "createcluster" + PgtaskWorkflowBackrestRestoreType = "pgbackrestrestore" + PgtaskWorkflowBackupType = "backupworkflow" + PgtaskWorkflowSubmittedStatus = "task submitted" + PgtaskWorkflowCompletedStatus = "task completed" + PgtaskWorkflowID = "workflowid" +) + +const ( + PgtaskWorkflowBackrestRestorePVCCreatedStatus = "restored PVC created" + PgtaskWorkflowBackrestRestorePrimaryCreatedStatus = "restored Primary created" + PgtaskWorkflowBackrestRestoreJobCreatedStatus = "restore job created" +) + +const ( + PgtaskBackrest = "backrest" + PgtaskBackrestBackup = "backup" + PgtaskBackrestInfo = "info" + PgtaskBackrestRestore = "restore" + PgtaskBackrestStanzaCreate = "stanza-create" +) + +const ( + PgtaskpgDump = "pgdump" + PgtaskpgDumpBackup = "pgdumpbackup" + PgtaskpgDumpInfo = "pgdumpinfo" + PgtaskpgRestore = "pgrestore" +) // this is ported over from legacy backup code const PgBackupJobSubmitted = "Backup Job Submitted" @@ -70,10 +82,6 @@ const ( BackupTypeBootstrap string = "bootstrap" ) -// BackrestStorageTypes defines the valid types of storage that can be utilized -// with pgBackRest -var BackrestStorageTypes = []string{"local", "s3"} - // PgtaskSpec ... // swagger:ignore type PgtaskSpec struct { diff --git a/pkg/apis/crunchydata.com/v1/zz_generated.deepcopy.go b/pkg/apis/crunchydata.com/v1/zz_generated.deepcopy.go index 80fd389e4f..04a5793426 100644 --- a/pkg/apis/crunchydata.com/v1/zz_generated.deepcopy.go +++ b/pkg/apis/crunchydata.com/v1/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ // +build !ignore_autogenerated /* -Copyright 2020 Crunchy Data Solutions, Inc. 
+Copyright 2020 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -68,6 +68,27 @@ func (in *ClusterAnnotations) DeepCopy() *ClusterAnnotations { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeAffinitySpec) DeepCopyInto(out *NodeAffinitySpec) { + *out = *in + if in.Default != nil { + in, out := &in.Default, &out.Default + *out = new(corev1.NodeAffinity) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeAffinitySpec. +func (in *NodeAffinitySpec) DeepCopy() *NodeAffinitySpec { + if in == nil { + return nil + } + out := new(NodeAffinitySpec) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *PGDataSourceSpec) DeepCopyInto(out *PGDataSourceSpec) { *out = *in @@ -196,7 +217,6 @@ func (in *PgclusterSpec) DeepCopyInto(out *PgclusterSpec) { *out = *in out.PrimaryStorage = in.PrimaryStorage out.WALStorage = in.WALStorage - out.ArchiveStorage = in.ArchiveStorage out.ReplicaStorage = in.ReplicaStorage out.BackrestStorage = in.BackrestStorage if in.Resources != nil { @@ -249,6 +269,7 @@ func (in *PgclusterSpec) DeepCopyInto(out *PgclusterSpec) { (*out)[key] = val } } + in.NodeAffinity.DeepCopyInto(&out.NodeAffinity) out.PodAntiAffinity = in.PodAntiAffinity if in.SyncReplication != nil { in, out := &in.SyncReplication, &out.SyncReplication @@ -262,6 +283,11 @@ func (in *PgclusterSpec) DeepCopyInto(out *PgclusterSpec) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.BackrestStorageTypes != nil { + in, out := &in.BackrestStorageTypes, &out.BackrestStorageTypes + *out = make([]BackrestStorageType, len(*in)) + copy(*out, *in) + } if in.TablespaceMounts != nil { in, out := &in.TablespaceMounts, &out.TablespaceMounts *out = make(map[string]PgStorageSpec, len(*in)) @@ -272,6 +298,13 @@ func (in *PgclusterSpec) DeepCopyInto(out *PgclusterSpec) { out.TLS = in.TLS out.PGDataSource = in.PGDataSource in.Annotations.DeepCopyInto(&out.Annotations) + if in.Tolerations != nil { + in, out := &in.Tolerations, &out.Tolerations + *out = make([]corev1.Toleration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } return } @@ -466,6 +499,18 @@ func (in *PgreplicaSpec) DeepCopyInto(out *PgreplicaSpec) { (*out)[key] = val } } + if in.NodeAffinity != nil { + in, out := &in.NodeAffinity, &out.NodeAffinity + *out = new(corev1.NodeAffinity) + (*in).DeepCopyInto(*out) + } + if in.Tolerations != nil { + in, out := &in.Tolerations, &out.Tolerations + *out = make([]corev1.Toleration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } return } diff --git a/pkg/apiservermsgs/backrestmsgs.go b/pkg/apiservermsgs/backrestmsgs.go index 12d72844b9..a87b72b987 100644 --- a/pkg/apiservermsgs/backrestmsgs.go +++ b/pkg/apiservermsgs/backrestmsgs.go @@ -1,7 +1,7 @@ package apiservermsgs /* -Copyright 2018 - 2020 Crunchy Data Solutions, Inc. +Copyright 2018 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
 You may obtain a copy of the License at
@@ -15,6 +15,10 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */

+import (
+	crv1 "github.com/crunchydata/postgres-operator/pkg/apis/crunchydata.com/v1"
+)
+
 // CreateBackrestBackupResponse ...
 // swagger:model
 type CreateBackrestBackupResponse struct {
@@ -32,6 +36,27 @@ type CreateBackrestBackupRequest struct {
 	BackrestStorageType string
 }

+// DeleteBackrestBackupRequest ...
+// swagger:model
+type DeleteBackrestBackupRequest struct {
+	// ClientVersion represents the version of the client that is making the API
+	// request
+	ClientVersion string
+	// ClusterName is the name of the pgcluster from which we want to delete the
+	// backup
+	ClusterName string
+	// Namespace is the namespace that the cluster is in
+	Namespace string
+	// Target is the name of the backup to be deleted
+	Target string
+}
+
+// DeleteBackrestBackupResponse ...
+// swagger:model
+type DeleteBackrestBackupResponse struct {
+	Status
+}
+
 // PgBackRestInfo and its associated structs are available for parsing the info
 // that comes from the output of the "pgbackrest info --output json" command
 type PgBackRestInfo struct {
@@ -124,10 +149,13 @@ type RestoreResponse struct {
 // RestoreRequest ...
 // swagger:model
 type RestoreRequest struct {
-	Namespace string
-	FromCluster string
-	RestoreOpts string
-	PITRTarget string
+	Namespace string
+	FromCluster string
+	RestoreOpts string
+	PITRTarget string
+	// NodeAffinityType is only considered when "NodeLabel" is also set, and is
+	// either a value of "preferred" (default) or "required"
+	NodeAffinityType crv1.NodeAffinityType
 	NodeLabel string
 	BackrestStorageType string
 }
diff --git a/pkg/apiservermsgs/catmsgs.go b/pkg/apiservermsgs/catmsgs.go
index ded313371f..23500245ab 100644
--- a/pkg/apiservermsgs/catmsgs.go
+++ b/pkg/apiservermsgs/catmsgs.go
@@ -1,7 +1,7 @@
 package apiservermsgs

 /*
-Copyright 2019 - 2020 Crunchy Data Solutions, Inc.
+Copyright 2019 - 2023 Crunchy Data Solutions, Inc.
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
 You may obtain a copy of the License at
diff --git a/pkg/apiservermsgs/clustermsgs.go b/pkg/apiservermsgs/clustermsgs.go
index e8983613c4..2b81779223 100644
--- a/pkg/apiservermsgs/clustermsgs.go
+++ b/pkg/apiservermsgs/clustermsgs.go
@@ -1,7 +1,7 @@
 package apiservermsgs

 /*
-Copyright 2017 - 2020 Crunchy Data Solutions, Inc.
+Copyright 2017 - 2023 Crunchy Data Solutions, Inc.
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
 You may obtain a copy of the License at
@@ -17,6 +17,8 @@ limitations under the License.
import (
	crv1 "github.com/crunchydata/postgres-operator/pkg/apis/crunchydata.com/v1"
+
+	v1 "k8s.io/api/core/v1"
)

// ShowClusterRequest shows cluster
@@ -44,15 +46,18 @@ type ShowClusterRequest struct {
//
// swagger:model
type CreateClusterRequest struct {
-	Name      string `json:"Name"`
-	Namespace string
+	Name      string `json:"Name"`
+	Namespace string
+	// NodeAffinityType is only considered when "NodeLabel" is also set, and is
+	// either a value of "preferred" (default) or "required"
+	NodeAffinityType    crv1.NodeAffinityType
	NodeLabel           string
	PasswordLength      int
	PasswordSuperuser   string
	PasswordReplication string
	Password            string
	SecretFrom          string
-	UserLabels          string
+	UserLabels          map[string]string
	Tablespaces         []ClusterTablespaceDetail
	Policies            string
	CCPImage            string
@@ -60,7 +65,7 @@ type CreateClusterRequest struct {
	CCPImagePrefix string
	PGOImagePrefix string
	ReplicaCount   int
-	ServiceType    string
+	ServiceType    v1.ServiceType
	MetricsFlag    bool
	// ExporterCPULimit, if specified, is the value of the max CPU for a
	// Crunchy Postgres Exporter sidecar container
@@ -82,12 +87,20 @@ type CreateClusterRequest struct {
	AutofailFlag        bool
	ArchiveFlag         bool
	BackrestStorageType string
-	//BackrestRestoreFrom string
+	// BackrestRestoreFrom string
	PgbouncerFlag bool
	// PgBouncerReplicas represents the total number of pgBouncer pods to deploy with a
	// PostgreSQL cluster. Only works if PgbouncerFlag is set, and if so, it must
	// be at least 1. If 0 is passed in, it will automatically be set to 1
-	PgBouncerReplicas int32
+	PgBouncerReplicas int32
+	// PgBouncerServiceType, if specified and if PgbouncerFlag is true, is the
+	// ServiceType to use for pgBouncer. If not set, the value is defaulted to that
+	// of the PostgreSQL cluster ServiceType.
+	PgBouncerServiceType v1.ServiceType
+	// PgBouncerTLSSecret is the name of the Secret containing the TLS keypair
+	// for enabling TLS with pgBouncer. This also requires TLSSecret and
+	// CASecret to be set
+	PgBouncerTLSSecret string
	CustomConfig       string
	StorageConfig      string
	WALStorageConfig   string
@@ -198,6 +211,8 @@ type CreateClusterRequest struct {
	PGDataSource crv1.PGDataSourceSpec
	// Annotations provide any custom annotations for a cluster
	Annotations crv1.ClusterAnnotations `json:"annotations"`
+	// Tolerations allows for the setting of Pod tolerations on Postgres instances
+	Tolerations []v1.Toleration `json:"tolerations"`
}

// CreateClusterDetail provides details about the PostgreSQL cluster that is
@@ -248,14 +263,17 @@ type ShowClusterService struct {
	ClusterName  string
	Pgbouncer    bool
	BackrestRepo bool
+	PGAdmin      bool
}

-const PodTypePrimary = "primary"
-const PodTypeReplica = "replica"
-const PodTypePgbouncer = "pgbouncer"
-const PodTypePgbackrest = "pgbackrest"
-const PodTypeBackup = "backup"
-const PodTypeUnknown = "unknown"
+const (
+	PodTypePrimary    = "primary"
+	PodTypeReplica    = "replica"
+	PodTypePgbouncer  = "pgbouncer"
+	PodTypePgbackrest = "pgbackrest"
+	PodTypeBackup     = "backup"
+	PodTypeUnknown    = "unknown"
+)

// ShowClusterPod
//
@@ -352,6 +370,26 @@ const (
	UpdateClusterAutofailDisable
)

+// UpdateClusterMetrics determines whether or not to enable/disable the metrics
+// collection sidecar in a cluster
+type UpdateClusterMetrics int
+
+const (
+	UpdateClusterMetricsDoNothing UpdateClusterMetrics = iota
+	UpdateClusterMetricsEnable
+	UpdateClusterMetricsDisable
+)
+
+// UpdateClusterPGBadger determines whether or not to enable/disable the
+// pgBadger sidecar in a cluster
+type UpdateClusterPGBadger int
+
+const (
+	UpdateClusterPGBadgerDoNothing UpdateClusterPGBadger = iota
+	UpdateClusterPGBadgerEnable
+	UpdateClusterPGBadgerDisable
+)
+
// UpdateClusterStandbyStatus defines the types for updating the Standby status
type UpdateClusterStandbyStatus int

@@ -409,6 +447,9 @@ type UpdateClusterRequest struct {
	// ExporterMemoryRequest, if specified, is the value of how much RAM should
	// be requested for the Crunchy Postgres Exporter instance.
	ExporterMemoryRequest string
+	// ExporterRotatePassword, if specified, rotates the password of the metrics
+	// collection agent, i.e. the "ccp_monitoring" user.
+	ExporterRotatePassword bool
	// CPULimit is the value of the max CPU utilization for a Pod that has a
	// PostgreSQL cluster
	CPULimit string
@@ -422,10 +463,24 @@ type UpdateClusterRequest struct {
	// MemoryRequest is the value of how much RAM should be requested for
	// deploying the PostgreSQL cluster
	MemoryRequest string
-	Standby       UpdateClusterStandbyStatus
-	Startup       bool
-	Shutdown      bool
-	Tablespaces   []ClusterTablespaceDetail
+	// Metrics allows for the enabling/disabling of the metrics sidecar. This can
+	// cause downtime and triggers a rolling update
+	Metrics UpdateClusterMetrics
+	// PGBadger allows for the enabling/disabling of the pgBadger sidecar. This can
+	// cause downtime and triggers a rolling update
+	PGBadger UpdateClusterPGBadger
+	// ServiceType, if specified, will change the service type of a cluster.
+	ServiceType v1.ServiceType
+	Standby     UpdateClusterStandbyStatus
+	Startup     bool
+	Shutdown    bool
+	Tablespaces []ClusterTablespaceDetail
+	// Tolerations allows for the addition of Pod tolerations on a PostgreSQL
+	// cluster.
+	Tolerations []v1.Toleration `json:"tolerations"`
+	// TolerationsDelete allows for the removal of Pod tolerations on a
+	// PostgreSQL cluster
+	TolerationsDelete []v1.Toleration `json:"tolerationsDelete"`
}

// UpdateClusterResponse ...
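//----------------------------
// Editor's sketch (not part of this patch): a minimal, self-contained example
// of how a caller might populate the new UpdateClusterRequest fields added in
// the hunk above. Only fields visible in this diff are used; the standalone
// main package and the "msgs" import alias are assumptions for illustration,
// not the pgo client's actual wiring.
//
//	package main
//
//	import (
//		"fmt"
//
//		msgs "github.com/crunchydata/postgres-operator/pkg/apiservermsgs"
//		v1 "k8s.io/api/core/v1"
//	)
//
//	func main() {
//		req := msgs.UpdateClusterRequest{
//			// Change the cluster's Service type.
//			ServiceType: v1.ServiceTypeLoadBalancer,
//			// Enable the metrics sidecar; per the doc comment above, this
//			// triggers a rolling update.
//			Metrics: msgs.UpdateClusterMetricsEnable,
//			// Add a toleration so instance Pods can schedule onto tainted nodes.
//			Tolerations: []v1.Toleration{{
//				Key:      "workload",
//				Operator: v1.TolerationOpEqual,
//				Value:    "database",
//				Effect:   v1.TaintEffectNoSchedule,
//			}},
//		}
//		fmt.Printf("%+v\n", req)
//	}
//----------------------------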
@@ -452,6 +507,7 @@ type ClusterTestRequest struct {
const (
	ClusterTestInstanceTypePrimary   = "primary"
	ClusterTestInstanceTypeReplica   = "replica"
+	ClusterTestInstanceTypePGAdmin   = "pgadmin"
	ClusterTestInstanceTypePGBouncer = "pgbouncer"
	ClusterTestInstanceTypeBackups   = "backups"
	ClusterTestInstanceTypeUnknown   = "unknown"
@@ -511,6 +567,39 @@ type ScaleDownResponse struct {
	Status
}

+// ClusterScaleRequest supersedes the legacy model for scaling up the number
+// of instances in a cluster
+// swagger:model
+type ClusterScaleRequest struct {
+	// CCPImageTag is the image tag to use for cluster creation. If this is not
+	// provided, this defaults to what the cluster is using, which is typically
+	// the desired behavior.
+	CCPImageTag string `json:"ccpImageTag"`
+	// ClientVersion is the version of the client that is being used
+	ClientVersion string `json:"clientVersion"`
+	// Name is the name of the cluster to scale. This is set by the value in the
+	// URL
+	Name string `json:"name"`
+	// Namespace is the namespace in which the queried cluster resides.
+	Namespace string `json:"namespace"`
+	// NodeAffinityType is only considered when "NodeLabel" is also set, and is
+	// either a value of "preferred" (default) or "required"
+	NodeAffinityType crv1.NodeAffinityType
+	// NodeLabel, if provided, is a node label to use.
+	NodeLabel string `json:"nodeLabel"`
+	// ReplicaCount is the number of replicas to add to the cluster. This is
+	// required and should be at least 1.
+	ReplicaCount int `json:"replicaCount"`
+	// ServiceType is the kind of Service to deploy with this instance. Defaults
+	// to the value on the cluster.
+	ServiceType v1.ServiceType `json:"serviceType"`
+	// StorageConfig, if provided, specifies which of the storage configuration
+	// options should be used. Defaults to what the main cluster definition uses.
+	StorageConfig string `json:"storageConfig"`
+	// Tolerations allows for the setting of Pod tolerations on Postgres instances
+	Tolerations []v1.Toleration `json:"tolerations"`
+}
+
// ClusterScaleResponse ...
// swagger:model
type ClusterScaleResponse struct {
diff --git a/pkg/apiservermsgs/common.go b/pkg/apiservermsgs/common.go
index cce7f6be40..49c2b295b4 100644
--- a/pkg/apiservermsgs/common.go
+++ b/pkg/apiservermsgs/common.go
@@ -1,7 +1,7 @@
package apiservermsgs

/*
-Copyright 2017 - 2020 Crunchy Data Solutions, Inc.
+Copyright 2017 - 2023 Crunchy Data Solutions, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
@@ -15,7 +15,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/

-const PGO_VERSION = "4.5.0"
+const PGO_VERSION = "4.6.10"

// Ok status
const Ok = "ok"
diff --git a/pkg/apiservermsgs/configmsgs.go b/pkg/apiservermsgs/configmsgs.go
index 06ed680008..99d300841d 100644
--- a/pkg/apiservermsgs/configmsgs.go
+++ b/pkg/apiservermsgs/configmsgs.go
@@ -1,7 +1,7 @@
package apiservermsgs

/*
-Copyright 2018 - 2020 Crunchy Data Solutions, Inc.
+Copyright 2018 - 2023 Crunchy Data Solutions, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
@@ -15,13 +15,9 @@ See the License for the specific language governing permissions and
limitations under the License.
*/

-import (
-	"github.com/crunchydata/postgres-operator/internal/config"
-)
-
// ShowConfigResponse ...
// swagger:model
type ShowConfigResponse struct {
-	Result config.PgoConfig
+	Result interface{}
	Status
}
diff --git a/pkg/apiservermsgs/dfmsgs.go b/pkg/apiservermsgs/dfmsgs.go
index 22541840e7..d7117ce3b4 100644
--- a/pkg/apiservermsgs/dfmsgs.go
+++ b/pkg/apiservermsgs/dfmsgs.go
@@ -1,7 +1,7 @@
package apiservermsgs

/*
-Copyright 2018 - 2020 Crunchy Data Solutions, Inc.
+Copyright 2018 - 2023 Crunchy Data Solutions, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
diff --git a/pkg/apiservermsgs/failovermsgs.go b/pkg/apiservermsgs/failovermsgs.go
index bfeefcb49a..425d9b447f 100644
--- a/pkg/apiservermsgs/failovermsgs.go
+++ b/pkg/apiservermsgs/failovermsgs.go
@@ -1,7 +1,7 @@
package apiservermsgs

/*
-Copyright 2018 - 2020 Crunchy Data Solutions, Inc.
+Copyright 2018 - 2023 Crunchy Data Solutions, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
@@ -37,18 +37,22 @@ type QueryFailoverResponse struct {
// CreateFailoverResponse ...
// swagger:model
type CreateFailoverResponse struct {
-	Results []string
-	Targets string
+	Results string
	Status
}

// CreateFailoverRequest ...
// swagger:model
type CreateFailoverRequest struct {
-	Namespace     string
-	ClusterName   string
-	Target        string
	ClientVersion string
+	ClusterName   string
+	// Force determines whether or not to force the failover. A normal failover
+	// request uses a switchover, which seeks a healthy option. However, "Force"
+	// skips that check and will promote either the instance that is the best
+	// fit or the specified target
+	Force     bool
+	Namespace string
+	Target    string
}

// QueryFailoverRequest ...
diff --git a/pkg/apiservermsgs/labelmsgs.go b/pkg/apiservermsgs/labelmsgs.go
index eabf3e8ecf..7fc0289170 100644
--- a/pkg/apiservermsgs/labelmsgs.go
+++ b/pkg/apiservermsgs/labelmsgs.go
@@ -1,7 +1,7 @@
package apiservermsgs

/*
-Copyright 2017 - 2020 Crunchy Data Solutions, Inc.
+Copyright 2017 - 2023 Crunchy Data Solutions, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
@@ -21,7 +21,7 @@ type LabelRequest struct {
	Selector      string
	Namespace     string
	Args          []string
-	LabelCmdLabel string
+	Labels        map[string]string
	DryRun        bool
	DeleteLabel   bool
	ClientVersion string
@@ -33,7 +33,7 @@ type DeleteLabelRequest struct {
	Selector      string
	Namespace     string
	Args          []string
-	LabelCmdLabel string
+	Labels        map[string]string
	ClientVersion string
}
diff --git a/pkg/apiservermsgs/namespacemsgs.go b/pkg/apiservermsgs/namespacemsgs.go
index 3921604a00..b72ccfa7dc 100644
--- a/pkg/apiservermsgs/namespacemsgs.go
+++ b/pkg/apiservermsgs/namespacemsgs.go
@@ -1,7 +1,7 @@
package apiservermsgs

/*
-Copyright 2017 - 2020 Crunchy Data Solutions, Inc.
+Copyright 2017 - 2023 Crunchy Data Solutions, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at diff --git a/pkg/apiservermsgs/pgadminmsgs.go b/pkg/apiservermsgs/pgadminmsgs.go index 5d68b9352d..4c63347545 100644 --- a/pkg/apiservermsgs/pgadminmsgs.go +++ b/pkg/apiservermsgs/pgadminmsgs.go @@ -1,7 +1,7 @@ package apiservermsgs /* -Copyright 2020 Crunchy Data Solutions, Inc. +Copyright 2020 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at diff --git a/pkg/apiservermsgs/pgbouncermsgs.go b/pkg/apiservermsgs/pgbouncermsgs.go index 0feab5f15e..fd34a22aaa 100644 --- a/pkg/apiservermsgs/pgbouncermsgs.go +++ b/pkg/apiservermsgs/pgbouncermsgs.go @@ -1,7 +1,9 @@ package apiservermsgs +import v1 "k8s.io/api/core/v1" + /* -Copyright 2018 - 2020 Crunchy Data Solutions, Inc. +Copyright 2018 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -40,6 +42,12 @@ type CreatePgbouncerRequest struct { // automatically be set to 1 Replicas int32 Selector string + // ServiceType is the kind of Service to deploy with this instance. If unset, + // it will default to the value for the PostgreSQL cluster. + ServiceType v1.ServiceType `json:"serviceType"` + // TLSSecret is the name of the secret that contains the keypair required to + // deploy TLS-enabled pgBouncer + TLSSecret string } // CreatePgbouncerResponse ... @@ -183,6 +191,10 @@ type UpdatePgBouncerRequest struct { // Selector is optional and contains a selector for pgBouncer deployments that // are to be updated Selector string + + // ServiceType is the kind of Service to deploy with this instance. If unset, + // it will default to the value for the PostgreSQL cluster. + ServiceType v1.ServiceType `json:"serviceType"` } // UpdatePgBouncerResponse contains the resulting output of the update request diff --git a/pkg/apiservermsgs/pgdumpmsgs.go b/pkg/apiservermsgs/pgdumpmsgs.go index e247fca304..5133978241 100644 --- a/pkg/apiservermsgs/pgdumpmsgs.go +++ b/pkg/apiservermsgs/pgdumpmsgs.go @@ -1,11 +1,7 @@ package apiservermsgs -import ( - crv1 "github.com/crunchydata/postgres-operator/pkg/apis/crunchydata.com/v1" -) - /* -Copyright 2019 - 2020 Crunchy Data Solutions, Inc. +Copyright 2019 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -19,6 +15,10 @@ See the License for the specific language governing permissions and limitations under the License. */ +import ( + crv1 "github.com/crunchydata/postgres-operator/pkg/apis/crunchydata.com/v1" +) + // CreatepgDumpBackupResponse ... 
// swagger:model type CreatepgDumpBackupResponse struct { @@ -61,7 +61,10 @@ type PgRestoreRequest struct { PGDumpDB string RestoreOpts string PITRTarget string - NodeLabel string + // NodeAffinityType is only considered when "NodeLabel" is also set, and is + // either a value of "preferred" (default) or "required" + NodeAffinityType crv1.NodeAffinityType + NodeLabel string } // NOTE: these are ported over from legacy functionality diff --git a/pkg/apiservermsgs/pgorolemsgs.go b/pkg/apiservermsgs/pgorolemsgs.go index 1f62efa1ab..43b488f3d8 100644 --- a/pkg/apiservermsgs/pgorolemsgs.go +++ b/pkg/apiservermsgs/pgorolemsgs.go @@ -1,7 +1,7 @@ package apiservermsgs /* -Copyright 2019 - 2020 Crunchy Data Solutions, Inc. +Copyright 2019 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at diff --git a/pkg/apiservermsgs/pgousermsgs.go b/pkg/apiservermsgs/pgousermsgs.go index 815f8f1fdf..e4189a2a7e 100644 --- a/pkg/apiservermsgs/pgousermsgs.go +++ b/pkg/apiservermsgs/pgousermsgs.go @@ -1,7 +1,7 @@ package apiservermsgs /* -Copyright 2019 - 2020 Crunchy Data Solutions, Inc. +Copyright 2019 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at diff --git a/pkg/apiservermsgs/policymsgs.go b/pkg/apiservermsgs/policymsgs.go index ec3e7cf2f9..793815d8f9 100644 --- a/pkg/apiservermsgs/policymsgs.go +++ b/pkg/apiservermsgs/policymsgs.go @@ -1,7 +1,7 @@ package apiservermsgs /* -Copyright 2017 - 2020 Crunchy Data Solutions, Inc. +Copyright 2017 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -33,7 +33,6 @@ type ShowPolicyRequest struct { // swagger:model type CreatePolicyRequest struct { Name string - URL string SQL string Namespace string ClientVersion string diff --git a/pkg/apiservermsgs/pvcmsgs.go b/pkg/apiservermsgs/pvcmsgs.go index f59ddd7983..421380027a 100644 --- a/pkg/apiservermsgs/pvcmsgs.go +++ b/pkg/apiservermsgs/pvcmsgs.go @@ -1,7 +1,7 @@ package apiservermsgs /* -Copyright 2017 - 2020 Crunchy Data Solutions, Inc. +Copyright 2017 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at diff --git a/pkg/apiservermsgs/reloadmsgs.go b/pkg/apiservermsgs/reloadmsgs.go index 34a3738399..cbcdfcc624 100644 --- a/pkg/apiservermsgs/reloadmsgs.go +++ b/pkg/apiservermsgs/reloadmsgs.go @@ -1,7 +1,7 @@ package apiservermsgs /* -Copyright 2018 - 2020 Crunchy Data Solutions, Inc. +Copyright 2018 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at diff --git a/pkg/apiservermsgs/restartmsgs.go b/pkg/apiservermsgs/restartmsgs.go index c0b32d3d00..e9d23de05e 100644 --- a/pkg/apiservermsgs/restartmsgs.go +++ b/pkg/apiservermsgs/restartmsgs.go @@ -1,7 +1,7 @@ package apiservermsgs /* -Copyright 2020 Crunchy Data Solutions, Inc. +Copyright 2020 - 2023 Crunchy Data Solutions, Inc. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -47,6 +47,7 @@ type InstanceDetail struct { type RestartRequest struct { Namespace string ClusterName string + RollingUpdate bool Targets []string ClientVersion string } diff --git a/pkg/apiservermsgs/schedulemsgs.go b/pkg/apiservermsgs/schedulemsgs.go index 4b037a5992..e64d10bf32 100644 --- a/pkg/apiservermsgs/schedulemsgs.go +++ b/pkg/apiservermsgs/schedulemsgs.go @@ -1,7 +1,7 @@ package apiservermsgs /* -Copyright 2018 - 2020 Crunchy Data Solutions, Inc. +Copyright 2018 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at diff --git a/pkg/apiservermsgs/statusmsgs.go b/pkg/apiservermsgs/statusmsgs.go index 94994c75b9..2a4013d76b 100644 --- a/pkg/apiservermsgs/statusmsgs.go +++ b/pkg/apiservermsgs/statusmsgs.go @@ -1,7 +1,7 @@ package apiservermsgs /* -Copyright 2018 - 2020 Crunchy Data Solutions, Inc. +Copyright 2018 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at diff --git a/pkg/apiservermsgs/upgrademsgs.go b/pkg/apiservermsgs/upgrademsgs.go index ab7fecc47a..dcaa5ead61 100644 --- a/pkg/apiservermsgs/upgrademsgs.go +++ b/pkg/apiservermsgs/upgrademsgs.go @@ -1,7 +1,7 @@ package apiservermsgs /* -Copyright 2017 - 2020 Crunchy Data Solutions, Inc. +Copyright 2017 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at diff --git a/pkg/apiservermsgs/usermsgs.go b/pkg/apiservermsgs/usermsgs.go index 1f0ba56295..9f830c7a3b 100644 --- a/pkg/apiservermsgs/usermsgs.go +++ b/pkg/apiservermsgs/usermsgs.go @@ -1,7 +1,7 @@ package apiservermsgs /* -Copyright 2017 - 2020 Crunchy Data Solutions, Inc. +Copyright 2017 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -15,12 +15,6 @@ See the License for the specific language governing permissions and limitations under the License. */ -import ( - "errors" - - pgpassword "github.com/crunchydata/postgres-operator/internal/postgres/password" -) - type UpdateClusterLoginState int // set the different values around whether or not to disable/enable a user's @@ -31,21 +25,6 @@ const ( UpdateUserLoginDisable ) -var ( - // ErrPasswordTypeInvalid is used when a string that's not included in - // PasswordTypeStrings is used - ErrPasswordTypeInvalid = errors.New("invalid password type. 
choices are (md5, scram-sha-256)") -) - -// passwordTypeStrings is a mapping of strings of password types to their -// corresponding value of the structured password type -var passwordTypeStrings = map[string]pgpassword.PasswordType{ - "": pgpassword.MD5, - "md5": pgpassword.MD5, - "scram": pgpassword.SCRAM, - "scram-sha-256": pgpassword.SCRAM, -} - // CreateUserRequest contains the parameters that are passed in when an Operator // user requests to create a new PostgreSQL user // swagger:model @@ -131,7 +110,10 @@ type UpdateUserRequest struct { PasswordValidAlways bool RotatePassword bool Selector string - Username string + // SetSystemAccountPassword allows one to override the password for a + // designated system account + SetSystemAccountPassword bool + Username string } // UpdateUserResponse contains the response after an update user request @@ -152,15 +134,3 @@ type UserResponseDetail struct { Username string ValidUntil string } - -// GetPasswordType returns the enumerated password type based on the string, and -// an error if it cannot match one -func GetPasswordType(passwordTypeStr string) (pgpassword.PasswordType, error) { - passwordType, ok := passwordTypeStrings[passwordTypeStr] - - if !ok { - return passwordType, ErrPasswordTypeInvalid - } - - return passwordType, nil -} diff --git a/pkg/apiservermsgs/usermsgs_test.go b/pkg/apiservermsgs/usermsgs_test.go deleted file mode 100644 index d2f70388fc..0000000000 --- a/pkg/apiservermsgs/usermsgs_test.go +++ /dev/null @@ -1,63 +0,0 @@ -package apiservermsgs - -/* - Copyright 2020 Crunchy Data Solutions, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -import ( - "testing" - - pgpassword "github.com/crunchydata/postgres-operator/internal/postgres/password" -) - -func TestGetPasswordType(t *testing.T) { - t.Run("valid", func(t *testing.T) { - tests := map[string]pgpassword.PasswordType{ - "": pgpassword.MD5, - "md5": pgpassword.MD5, - "scram": pgpassword.SCRAM, - "scram-sha-256": pgpassword.SCRAM, - } - - for passwordTypeStr, expected := range tests { - t.Run(passwordTypeStr, func(t *testing.T) { - passwordType, err := GetPasswordType(passwordTypeStr) - - if err != nil { - t.Error(err) - return - } - - if passwordType != expected { - t.Errorf("password type %q should yield %d", passwordTypeStr, expected) - } - }) - } - }) - - t.Run("invalid", func(t *testing.T) { - tests := map[string]error{ - "magic": ErrPasswordTypeInvalid, - "scram-sha-512": ErrPasswordTypeInvalid, - } - - for passwordTypeStr, expected := range tests { - t.Run(passwordTypeStr, func(t *testing.T) { - if _, err := GetPasswordType(passwordTypeStr); err != expected { - t.Errorf("password type %q should yield error %q", passwordTypeStr, expected.Error()) - } - }) - } - }) -} diff --git a/pkg/apiservermsgs/versionmsgs.go b/pkg/apiservermsgs/versionmsgs.go index 7685221c44..67c437f175 100644 --- a/pkg/apiservermsgs/versionmsgs.go +++ b/pkg/apiservermsgs/versionmsgs.go @@ -1,7 +1,7 @@ package apiservermsgs /* -Copyright 2017 - 2020 Crunchy Data Solutions, Inc. 
+Copyright 2017 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at diff --git a/pkg/apiservermsgs/watchmsgs.go b/pkg/apiservermsgs/watchmsgs.go index 9d50a81ccd..be5fe353cf 100644 --- a/pkg/apiservermsgs/watchmsgs.go +++ b/pkg/apiservermsgs/watchmsgs.go @@ -1,7 +1,7 @@ package apiservermsgs /* -Copyright 2019 - 2020 Crunchy Data Solutions, Inc. +Copyright 2019 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at diff --git a/pkg/apiservermsgs/workflowmsgs.go b/pkg/apiservermsgs/workflowmsgs.go index 2908d75347..54c2dd14c2 100644 --- a/pkg/apiservermsgs/workflowmsgs.go +++ b/pkg/apiservermsgs/workflowmsgs.go @@ -1,7 +1,7 @@ package apiservermsgs /* -Copyright 2018 - 2020 Crunchy Data Solutions, Inc. +Copyright 2018 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at diff --git a/pkg/events/eventing.go b/pkg/events/eventing.go index 96b544405b..9b7b32a33a 100644 --- a/pkg/events/eventing.go +++ b/pkg/events/eventing.go @@ -1,7 +1,7 @@ package events /* - Copyright 2019 - 2020 Crunchy Data Solutions, Inc. + Copyright 2019 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -19,18 +19,16 @@ import ( "encoding/json" "errors" "fmt" - crunchylog "github.com/crunchydata/postgres-operator/internal/logging" - "github.com/nsqio/go-nsq" - log "github.com/sirupsen/logrus" "os" "reflect" "time" + + "github.com/nsqio/go-nsq" + log "github.com/sirupsen/logrus" ) // String returns the string form for a given LogLevel func Publish(e EventInterface) error { - //Add logging configuration - crunchylog.CrunchyLogger(crunchylog.SetParameters()) eventAddr := os.Getenv("EVENT_ADDR") if eventAddr == "" { return errors.New("EVENT_ADDR not set") @@ -41,7 +39,7 @@ func Publish(e EventInterface) error { } cfg := nsq.NewConfig() - //cfg.UserAgent = fmt.Sprintf("to_nsq/%s go-nsq/%s", version.Binary, nsq.VERSION) + // cfg.UserAgent = fmt.Sprintf("to_nsq/%s go-nsq/%s", version.Binary, nsq.VERSION) cfg.UserAgent = fmt.Sprintf("go-nsq/%s", nsq.VERSION) log.Debugf("publishing %s message %s", reflect.TypeOf(e), e.String()) @@ -78,7 +76,7 @@ func Publish(e EventInterface) error { } } - //always publish to the All topic + // always publish to the All topic err = producer.Publish(EventTopicAll, b) if err != nil { log.Errorf("Error: %s", err) diff --git a/pkg/events/eventtype.go b/pkg/events/eventtype.go index 43a709abde..6ee205094c 100644 --- a/pkg/events/eventtype.go +++ b/pkg/events/eventtype.go @@ -1,7 +1,7 @@ package events /* - Copyright 2019 - 2020 Crunchy Data Solutions, Inc. + Copyright 2019 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at @@ -33,6 +33,7 @@ const ( EventTopicPGOUser = "pgousertopic" EventTopicUpgrade = "upgradetopic" ) + const ( EventReloadCluster = "ReloadCluster" EventPrimaryNotReady = "PrimaryNotReady" @@ -44,8 +45,6 @@ const ( EventScaleClusterFailure = "ScaleClusterFailure" EventScaleDownCluster = "ScaleDownCluster" EventShutdownCluster = "ShutdownCluster" - EventFailoverCluster = "FailoverCluster" - EventFailoverClusterCompleted = "FailoverClusterCompleted" EventRestoreCluster = "RestoreCluster" EventRestoreClusterCompleted = "RestoreClusterCompleted" EventUpgradeCluster = "UpgradeCluster" @@ -53,7 +52,6 @@ const ( EventUpgradeClusterFailure = "UpgradeClusterFailure" EventDeleteCluster = "DeleteCluster" EventDeleteClusterCompleted = "DeleteClusterCompleted" - EventCreateLabel = "CreateLabel" EventCreateBackup = "CreateBackup" EventCreateBackupCompleted = "CreateBackupCompleted" @@ -103,21 +101,6 @@ type EventInterface interface { String() string } -//-------- -type EventReloadClusterFormat struct { - EventHeader `json:"eventheader"` - Clustername string `json:"clustername"` -} - -func (p EventReloadClusterFormat) GetHeader() EventHeader { - return p.EventHeader -} - -func (lvl EventReloadClusterFormat) String() string { - msg := fmt.Sprintf("Event %s - (reload) name %s", lvl.EventHeader, lvl.Clustername) - return msg -} - //---------------------------- type EventCreateClusterFailureFormat struct { EventHeader `json:"eventheader"` @@ -161,6 +144,7 @@ type EventCreateClusterCompletedFormat struct { func (p EventCreateClusterCompletedFormat) GetHeader() EventHeader { return p.EventHeader } + func (lvl EventCreateClusterCompletedFormat) String() string { msg := fmt.Sprintf("Event %s - (create cluster completed) clustername %s workflow %s", lvl.EventHeader, lvl.Clustername, lvl.WorkflowID) return msg @@ -215,38 +199,6 @@ func (lvl EventScaleDownClusterFormat) String() string { return msg } -//---------------------------- -type EventFailoverClusterFormat struct { - EventHeader `json:"eventheader"` - Clustername string `json:"clustername"` - Target string `json:"target"` -} - -func (p EventFailoverClusterFormat) GetHeader() EventHeader { - return p.EventHeader -} - -func (lvl EventFailoverClusterFormat) String() string { - msg := fmt.Sprintf("Event %s (failover) - clustername %s - target %s", lvl.EventHeader, lvl.Clustername, lvl.Target) - return msg -} - -//---------------------------- -type EventFailoverClusterCompletedFormat struct { - EventHeader `json:"eventheader"` - Clustername string `json:"clustername"` - Target string `json:"target"` -} - -func (p EventFailoverClusterCompletedFormat) GetHeader() EventHeader { - return p.EventHeader -} - -func (lvl EventFailoverClusterCompletedFormat) String() string { - msg := fmt.Sprintf("Event %s (failover completed) - clustername %s - target %s", lvl.EventHeader, lvl.Clustername, lvl.Target) - return msg -} - //---------------------------- type EventUpgradeClusterFormat struct { EventHeader `json:"eventheader"` @@ -360,22 +312,6 @@ func (lvl EventCreateBackupCompletedFormat) String() string { return msg } -//---------------------------- -type EventCreateLabelFormat struct { - EventHeader `json:"eventheader"` - Clustername string `json:"clustername"` - Label string `json:"label"` -} - -func (p EventCreateLabelFormat) GetHeader() EventHeader { - return p.EventHeader -} - -func (lvl EventCreateLabelFormat) String() string { - msg := fmt.Sprintf("Event %s (create label) - clustername %s - label [%s]", lvl.EventHeader, 
lvl.Clustername, lvl.Label) - return msg -} - //---------------------------- type EventCreatePolicyFormat struct { EventHeader `json:"eventheader"` diff --git a/pkg/events/pgoeventtype.go b/pkg/events/pgoeventtype.go index 75c076b311..566294e302 100644 --- a/pkg/events/pgoeventtype.go +++ b/pkg/events/pgoeventtype.go @@ -1,7 +1,7 @@ package events /* - Copyright 2019 - 2020 Crunchy Data Solutions, Inc. + Copyright 2019 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at diff --git a/pkg/generated/clientset/versioned/clientset.go b/pkg/generated/clientset/versioned/clientset.go index 061bbaed95..4faabe8ad5 100644 --- a/pkg/generated/clientset/versioned/clientset.go +++ b/pkg/generated/clientset/versioned/clientset.go @@ -1,5 +1,5 @@ /* -Copyright 2020 Crunchy Data Solutions, Inc. +Copyright 2020 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at diff --git a/pkg/generated/clientset/versioned/doc.go b/pkg/generated/clientset/versioned/doc.go index e2534c0fe7..663a401c6c 100644 --- a/pkg/generated/clientset/versioned/doc.go +++ b/pkg/generated/clientset/versioned/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2020 Crunchy Data Solutions, Inc. +Copyright 2020 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at diff --git a/pkg/generated/clientset/versioned/fake/clientset_generated.go b/pkg/generated/clientset/versioned/fake/clientset_generated.go index 384d0e7737..0a25103a8c 100644 --- a/pkg/generated/clientset/versioned/fake/clientset_generated.go +++ b/pkg/generated/clientset/versioned/fake/clientset_generated.go @@ -1,5 +1,5 @@ /* -Copyright 2020 Crunchy Data Solutions, Inc. +Copyright 2020 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at diff --git a/pkg/generated/clientset/versioned/fake/doc.go b/pkg/generated/clientset/versioned/fake/doc.go index 6318a06f3c..130eb64878 100644 --- a/pkg/generated/clientset/versioned/fake/doc.go +++ b/pkg/generated/clientset/versioned/fake/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2020 Crunchy Data Solutions, Inc. +Copyright 2020 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at diff --git a/pkg/generated/clientset/versioned/fake/register.go b/pkg/generated/clientset/versioned/fake/register.go index 62032825ad..26b3aca655 100644 --- a/pkg/generated/clientset/versioned/fake/register.go +++ b/pkg/generated/clientset/versioned/fake/register.go @@ -1,5 +1,5 @@ /* -Copyright 2020 Crunchy Data Solutions, Inc. +Copyright 2020 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at diff --git a/pkg/generated/clientset/versioned/scheme/doc.go b/pkg/generated/clientset/versioned/scheme/doc.go index 462fec5e30..061044abc0 100644 --- a/pkg/generated/clientset/versioned/scheme/doc.go +++ b/pkg/generated/clientset/versioned/scheme/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2020 Crunchy Data Solutions, Inc. +Copyright 2020 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at diff --git a/pkg/generated/clientset/versioned/scheme/register.go b/pkg/generated/clientset/versioned/scheme/register.go index 4850f74045..0ed3f8025d 100644 --- a/pkg/generated/clientset/versioned/scheme/register.go +++ b/pkg/generated/clientset/versioned/scheme/register.go @@ -1,5 +1,5 @@ /* -Copyright 2020 Crunchy Data Solutions, Inc. +Copyright 2020 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at diff --git a/pkg/generated/clientset/versioned/typed/crunchydata.com/v1/crunchydata.com_client.go b/pkg/generated/clientset/versioned/typed/crunchydata.com/v1/crunchydata.com_client.go index aac71b2aa3..2f4cbe7439 100644 --- a/pkg/generated/clientset/versioned/typed/crunchydata.com/v1/crunchydata.com_client.go +++ b/pkg/generated/clientset/versioned/typed/crunchydata.com/v1/crunchydata.com_client.go @@ -1,5 +1,5 @@ /* -Copyright 2020 Crunchy Data Solutions, Inc. +Copyright 2020 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at diff --git a/pkg/generated/clientset/versioned/typed/crunchydata.com/v1/doc.go b/pkg/generated/clientset/versioned/typed/crunchydata.com/v1/doc.go index b7311c21af..9036ada13a 100644 --- a/pkg/generated/clientset/versioned/typed/crunchydata.com/v1/doc.go +++ b/pkg/generated/clientset/versioned/typed/crunchydata.com/v1/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2020 Crunchy Data Solutions, Inc. +Copyright 2020 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at diff --git a/pkg/generated/clientset/versioned/typed/crunchydata.com/v1/fake/doc.go b/pkg/generated/clientset/versioned/typed/crunchydata.com/v1/fake/doc.go index 759d8fff95..5b67373668 100644 --- a/pkg/generated/clientset/versioned/typed/crunchydata.com/v1/fake/doc.go +++ b/pkg/generated/clientset/versioned/typed/crunchydata.com/v1/fake/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2020 Crunchy Data Solutions, Inc. +Copyright 2020 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at diff --git a/pkg/generated/clientset/versioned/typed/crunchydata.com/v1/fake/fake_crunchydata.com_client.go b/pkg/generated/clientset/versioned/typed/crunchydata.com/v1/fake/fake_crunchydata.com_client.go index f8d6b6b350..df4d5bfbaf 100644 --- a/pkg/generated/clientset/versioned/typed/crunchydata.com/v1/fake/fake_crunchydata.com_client.go +++ b/pkg/generated/clientset/versioned/typed/crunchydata.com/v1/fake/fake_crunchydata.com_client.go @@ -1,5 +1,5 @@ /* -Copyright 2020 Crunchy Data Solutions, Inc. +Copyright 2020 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at diff --git a/pkg/generated/clientset/versioned/typed/crunchydata.com/v1/fake/fake_pgcluster.go b/pkg/generated/clientset/versioned/typed/crunchydata.com/v1/fake/fake_pgcluster.go index 177fe4240c..b36d54c565 100644 --- a/pkg/generated/clientset/versioned/typed/crunchydata.com/v1/fake/fake_pgcluster.go +++ b/pkg/generated/clientset/versioned/typed/crunchydata.com/v1/fake/fake_pgcluster.go @@ -1,5 +1,5 @@ /* -Copyright 2020 Crunchy Data Solutions, Inc. +Copyright 2020 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at diff --git a/pkg/generated/clientset/versioned/typed/crunchydata.com/v1/fake/fake_pgpolicy.go b/pkg/generated/clientset/versioned/typed/crunchydata.com/v1/fake/fake_pgpolicy.go index 746a49a17c..574944c5dc 100644 --- a/pkg/generated/clientset/versioned/typed/crunchydata.com/v1/fake/fake_pgpolicy.go +++ b/pkg/generated/clientset/versioned/typed/crunchydata.com/v1/fake/fake_pgpolicy.go @@ -1,5 +1,5 @@ /* -Copyright 2020 Crunchy Data Solutions, Inc. +Copyright 2020 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at diff --git a/pkg/generated/clientset/versioned/typed/crunchydata.com/v1/fake/fake_pgreplica.go b/pkg/generated/clientset/versioned/typed/crunchydata.com/v1/fake/fake_pgreplica.go index 70a1e8a559..8aea3797d7 100644 --- a/pkg/generated/clientset/versioned/typed/crunchydata.com/v1/fake/fake_pgreplica.go +++ b/pkg/generated/clientset/versioned/typed/crunchydata.com/v1/fake/fake_pgreplica.go @@ -1,5 +1,5 @@ /* -Copyright 2020 Crunchy Data Solutions, Inc. +Copyright 2020 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at diff --git a/pkg/generated/clientset/versioned/typed/crunchydata.com/v1/fake/fake_pgtask.go b/pkg/generated/clientset/versioned/typed/crunchydata.com/v1/fake/fake_pgtask.go index 6ec34a55fd..b1d615a176 100644 --- a/pkg/generated/clientset/versioned/typed/crunchydata.com/v1/fake/fake_pgtask.go +++ b/pkg/generated/clientset/versioned/typed/crunchydata.com/v1/fake/fake_pgtask.go @@ -1,5 +1,5 @@ /* -Copyright 2020 Crunchy Data Solutions, Inc. +Copyright 2020 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at diff --git a/pkg/generated/clientset/versioned/typed/crunchydata.com/v1/generated_expansion.go b/pkg/generated/clientset/versioned/typed/crunchydata.com/v1/generated_expansion.go index 066f811e51..c2dead17b5 100644 --- a/pkg/generated/clientset/versioned/typed/crunchydata.com/v1/generated_expansion.go +++ b/pkg/generated/clientset/versioned/typed/crunchydata.com/v1/generated_expansion.go @@ -1,5 +1,5 @@ /* -Copyright 2020 Crunchy Data Solutions, Inc. +Copyright 2020 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at diff --git a/pkg/generated/clientset/versioned/typed/crunchydata.com/v1/pgcluster.go b/pkg/generated/clientset/versioned/typed/crunchydata.com/v1/pgcluster.go index 6ccbb22d73..43b2b53ac2 100644 --- a/pkg/generated/clientset/versioned/typed/crunchydata.com/v1/pgcluster.go +++ b/pkg/generated/clientset/versioned/typed/crunchydata.com/v1/pgcluster.go @@ -1,5 +1,5 @@ /* -Copyright 2020 Crunchy Data Solutions, Inc. +Copyright 2020 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at diff --git a/pkg/generated/clientset/versioned/typed/crunchydata.com/v1/pgpolicy.go b/pkg/generated/clientset/versioned/typed/crunchydata.com/v1/pgpolicy.go index 1d9711033c..18dc4c1d7b 100644 --- a/pkg/generated/clientset/versioned/typed/crunchydata.com/v1/pgpolicy.go +++ b/pkg/generated/clientset/versioned/typed/crunchydata.com/v1/pgpolicy.go @@ -1,5 +1,5 @@ /* -Copyright 2020 Crunchy Data Solutions, Inc. +Copyright 2020 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at diff --git a/pkg/generated/clientset/versioned/typed/crunchydata.com/v1/pgreplica.go b/pkg/generated/clientset/versioned/typed/crunchydata.com/v1/pgreplica.go index f9ffed63eb..00073c38c5 100644 --- a/pkg/generated/clientset/versioned/typed/crunchydata.com/v1/pgreplica.go +++ b/pkg/generated/clientset/versioned/typed/crunchydata.com/v1/pgreplica.go @@ -1,5 +1,5 @@ /* -Copyright 2020 Crunchy Data Solutions, Inc. +Copyright 2020 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at diff --git a/pkg/generated/clientset/versioned/typed/crunchydata.com/v1/pgtask.go b/pkg/generated/clientset/versioned/typed/crunchydata.com/v1/pgtask.go index 5971a76095..d8c2b9b7b7 100644 --- a/pkg/generated/clientset/versioned/typed/crunchydata.com/v1/pgtask.go +++ b/pkg/generated/clientset/versioned/typed/crunchydata.com/v1/pgtask.go @@ -1,5 +1,5 @@ /* -Copyright 2020 Crunchy Data Solutions, Inc. +Copyright 2020 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at diff --git a/pkg/generated/informers/externalversions/crunchydata.com/interface.go b/pkg/generated/informers/externalversions/crunchydata.com/interface.go index dfe44a0fcb..80a637ab5b 100644 --- a/pkg/generated/informers/externalversions/crunchydata.com/interface.go +++ b/pkg/generated/informers/externalversions/crunchydata.com/interface.go @@ -1,5 +1,5 @@ /* -Copyright 2020 Crunchy Data Solutions, Inc. +Copyright 2020 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at diff --git a/pkg/generated/informers/externalversions/crunchydata.com/v1/interface.go b/pkg/generated/informers/externalversions/crunchydata.com/v1/interface.go index c34a37f8e7..fe68646ca0 100644 --- a/pkg/generated/informers/externalversions/crunchydata.com/v1/interface.go +++ b/pkg/generated/informers/externalversions/crunchydata.com/v1/interface.go @@ -1,5 +1,5 @@ /* -Copyright 2020 Crunchy Data Solutions, Inc. +Copyright 2020 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at diff --git a/pkg/generated/informers/externalversions/crunchydata.com/v1/pgcluster.go b/pkg/generated/informers/externalversions/crunchydata.com/v1/pgcluster.go index c11596abe9..2d874bfd96 100644 --- a/pkg/generated/informers/externalversions/crunchydata.com/v1/pgcluster.go +++ b/pkg/generated/informers/externalversions/crunchydata.com/v1/pgcluster.go @@ -1,5 +1,5 @@ /* -Copyright 2020 Crunchy Data Solutions, Inc. +Copyright 2020 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at diff --git a/pkg/generated/informers/externalversions/crunchydata.com/v1/pgpolicy.go b/pkg/generated/informers/externalversions/crunchydata.com/v1/pgpolicy.go index 2016ae2a2e..3e23c5120e 100644 --- a/pkg/generated/informers/externalversions/crunchydata.com/v1/pgpolicy.go +++ b/pkg/generated/informers/externalversions/crunchydata.com/v1/pgpolicy.go @@ -1,5 +1,5 @@ /* -Copyright 2020 Crunchy Data Solutions, Inc. +Copyright 2020 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at diff --git a/pkg/generated/informers/externalversions/crunchydata.com/v1/pgreplica.go b/pkg/generated/informers/externalversions/crunchydata.com/v1/pgreplica.go index 9387d0937b..8141c330df 100644 --- a/pkg/generated/informers/externalversions/crunchydata.com/v1/pgreplica.go +++ b/pkg/generated/informers/externalversions/crunchydata.com/v1/pgreplica.go @@ -1,5 +1,5 @@ /* -Copyright 2020 Crunchy Data Solutions, Inc. +Copyright 2020 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at diff --git a/pkg/generated/informers/externalversions/crunchydata.com/v1/pgtask.go b/pkg/generated/informers/externalversions/crunchydata.com/v1/pgtask.go index d08c342305..ad64b2717a 100644 --- a/pkg/generated/informers/externalversions/crunchydata.com/v1/pgtask.go +++ b/pkg/generated/informers/externalversions/crunchydata.com/v1/pgtask.go @@ -1,5 +1,5 @@ /* -Copyright 2020 Crunchy Data Solutions, Inc. +Copyright 2020 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at diff --git a/pkg/generated/informers/externalversions/factory.go b/pkg/generated/informers/externalversions/factory.go index 56886a005a..9fb82ec40f 100644 --- a/pkg/generated/informers/externalversions/factory.go +++ b/pkg/generated/informers/externalversions/factory.go @@ -1,5 +1,5 @@ /* -Copyright 2020 Crunchy Data Solutions, Inc. +Copyright 2020 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at diff --git a/pkg/generated/informers/externalversions/generic.go b/pkg/generated/informers/externalversions/generic.go index 130dd5ad37..0ea3cb4983 100644 --- a/pkg/generated/informers/externalversions/generic.go +++ b/pkg/generated/informers/externalversions/generic.go @@ -1,5 +1,5 @@ /* -Copyright 2020 Crunchy Data Solutions, Inc. +Copyright 2020 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at diff --git a/pkg/generated/informers/externalversions/internalinterfaces/factory_interfaces.go b/pkg/generated/informers/externalversions/internalinterfaces/factory_interfaces.go index 4086ab3a09..1ac6cd63dc 100644 --- a/pkg/generated/informers/externalversions/internalinterfaces/factory_interfaces.go +++ b/pkg/generated/informers/externalversions/internalinterfaces/factory_interfaces.go @@ -1,5 +1,5 @@ /* -Copyright 2020 Crunchy Data Solutions, Inc. +Copyright 2020 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at diff --git a/pkg/generated/listers/crunchydata.com/v1/expansion_generated.go b/pkg/generated/listers/crunchydata.com/v1/expansion_generated.go index ca6b77b1a3..7cef2e7d88 100644 --- a/pkg/generated/listers/crunchydata.com/v1/expansion_generated.go +++ b/pkg/generated/listers/crunchydata.com/v1/expansion_generated.go @@ -1,5 +1,5 @@ /* -Copyright 2020 Crunchy Data Solutions, Inc. +Copyright 2020 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at diff --git a/pkg/generated/listers/crunchydata.com/v1/pgcluster.go b/pkg/generated/listers/crunchydata.com/v1/pgcluster.go index 4dd8121f86..e8672cb0f0 100644 --- a/pkg/generated/listers/crunchydata.com/v1/pgcluster.go +++ b/pkg/generated/listers/crunchydata.com/v1/pgcluster.go @@ -1,5 +1,5 @@ /* -Copyright 2020 Crunchy Data Solutions, Inc. +Copyright 2020 - 2023 Crunchy Data Solutions, Inc. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at diff --git a/pkg/generated/listers/crunchydata.com/v1/pgpolicy.go b/pkg/generated/listers/crunchydata.com/v1/pgpolicy.go index 03740c4b71..44f7b59544 100644 --- a/pkg/generated/listers/crunchydata.com/v1/pgpolicy.go +++ b/pkg/generated/listers/crunchydata.com/v1/pgpolicy.go @@ -1,5 +1,5 @@ /* -Copyright 2020 Crunchy Data Solutions, Inc. +Copyright 2020 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at diff --git a/pkg/generated/listers/crunchydata.com/v1/pgreplica.go b/pkg/generated/listers/crunchydata.com/v1/pgreplica.go index b6cee83186..f9ea4f7167 100644 --- a/pkg/generated/listers/crunchydata.com/v1/pgreplica.go +++ b/pkg/generated/listers/crunchydata.com/v1/pgreplica.go @@ -1,5 +1,5 @@ /* -Copyright 2020 Crunchy Data Solutions, Inc. +Copyright 2020 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at diff --git a/pkg/generated/listers/crunchydata.com/v1/pgtask.go b/pkg/generated/listers/crunchydata.com/v1/pgtask.go index c7d30868a8..50e4245c7d 100644 --- a/pkg/generated/listers/crunchydata.com/v1/pgtask.go +++ b/pkg/generated/listers/crunchydata.com/v1/pgtask.go @@ -1,5 +1,5 @@ /* -Copyright 2020 Crunchy Data Solutions, Inc. +Copyright 2020 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at diff --git a/pv/create-pv-nfs-label.sh b/pv/create-pv-nfs-label.sh index a77e3e68e3..6192373051 100755 --- a/pv/create-pv-nfs-label.sh +++ b/pv/create-pv-nfs-label.sh @@ -1,6 +1,6 @@ #!/bin/bash -# Copyright 2018 - 2020 Crunchy Data Solutions, Inc. +# Copyright 2018 - 2023 Crunchy Data Solutions, Inc. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at diff --git a/pv/create-pv-nfs-legacy.sh b/pv/create-pv-nfs-legacy.sh index 4850e73652..514a910dae 100755 --- a/pv/create-pv-nfs-legacy.sh +++ b/pv/create-pv-nfs-legacy.sh @@ -1,6 +1,6 @@ #!/bin/bash -# Copyright 2017 - 2020 Crunchy Data Solutions, Inc. +# Copyright 2017 - 2023 Crunchy Data Solutions, Inc. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at diff --git a/pv/create-pv-nfs.sh b/pv/create-pv-nfs.sh index 8b2ef4ab67..27c55a9e41 100755 --- a/pv/create-pv-nfs.sh +++ b/pv/create-pv-nfs.sh @@ -1,5 +1,5 @@ #!/bin/bash -# Copyright 2017 - 2020 Crunchy Data Solutions, Inc. +# Copyright 2017 - 2023 Crunchy Data Solutions, Inc. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at diff --git a/pv/create-pv.sh b/pv/create-pv.sh index 46bbf4dbe8..afc18c28fc 100755 --- a/pv/create-pv.sh +++ b/pv/create-pv.sh @@ -1,5 +1,5 @@ #!/bin/bash -# Copyright 2017 - 2020 Crunchy Data Solutions, Inc. +# Copyright 2017 - 2023 Crunchy Data Solutions, Inc. 
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
diff --git a/pv/delete-pv.sh b/pv/delete-pv.sh
index cd653a1778..c8178402cd 100755
--- a/pv/delete-pv.sh
+++ b/pv/delete-pv.sh
@@ -1,5 +1,5 @@
#!/bin/bash
-# Copyright 2017 - 2020 Crunchy Data Solutions, Inc.
+# Copyright 2017 - 2023 Crunchy Data Solutions, Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
diff --git a/redhat/atomic/help.1 b/redhat/atomic/help.1
index bc21518dd8..65572d4c53 100644
--- a/redhat/atomic/help.1
+++ b/redhat/atomic/help.1
@@ -56,4 +56,4 @@ The Red Hat Enterprise Linux version from which the container was built. For exa
\fB\fCRelease=\fR
.PP
-The specific release number of the container. For example, Release="4.5.0"
+The specific release number of the container. For example, Release="4.6.10"
diff --git a/redhat/atomic/help.md b/redhat/atomic/help.md
index 8950e24d47..52f10b95be 100644
--- a/redhat/atomic/help.md
+++ b/redhat/atomic/help.md
@@ -45,4 +45,4 @@ The Red Hat Enterprise Linux version from which the container was built. For exa
`Release=`
-The specific release number of the container. For example, Release="4.5.0"
+The specific release number of the container. For example, Release="4.6.10"
diff --git a/testing/kubeapi/deployment.go b/testing/kubeapi/deployment.go
index 4b864d326a..52ab85e4c9 100644
--- a/testing/kubeapi/deployment.go
+++ b/testing/kubeapi/deployment.go
@@ -22,3 +22,13 @@ func (k *KubeAPI) ListDeployments(namespace string, labels map[string]string) ([
	return list.Items, err
}
+
+// GetDeployment returns the deployment with the given name; errors are swallowed, so callers always receive a non-nil (possibly empty) Deployment.
+func (k *KubeAPI) GetDeployment(namespace, name string) *apps_v1.Deployment {
+	deployment, err := k.Client.AppsV1().Deployments(namespace).Get(name, meta_v1.GetOptions{})
+	if deployment == nil && err != nil {
+		deployment = &apps_v1.Deployment{}
+	}
+
+	return deployment
+}
diff --git a/testing/pgo_cli/cluster_annotation_test.go b/testing/pgo_cli/cluster_annotation_test.go
new file mode 100644
index 0000000000..a4a85fab97
--- /dev/null
+++ b/testing/pgo_cli/cluster_annotation_test.go
@@ -0,0 +1,250 @@
+package pgo_cli_test
+
+/*
+ Copyright 2020 - 2023 Crunchy Data Solutions, Inc.
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+import (
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/require"
+)
+
+func TestClusterAnnotation(t *testing.T) {
+	t.Parallel()
+
+	withNamespace(t, func(namespace func() string) {
+		t.Run("on create", func(t *testing.T) {
+			t.Parallel()
+			tests := []struct {
+				testName     string
+				annotations  map[string]string
+				clusterFlags []string
+				addFlags     []string
+				removeFlags  []string
+				deployments  []string
+			}{
+				{
+					testName: "create-global",
+					annotations: map[string]string{
+						"global":  "here",
+						"global2": "foo",
+					},
+					clusterFlags: []string{"--pgbouncer"},
+					addFlags:     []string{"--annotation=global=here", "--annotation=global2=foo"},
+					removeFlags:  []string{"--annotation=global-", "--annotation=global2-"},
+					deployments:  []string{"create-global", "create-global-backrest-shared-repo", "create-global-pgbouncer"},
+				}, {
+					testName: "create-postgres",
+					annotations: map[string]string{
+						"postgres": "present",
+					},
+					clusterFlags: []string{},
+					addFlags:     []string{"--annotation-postgres=postgres=present"},
+					removeFlags:  []string{"--annotation-postgres=postgres-"},
+					deployments:  []string{"create-postgres"},
+				}, {
+					testName: "create-pgbackrest",
+					annotations: map[string]string{
+						"pgbackrest": "what",
+					},
+					clusterFlags: []string{},
+					addFlags:     []string{"--annotation-pgbackrest=pgbackrest=what"},
+					removeFlags:  []string{"--annotation-pgbackrest=pgbackrest-"},
+					deployments:  []string{"create-pgbackrest-backrest-shared-repo"},
+				}, {
+					testName: "create-pgbouncer",
+					annotations: map[string]string{
+						"pgbouncer": "aqui",
+					},
+					clusterFlags: []string{"--pgbouncer"},
+					addFlags:     []string{"--annotation-pgbouncer=pgbouncer=aqui"},
+					removeFlags:  []string{"--annotation-pgbouncer=pgbouncer-"},
+					deployments:  []string{"create-pgbouncer-pgbouncer"},
+				}, {
+					testName: "remove-one",
+					annotations: map[string]string{
+						"leave": "me",
+					},
+					clusterFlags: []string{"--pgbouncer"},
+					addFlags:     []string{"--annotation=remove=me", "--annotation=leave=me"},
+					removeFlags:  []string{"--annotation=remove-"},
+					deployments:  []string{"remove-one", "remove-one-backrest-shared-repo", "remove-one-pgbouncer"},
+				},
+			}
+
+			for _, test := range tests {
+				test := test // capture the range variable; parallel subtests run after the loop advances
+				t.Run(test.testName, func(t *testing.T) {
+					t.Parallel()
+					createCMD := []string{"create", "cluster", test.testName, "-n", namespace()}
+					createCMD = append(createCMD, test.clusterFlags...)
+					createCMD = append(createCMD, test.addFlags...)
+					output, err := pgo(createCMD...).Exec(t)
+					t.Cleanup(func() {
+						teardownCluster(t, namespace(), test.testName, time.Now())
+					})
+					require.NoError(t, err)
+					require.Contains(t, output, "created cluster:")
+
+					requireClusterReady(t, namespace(), test.testName, 2*time.Minute)
+					if contains(test.clusterFlags, "--pgbouncer") {
+						requirePgBouncerReady(t, namespace(), test.testName, 2*time.Minute)
+					}
+
+					t.Run("add", func(t *testing.T) {
+						for _, deploymentName := range test.deployments {
+							for expectedKey, expectedValue := range test.annotations {
+								hasAnnotation := func() bool {
+									actualAnnotations := TestContext.Kubernetes.GetDeployment(namespace(), deploymentName).Spec.Template.ObjectMeta.GetAnnotations()
+									actualValue := actualAnnotations[expectedKey]
+									return actualValue == expectedValue
+								}
+
+								requireWaitFor(t, hasAnnotation, time.Minute, time.Second,
+									"timeout waiting for deployment %q to have annotation \"%s: %s\"", deploymentName, expectedKey, expectedValue)
+							}
+						}
+					})
+
+					t.Run("remove", func(t *testing.T) {
+						t.Skip("Bug: annotation is not removed on update")
+						updateCMD := []string{"update", "cluster", test.testName, "-n", namespace(), "--no-prompt"}
+						updateCMD = append(updateCMD, test.removeFlags...)
+						output, err := pgo(updateCMD...).Exec(t)
+						require.NoError(t, err)
+						require.Contains(t, output, "updated pgcluster")
+
+						for _, deploymentName := range test.deployments {
+							for expectedKey := range test.annotations {
+								notHasAnnotation := func() bool {
+									actualAnnotations := TestContext.Kubernetes.GetDeployment(namespace(), deploymentName).Spec.Template.ObjectMeta.GetAnnotations()
+									actualValue := actualAnnotations[expectedKey]
+									return actualValue == ""
+								}
+
+								requireWaitFor(t, notHasAnnotation, time.Minute, time.Second,
+									"timeout waiting for annotation key %q to be removed from deployment %q", expectedKey, deploymentName)
+							}
+						}
+					})
+				})
+			}
+		})
+
+		t.Run("on update", func(t *testing.T) {
+			t.Parallel()
+
+			tests := []struct {
+				testName     string
+				annotations  map[string]string
+				clusterFlags []string
+				addFlags     []string
+				deployments  []string
+			}{
+				{
+					testName: "update-global",
+					annotations: map[string]string{
+						"global":  "here",
+						"global2": "foo",
+					},
+					clusterFlags: []string{"--pgbouncer"},
+					addFlags:     []string{"--annotation=global=here", "--annotation=global2=foo"},
+					deployments:  []string{"update-global", "update-global-backrest-shared-repo", "update-global-pgbouncer"},
+				}, {
+					testName: "update-postgres",
+					annotations: map[string]string{
+						"postgres": "present",
+					},
+					clusterFlags: []string{},
+					addFlags:     []string{"--annotation-postgres=postgres=present"},
+					deployments:  []string{"update-postgres"},
+				}, {
+					testName: "update-pgbackrest",
+					annotations: map[string]string{
+						"pgbackrest": "what",
+					},
+					clusterFlags: []string{},
+					addFlags:     []string{"--annotation-pgbackrest=pgbackrest=what"},
+					deployments:  []string{"update-pgbackrest-backrest-shared-repo"},
+				}, {
+					testName: "update-pgbouncer",
+					annotations: map[string]string{
+						"pgbouncer": "aqui",
+					},
+					clusterFlags: []string{"--pgbouncer"},
+					addFlags:     []string{"--annotation-pgbouncer=pgbouncer=aqui"},
+					deployments:  []string{"update-pgbouncer-pgbouncer"},
+				},
+			}
+
+			for _, test := range tests {
+				test := test // capture the range variable; parallel subtests run after the loop advances
+				t.Run(test.testName, func(t *testing.T) {
+					t.Parallel()
+					createCMD := []string{"create", "cluster", test.testName, "-n",
namespace()}
+					createCMD = append(createCMD, test.clusterFlags...)
+					output, err := pgo(createCMD...).Exec(t)
+					t.Cleanup(func() {
+						teardownCluster(t, namespace(), test.testName, time.Now())
+					})
+					require.NoError(t, err)
+					require.Contains(t, output, "created cluster:")
+
+					requireClusterReady(t, namespace(), test.testName, 2*time.Minute)
+					if contains(test.clusterFlags, "--pgbouncer") {
+						requirePgBouncerReady(t, namespace(), test.testName, 2*time.Minute)
+					}
+
+					// Allow pods time to reach ready status before running update.
+					time.Sleep(time.Minute + 30*time.Second)
+
+					updateCMD := []string{"update", "cluster", test.testName, "-n", namespace(), "--no-prompt"}
+					updateCMD = append(updateCMD, test.addFlags...)
+					output, err = pgo(updateCMD...).Exec(t)
+					require.NoError(t, err)
+					require.Contains(t, output, "updated pgcluster")
+
+					t.Run("add", func(t *testing.T) {
+						for _, deploymentName := range test.deployments {
+							for expectedKey, expectedValue := range test.annotations {
+								hasAnnotation := func() bool {
+									actualAnnotations := TestContext.Kubernetes.GetDeployment(namespace(), deploymentName).Spec.Template.ObjectMeta.GetAnnotations()
+									actualValue := actualAnnotations[expectedKey]
+									return actualValue == expectedValue
+								}
+
+								requireWaitFor(t, hasAnnotation, time.Minute, time.Second,
+									"timeout waiting for deployment %q to have annotation \"%s: %s\"", deploymentName, expectedKey, expectedValue)
+							}
+						}
+					})
+				})
+			}
+		})
+	})
+}
diff --git a/testing/pgo_cli/cluster_backup_test.go b/testing/pgo_cli/cluster_backup_test.go
index d2f8508c3d..a1e0951c36 100644
--- a/testing/pgo_cli/cluster_backup_test.go
+++ b/testing/pgo_cli/cluster_backup_test.go
@@ -1,7 +1,7 @@
 package pgo_cli_test
 
 /*
- Copyright 2020 Crunchy Data Solutions, Inc.
+ Copyright 2020 - 2023 Crunchy Data Solutions, Inc.
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
 You may obtain a copy of the License at
@@ -168,7 +168,7 @@ func TestClusterBackup(t *testing.T) {
 	})
 
 	t.Run("restore", func(t *testing.T) {
-		t.Run("replaces the cluster", func(t *testing.T) {
+		t.Run("keeps existing pvc", func(t *testing.T) {
 			t.Parallel()
 			withCluster(t, namespace, func(cluster func() string) {
 				requireClusterReady(t, namespace(), cluster(), time.Minute)
@@ -195,10 +195,10 @@ func TestClusterBackup(t *testing.T) {
 				after := clusterPVCs(t, namespace(), cluster())
 				for _, pvc := range after {
 					// check to see if the PVC for the primary is bound, and has a timestamp
-					// after the original timestamp for the primary PVC timestamp captured above,
-					// indicating that it been re-created
+					// equal to the original primary PVC creation timestamp captured above,
+					// indicating that it has not been re-created
 					if pvc.GetName() == cluster() && kubeapi.IsPVCBound(pvc) &&
-						pvc.GetCreationTimestamp().Time.After(primaryPVCCreationTimestamp) {
+						pvc.GetCreationTimestamp().Time.Equal(primaryPVCCreationTimestamp) {
 						return true
 					}
 				}
diff --git a/testing/pgo_cli/cluster_cat_test.go b/testing/pgo_cli/cluster_cat_test.go
index 4cb159be8d..3f9617a4eb 100644
--- a/testing/pgo_cli/cluster_cat_test.go
+++ b/testing/pgo_cli/cluster_cat_test.go
@@ -1,7 +1,7 @@
 package pgo_cli_test
 
 /*
- Copyright 2020 Crunchy Data Solutions, Inc.
+ Copyright 2020 - 2023 Crunchy Data Solutions, Inc.
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
You may obtain a copy of the License at diff --git a/testing/pgo_cli/cluster_create_test.go b/testing/pgo_cli/cluster_create_test.go index 26f0c2be4f..c47b92e4db 100644 --- a/testing/pgo_cli/cluster_create_test.go +++ b/testing/pgo_cli/cluster_create_test.go @@ -1,7 +1,7 @@ package pgo_cli_test /* - Copyright 2020 Crunchy Data Solutions, Inc. + Copyright 2020 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at diff --git a/testing/pgo_cli/cluster_delete_test.go b/testing/pgo_cli/cluster_delete_test.go index cba99408f7..5f62f24f61 100644 --- a/testing/pgo_cli/cluster_delete_test.go +++ b/testing/pgo_cli/cluster_delete_test.go @@ -1,7 +1,7 @@ package pgo_cli_test /* - Copyright 2020 Crunchy Data Solutions, Inc. + Copyright 2020 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at diff --git a/testing/pgo_cli/cluster_df_test.go b/testing/pgo_cli/cluster_df_test.go index 8171a7aa45..dec3ba7ef9 100644 --- a/testing/pgo_cli/cluster_df_test.go +++ b/testing/pgo_cli/cluster_df_test.go @@ -1,7 +1,7 @@ package pgo_cli_test /* - Copyright 2020 Crunchy Data Solutions, Inc. + Copyright 2020 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at diff --git a/testing/pgo_cli/cluster_failover_test.go b/testing/pgo_cli/cluster_failover_test.go index d35a6d87f5..5628dedf3b 100644 --- a/testing/pgo_cli/cluster_failover_test.go +++ b/testing/pgo_cli/cluster_failover_test.go @@ -1,7 +1,7 @@ package pgo_cli_test /* - Copyright 2020 Crunchy Data Solutions, Inc. + Copyright 2020 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -68,7 +68,7 @@ func TestClusterFailover(t *testing.T) { "--target="+before[0].Labels["deployment-name"], "--no-prompt", ).Exec(t) require.NoError(t, err) - require.Contains(t, output, "created") + require.Contains(t, output, "success") replaced := func() bool { after := replicaPods(t, namespace(), cluster()) diff --git a/testing/pgo_cli/cluster_label_test.go b/testing/pgo_cli/cluster_label_test.go index 0f54f7af93..7c001b52c6 100644 --- a/testing/pgo_cli/cluster_label_test.go +++ b/testing/pgo_cli/cluster_label_test.go @@ -1,7 +1,7 @@ package pgo_cli_test /* - Copyright 2020 Crunchy Data Solutions, Inc. + Copyright 2020 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at diff --git a/testing/pgo_cli/cluster_pgadmin_test.go b/testing/pgo_cli/cluster_pgadmin_test.go new file mode 100644 index 0000000000..627beeeace --- /dev/null +++ b/testing/pgo_cli/cluster_pgadmin_test.go @@ -0,0 +1,106 @@ +package pgo_cli_test + +/* + Copyright 2020 - 2023 Crunchy Data Solutions, Inc. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+import (
+	"strings"
+	"sync"
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/require"
+)
+
+func TestClusterPgAdmin(t *testing.T) {
+	t.Parallel()
+
+	var pgadminOnce sync.Once
+	requirePgAdmin := func(t *testing.T, namespace, cluster string) {
+		pgadminOnce.Do(func() {
+			output, err := pgo("create", "pgadmin", cluster, "-n", namespace).Exec(t)
+			require.NoError(t, err)
+			require.Contains(t, output, "addition scheduled")
+		})
+	}
+
+	withNamespace(t, func(namespace func() string) {
+		withCluster(t, namespace, func(cluster func() string) {
+			t.Run("create pgadmin", func(t *testing.T) {
+				t.Run("starts PgAdmin", func(t *testing.T) {
+					requireClusterReady(t, namespace(), cluster(), time.Minute)
+					requirePgAdmin(t, namespace(), cluster())
+
+					// PgAdmin does not appear immediately.
+					requirePgAdminReady(t, namespace(), cluster(), time.Minute)
+
+					// Wait five more seconds to ensure the pgAdmin deployment is stable.
+					// Without this sleep, the test could pass while the pgAdmin container
+					// is only up briefly before failing.
+					time.Sleep(5 * time.Second)
+
+					output, err := pgo("show", "cluster", cluster(), "-n", namespace()).Exec(t)
+					require.NoError(t, err)
+					require.Contains(t, output, "pgadmin")
+
+					output, err = pgo("test", cluster(), "-n", namespace()).Exec(t)
+					require.NoError(t, err)
+					require.Contains(t, output, "pgadmin", "expected PgAdmin to be discoverable")
+
+					for _, line := range strings.Split(output, "\n") {
+						if strings.Contains(line, "pgadmin") {
+							require.Contains(t, line, "UP", "expected PgAdmin to be accessible")
+						}
+					}
+				})
+			})
+
+			t.Run("delete pgadmin", func(t *testing.T) {
+				t.Run("stops PgAdmin", func(t *testing.T) {
+					requireClusterReady(t, namespace(), cluster(), time.Minute)
+					requirePgAdmin(t, namespace(), cluster())
+					requirePgAdminReady(t, namespace(), cluster(), time.Minute)
+
+					output, err := pgo("delete", "pgadmin", cluster(), "--no-prompt", "-n", namespace()).Exec(t)
+					require.NoError(t, err)
+					require.Contains(t, output, "delete scheduled")
+
+					gone := func() bool {
+						deployments, err := TestContext.Kubernetes.ListDeployments(namespace(), map[string]string{
+							"pg-cluster":      cluster(),
+							"crunchy-pgadmin": "true",
+						})
+						require.NoError(t, err)
+						return len(deployments) == 0
+					}
+					requireWaitFor(t, gone, time.Minute, time.Second,
+						"timeout waiting for PgAdmin of %q in %q", cluster(), namespace())
+
+					output, err = pgo("show", "cluster", cluster(), "-n", namespace()).Exec(t)
+					require.NoError(t, err)
+
+					//require.NotContains(t, output, "pgadmin")
+					for _, line := range strings.Split(output, "\n") {
+						// The service and deployment should be gone. The only remaining
+						// reference could be in the labels.
+						if strings.Contains(line, "pgadmin") {
+							require.Contains(t, line, "pgadmin=false")
+						}
+					}
+				})
+			})
+		})
+	})
+}
diff --git a/testing/pgo_cli/cluster_pgbouncer_test.go b/testing/pgo_cli/cluster_pgbouncer_test.go
index 9c5b72ba66..dc70f490fd 100644
--- a/testing/pgo_cli/cluster_pgbouncer_test.go
+++ b/testing/pgo_cli/cluster_pgbouncer_test.go
@@ -1,7 +1,7 @@
 package pgo_cli_test
 
 /*
- Copyright 2020 Crunchy Data Solutions, Inc.
+ Copyright 2020 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at diff --git a/testing/pgo_cli/cluster_policy_test.go b/testing/pgo_cli/cluster_policy_test.go index df7197deb4..1d0cac3bec 100644 --- a/testing/pgo_cli/cluster_policy_test.go +++ b/testing/pgo_cli/cluster_policy_test.go @@ -1,7 +1,7 @@ package pgo_cli_test /* - Copyright 2020 Crunchy Data Solutions, Inc. + Copyright 2020 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at diff --git a/testing/pgo_cli/cluster_pvc_test.go b/testing/pgo_cli/cluster_pvc_test.go index bd91b28435..80f9f4abf4 100644 --- a/testing/pgo_cli/cluster_pvc_test.go +++ b/testing/pgo_cli/cluster_pvc_test.go @@ -1,7 +1,7 @@ package pgo_cli_test /* - Copyright 2020 Crunchy Data Solutions, Inc. + Copyright 2020 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at diff --git a/testing/pgo_cli/cluster_reload_test.go b/testing/pgo_cli/cluster_reload_test.go index e2900ee4fb..0c2a14528b 100644 --- a/testing/pgo_cli/cluster_reload_test.go +++ b/testing/pgo_cli/cluster_reload_test.go @@ -1,7 +1,7 @@ package pgo_cli_test /* - Copyright 2020 Crunchy Data Solutions, Inc. + Copyright 2020 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at diff --git a/testing/pgo_cli/cluster_restart_test.go b/testing/pgo_cli/cluster_restart_test.go index 3f438b6570..6a1c4eeacf 100644 --- a/testing/pgo_cli/cluster_restart_test.go +++ b/testing/pgo_cli/cluster_restart_test.go @@ -1,7 +1,7 @@ package pgo_cli_test /* - Copyright 2020 Crunchy Data Solutions, Inc. + Copyright 2020 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -114,7 +114,7 @@ func TestRestart(t *testing.T) { // now update a PG setting updatePGConfigDCS(t, cluster(), namespace(), - map[string]string{"unix_socket_directories": "/tmp,/crunchyadm,/tmp/e2e"}) + map[string]string{"unix_socket_directories": "/tmp,/tmp/e2e"}) requiresRestartPrimaryReplica := func() bool { output, err := pgo(restartQueryCMD...).Exec(t) diff --git a/testing/pgo_cli/cluster_scale_test.go b/testing/pgo_cli/cluster_scale_test.go index 11ce9a9c21..a2e61fedc8 100644 --- a/testing/pgo_cli/cluster_scale_test.go +++ b/testing/pgo_cli/cluster_scale_test.go @@ -1,7 +1,7 @@ package pgo_cli_test /* - Copyright 2020 Crunchy Data Solutions, Inc. + Copyright 2020 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at diff --git a/testing/pgo_cli/cluster_scaledown_test.go b/testing/pgo_cli/cluster_scaledown_test.go index f1926a4d4d..f079f35539 100644 --- a/testing/pgo_cli/cluster_scaledown_test.go +++ b/testing/pgo_cli/cluster_scaledown_test.go @@ -1,7 +1,7 @@ package pgo_cli_test /* - Copyright 2020 Crunchy Data Solutions, Inc. 
+ Copyright 2020 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at diff --git a/testing/pgo_cli/cluster_test_test.go b/testing/pgo_cli/cluster_test_test.go index 153d47f467..af1966ce33 100644 --- a/testing/pgo_cli/cluster_test_test.go +++ b/testing/pgo_cli/cluster_test_test.go @@ -1,7 +1,7 @@ package pgo_cli_test /* - Copyright 2020 Crunchy Data Solutions, Inc. + Copyright 2020 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at diff --git a/testing/pgo_cli/cluster_user_test.go b/testing/pgo_cli/cluster_user_test.go index 9e59757a9a..5cd8bc0cd1 100644 --- a/testing/pgo_cli/cluster_user_test.go +++ b/testing/pgo_cli/cluster_user_test.go @@ -1,7 +1,7 @@ package pgo_cli_test /* - Copyright 2020 Crunchy Data Solutions, Inc. + Copyright 2020 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at diff --git a/testing/pgo_cli/operator_namespace_test.go b/testing/pgo_cli/operator_namespace_test.go index ef327c2ece..fec5241657 100644 --- a/testing/pgo_cli/operator_namespace_test.go +++ b/testing/pgo_cli/operator_namespace_test.go @@ -1,7 +1,7 @@ package pgo_cli_test /* - Copyright 2020 Crunchy Data Solutions, Inc. + Copyright 2020 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at diff --git a/testing/pgo_cli/operator_rbac_test.go b/testing/pgo_cli/operator_rbac_test.go index 8fa4609894..f7fd42d6a5 100644 --- a/testing/pgo_cli/operator_rbac_test.go +++ b/testing/pgo_cli/operator_rbac_test.go @@ -1,7 +1,7 @@ package pgo_cli_test /* - Copyright 2020 Crunchy Data Solutions, Inc. + Copyright 2020 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at diff --git a/testing/pgo_cli/operator_test.go b/testing/pgo_cli/operator_test.go index 743b872614..401a5ac22c 100644 --- a/testing/pgo_cli/operator_test.go +++ b/testing/pgo_cli/operator_test.go @@ -1,7 +1,7 @@ package pgo_cli_test /* - Copyright 2020 Crunchy Data Solutions, Inc. + Copyright 2020 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at diff --git a/testing/pgo_cli/suite_helpers_test.go b/testing/pgo_cli/suite_helpers_test.go index dcaee7b0f0..8ae205bb94 100644 --- a/testing/pgo_cli/suite_helpers_test.go +++ b/testing/pgo_cli/suite_helpers_test.go @@ -1,7 +1,7 @@ package pgo_cli_test /* - Copyright 2020 Crunchy Data Solutions, Inc. + Copyright 2020 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
 You may obtain a copy of the License at
@@ -44,6 +44,16 @@ type Pool struct {
 func (p *Pool) Close() error { p.Pool.Close(); return p.Proxy.Close() }
 
+// contains reports whether the string slice s contains str.
+func contains(s []string, str string) bool {
+	for _, v := range s {
+		if v == str {
+			return true
+		}
+	}
+	return false
+}
+
 // clusterConnection opens a PostgreSQL connection to a database pod. Any error
 // will cause t to FailNow.
 func clusterConnection(t testing.TB, namespace, cluster, dsn string) *Pool {
@@ -188,6 +198,37 @@ func requireClusterReady(t testing.TB, namespace, cluster string, timeout time.D
 	}
 }
 
+// requirePgAdminReady waits until all PgAdmin deployments for cluster are
+// ready. If timeout elapses or any error occurs, t will FailNow.
+func requirePgAdminReady(t testing.TB, namespace, cluster string, timeout time.Duration) {
+	t.Helper()
+
+	ready := func() bool {
+		deployments, err := TestContext.Kubernetes.ListDeployments(namespace, map[string]string{
+			"pg-cluster":      cluster,
+			"crunchy-pgadmin": "true",
+		})
+		require.NoError(t, err)
+
+		if len(deployments) == 0 {
+			return false
+		}
+		for _, deployment := range deployments {
+			if *deployment.Spec.Replicas < 1 ||
+				deployment.Status.ReadyReplicas != *deployment.Spec.Replicas ||
+				deployment.Status.UpdatedReplicas != *deployment.Spec.Replicas {
+				return false
+			}
+		}
+		return true
+	}
+
+	if !ready() {
+		requireWaitFor(t, ready, timeout, time.Second,
+			"timeout waiting for PgAdmin of %q in %q", cluster, namespace)
+	}
+}
+
 // requirePgBouncerReady waits until all PgBouncer deployments for cluster are
 // ready. If timeout elapses or any error occurs, t will FailNow.
 func requirePgBouncerReady(t testing.TB, namespace, cluster string, timeout time.Duration) {
@@ -327,12 +368,6 @@ func withCluster(t testing.TB, namespace func() string, during func(func() strin
 	var name string
 	var once sync.Once
 
-	defer func() {
-		if name != "" {
-			teardownCluster(t, namespace(), name, created)
-		}
-	}()
-
 	during(func() string {
 		once.Do(func() {
 			generated := names.SimpleNameGenerator.GenerateName("pgo-test-")
@@ -343,6 +378,10 @@ func withCluster(t testing.TB, namespace func() string, during func(func() strin
 			created = time.Now()
 			name = generated
 		}
+
+		t.Cleanup(func() {
+			teardownCluster(t, namespace(), name, created)
+		})
 	})
 	return name
 })
@@ -364,13 +403,6 @@ func withNamespace(t testing.TB, during func(func() string)) {
 	var namespace *core_v1.Namespace
 	var once sync.Once
 
-	defer func() {
-		if namespace != nil {
-			err := TestContext.Kubernetes.DeleteNamespace(namespace.Name)
-			assert.NoErrorf(t, err, "unable to tear down namespace %q", namespace.Name)
-		}
-	}()
-
 	during(func() string {
 		once.Do(func() {
 			ns, err := TestContext.Kubernetes.GenerateNamespace(
@@ -381,6 +413,11 @@ func withNamespace(t testing.TB, during func(func() string)) {
 			_, err = pgo("update", "namespace", namespace.Name).Exec(t)
 			assert.NoErrorf(t, err, "unable to take ownership of namespace %q", namespace.Name)
 		}
+
+		t.Cleanup(func() {
+			err := TestContext.Kubernetes.DeleteNamespace(namespace.Name)
+			assert.NoErrorf(t, err, "unable to tear down namespace %q", namespace.Name)
+		})
 	})
 
 	return namespace.Name
diff --git a/testing/pgo_cli/suite_pgo_cmd_test.go b/testing/pgo_cli/suite_pgo_cmd_test.go
index 91aec62228..1721c702e1 100644
--- a/testing/pgo_cli/suite_pgo_cmd_test.go
+++ b/testing/pgo_cli/suite_pgo_cmd_test.go
@@ -1,7 +1,7 @@
 package pgo_cli_test
 
 /*
- Copyright 2020 Crunchy Data Solutions, Inc.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at diff --git a/testing/pgo_cli/suite_test.go b/testing/pgo_cli/suite_test.go index 4f2056d08e..adbbb5971f 100644 --- a/testing/pgo_cli/suite_test.go +++ b/testing/pgo_cli/suite_test.go @@ -1,7 +1,7 @@ package pgo_cli_test /* - Copyright 2020 Crunchy Data Solutions, Inc. + Copyright 2020 - 2023 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at