From 21701f2a0bb4949f676563dd3feb8632329c9d9e Mon Sep 17 00:00:00 2001 From: Sarah Conway Date: Wed, 13 Jun 2018 08:49:14 -0700 Subject: [PATCH 1/2] move Upgrading section to bottom --- hugo/content/installation/configuration.adoc | 2 +- hugo/content/installation/deployment.adoc | 2 +- hugo/content/installation/upgrading-the-operator.adoc | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/hugo/content/installation/configuration.adoc b/hugo/content/installation/configuration.adoc index 9f25162964..289b0cf722 100644 --- a/hugo/content/installation/configuration.adoc +++ b/hugo/content/installation/configuration.adoc @@ -2,7 +2,7 @@ title: "Configuration" date: 2018-04-24T18:26:56-07:00 draft: false -weight: 60 +weight: 50 --- :toc: diff --git a/hugo/content/installation/deployment.adoc b/hugo/content/installation/deployment.adoc index 773798c094..49d47de79a 100644 --- a/hugo/content/installation/deployment.adoc +++ b/hugo/content/installation/deployment.adoc @@ -2,7 +2,7 @@ title: "Deployment" date: 2018-04-26T15:26:40-07:00 draft: false -weight: 50 +weight: 40 --- :toc: diff --git a/hugo/content/installation/upgrading-the-operator.adoc b/hugo/content/installation/upgrading-the-operator.adoc index 5500ef3aec..3d3ff41ec5 100644 --- a/hugo/content/installation/upgrading-the-operator.adoc +++ b/hugo/content/installation/upgrading-the-operator.adoc @@ -2,7 +2,7 @@ title: "Upgrading the Operator" date: 2018-04-24T18:27:30-07:00 draft: false -weight: 40 +weight: 60 --- v3.1, {docdate} From bd1607a326c06d4b9f45a1c3751147ad37d31b67 Mon Sep 17 00:00:00 2001 From: Sarah Conway Date: Wed, 13 Jun 2018 08:49:33 -0700 Subject: [PATCH 2/2] Publishing documentation --- docs/404.html | 16 +-- docs/categories/index.html | 16 +-- docs/contributing/index.html | 20 ++-- docs/getting-started/index.html | 94 +++++++++------ docs/how-it-works/index.html | 113 ++++++++++++++++-- docs/index.html | 18 +-- docs/index.json | 34 +++--- docs/index.xml | 41 +++---- docs/installation/configuration/index.html | 42 ++++--- docs/installation/deployment/index.html | 30 ++--- docs/installation/helm-chart/index.html | 22 ++-- docs/installation/index.html | 18 +-- docs/installation/index.xml | 41 +++---- .../manual-installation/index.html | 90 +++++++------- .../quick-installation/index.html | 22 ++-- .../upgrading-the-operator/index.html | 24 ++-- docs/sitemap.xml | 12 +- docs/tags/index.html | 16 +-- docs/theme-original/style.css | 50 ++------ 19 files changed, 404 insertions(+), 315 deletions(-) diff --git a/docs/404.html b/docs/404.html index 187c147e9a..054f6cfb76 100644 --- a/docs/404.html +++ b/docs/404.html @@ -5,7 +5,7 @@ - + 404 Page not found :: Crunchy Data PostgreSQL Operator Documentation @@ -91,13 +91,6 @@ Helm Chart - -
  • -
    - - Upgrading the Operator - -
  • @@ -112,6 +105,13 @@ Configuration
    +
  • +
  • +
    + + Upgrading the Operator + +
  • diff --git a/docs/categories/index.html b/docs/categories/index.html index 9f94e5d214..5f39461b4a 100644 --- a/docs/categories/index.html +++ b/docs/categories/index.html @@ -5,7 +5,7 @@ - + Categories :: Crunchy Data PostgreSQL Operator Documentation @@ -91,13 +91,6 @@ Helm Chart - -
  • -
    - - Upgrading the Operator - -
  • @@ -112,6 +105,13 @@ Configuration
    +
  • +
  • +
    + + Upgrading the Operator + +
  • diff --git a/docs/contributing/index.html b/docs/contributing/index.html index 4f857b7dc4..0d184215b9 100644 --- a/docs/contributing/index.html +++ b/docs/contributing/index.html @@ -5,7 +5,7 @@ - + Contributing :: Crunchy Data PostgreSQL Operator Documentation @@ -91,13 +91,6 @@ Helm Chart - -
  • -
    - - Upgrading the Operator - -
  • @@ -112,6 +105,13 @@ Configuration
    +
  • +
  • +
    + + Upgrading the Operator + +
  • @@ -220,7 +220,7 @@

    Contributing

    -

    v3.0, 2018-06-04

    +

    v3.1, 2018-06-13

    Getting Started

    @@ -272,7 +272,7 @@

    Documentation

    diff --git a/docs/getting-started/index.html b/docs/getting-started/index.html index 3154c7aed9..dbae1d2196 100644 --- a/docs/getting-started/index.html +++ b/docs/getting-started/index.html @@ -5,7 +5,7 @@ - + Getting Started :: Crunchy Data PostgreSQL Operator Documentation @@ -91,13 +91,6 @@ Helm Chart - -
  • -
    - - Upgrading the Operator - -
  • @@ -112,6 +105,13 @@ Configuration
    +
  • +
  • +
    + + Upgrading the Operator + +
  • @@ -249,7 +249,7 @@

    Getting Started

    -

    v3.0, 2018-06-04

    +

    v3.1, 2018-06-13

    pgo Commands

    @@ -410,6 +410,22 @@

    pgo create cluster

    This will cause a new PVC to be created to hold archive logs. Space is consumed by these logs but archives enable you to perform Point-In-Time-Recovery.

    +
    +

    To enable auto failover on this cluster, use the following flag:

    +
    +
    +
    +
    pgo create cluster testcluster --autofail
    +
    +
    +
    +

When auto failover is set on a cluster, it informs the operator to watch for NotReady events on that cluster; when they occur, the operator creates a failover state machine which acts as a timer for the cluster. If the timer expires, a failover is triggered on the cluster, turning one of the cluster replica pods into the replacement primary pod. See the How It Works documentation for more details on auto failover.
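As a quick, illustrative check (this assumes the operator applies a pg-cluster label equal to the cluster name to the database pods), the readiness that drives these NotReady events can be watched with:

kubectl get pods --selector=pg-cluster=testcluster --watch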

    +

    pgo backup

    @@ -475,6 +491,18 @@

    pgo backup

    be left intact but the actual Kubernetes Job will be removed prior to creating a new Job with the same name.

    +
    +

    You can override the PVC used by the backup job with the following:

    +
    +
    +
    +
    pgo backup mycluster --pvc-name=myremotepvc
    +
    +
    +
    +

    This might be useful for special backup cases, perhaps to create +a backup on a disaster recovery PVC.
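Once a backup like that completes, the contents of the PVC can be listed with the pgo client, for example (using the hypothetical PVC name from above):

pgo show pvc myremotepvc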

    +

    pgo delete backup

    @@ -894,15 +922,7 @@

    pgo user

    -
    pgo user --delete-user=sally --selector=name=mycluster
    -
    -
    -
    -

    To delete that user in all clusters:

    -
    -
    -
    -
    pgo user --delete-user=sally
    +
    pgo user delete user sally --selector=name=mycluster
    @@ -951,14 +971,6 @@

    pgo user

    specify valid-days=-1 it will mean the password will not expire (e.g. infinity).

    -

    To drop a user:

    -
    -
    -
    -
    pgo user --delete-user=user3   --selector=project=xray
    -
    -
    -

    To see which passwords are set to expire in a given number of days:

    @@ -1101,15 +1113,27 @@

    pgo failover

    cluster.

    -

    This process includes the following actions: - * pick a target replica to become the new primary - * delete the current primary deployment to avoid user requests from - going to multiple primary databases (split brain) - * promote the targeted replica using pg_ctl promote, this will - cause PostgreSQL to go into read-write mode - * re-label the targeted replica to use the primary labels, this - will match the primary service selector and cause new requests - to the primary to be routed to the new primary (targeted replica)

    +

    This process includes the following actions:

    +
    +
    +
      +
    • +

      pick a target replica to become the new primary

      +
    • +
    • +

      delete the current primary deployment to avoid user requests from +going to multiple primary databases (split brain)

      +
    • +
    • +

promote the targeted replica using pg_ctl promote, which will cause PostgreSQL to go into read-write mode

      +
    • +
    • +

re-label the targeted replica to use the primary labels; this will match the primary service selector and cause new requests to the primary to be routed to the new primary (targeted replica)

      +
    • +

    The command works like this:
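For illustration only, assuming a cluster named mycluster and a target name reported by pgo failover --query, an invocation might look like:

pgo failover mycluster --target=mycluster-abcd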

    diff --git a/docs/how-it-works/index.html b/docs/how-it-works/index.html index 4a2566628b..a950e2ab5f 100644 --- a/docs/how-it-works/index.html +++ b/docs/how-it-works/index.html @@ -5,7 +5,7 @@ - + How it Works :: Crunchy Data PostgreSQL Operator Documentation @@ -91,13 +91,6 @@ Helm Chart
    - -
  • -
    - - Upgrading the Operator - -
  • @@ -112,6 +105,13 @@ Configuration
    +
  • +
  • +
    + + Upgrading the Operator + +
  • @@ -243,12 +243,14 @@

    How it Works

  • Cluster Deletion
  • Custom Postgres Configurations
  • Metrics Collection
  • +
  • Manual Failover
  • +
  • Auto Failover
  • -

    v3.0, 2018-06-04

    +

    v3.1, 2018-06-13

    Reference Architecture

    @@ -818,6 +820,99 @@

    Metrics Collection

    See the crunchy-containers Metrics example for more details on setting up the crunchy-metrics solution.

    +
    +

    Manual Failover

    +
    +

Manual failover includes the following key features:

    +
    +
    +
      +
    • +

when you perform a failover, a new replica is created to replace the replica that was promoted, evening out the cluster to the original number of replicas

      +
    • +
    • +

when you perform a failover, the promoted replica is removed from the pgreplica CRD to reflect the current state of the cluster

      +
    • +
    +
    +
    +

The pgo failover --query command will return a list of replica targets which you can select from. That list includes the Ready status of the database as well as the Kube node name it is running on.
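For example, assuming a cluster named mycluster, the available targets can be listed with:

pgo failover mycluster --query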

    +
    +
    +
    +

    Auto Failover

    +
    +

    Starting with release 3.1, there is an auto failover mechanism +that can be leveraged by pgo users if enabled.

    +
    +
    +

This feature will cause the operator to start a timer on a database primary that has received a NotReady status after the database has started. This can happen if, for instance, the primary database loses the connection to its database storage (e.g. gluster, NFS).

    +
    +
    +

    Once the timer is started, if the primary database does not get +back to a Ready status within that timer period, a failover +is triggered for this cluster. The failover target is selected +by the auto failover logic.

    +
    +
    +

    The amount of time (in seconds) the auto failover timer will wait before +triggering a failover is determined by the following pgo.yaml setting:

    +
    +
    +
    +
    AutofailSleepSeconds:  9
    +
    +
    +
    +

If the above setting is not configured, a default value of 30 seconds is chosen.

    +
    +
    +

    The logic of auto failover works like this:

    +
    +
    +
      +
    • +

the readiness probe on the primary database container is executed every few seconds to check the readiness of the database; this is what tells Kubernetes whether the container is Ready or NotReady.

      +
    • +
    • +

if a NotReady state is detected, that event is caught by the operator, which is watching the database containers it created

      +
    • +
    • +

upon a NotReady event, a timer is started for that database, which acts as the final check as to whether a failover is required for that database

      +
    • +
    • +

if the timer expires and the state is still NotReady, the manual failover logic is executed for this cluster, which promotes a replica to primary and also creates a replacement replica

      +
    • +
    • +

      only replica targets with a status of Ready will be used to select +the target to promote

      +
    • +
    +
    +
    +

    The readiness probe settings are defined in the following template:

    +
    +
    +
    +
    conf/postgres-operator/cluster/1/cluster-deployment-1.json
    +
    +
    +
    +

    The readiness probe settings determine how often the database check +is performed. See the Kubernetes documentation on readiness probes for +more details on these settings.
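To see the readiness probe values applied to a running primary, something like the following can be used (mycluster is a placeholder deployment name):

kubectl get deployment mycluster -o jsonpath='{.spec.template.spec.containers[0].readinessProbe}'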

    +
    +
    diff --git a/docs/index.html b/docs/index.html index 9332c812c1..564eb76558 100644 --- a/docs/index.html +++ b/docs/index.html @@ -5,7 +5,7 @@ - + Crunchy Data PostgreSQL Operator :: Crunchy Data PostgreSQL Operator Documentation @@ -91,13 +91,6 @@ Helm Chart - -
  • -
    - - Upgrading the Operator - -
  • @@ -112,6 +105,13 @@ Configuration
    +
  • +
  • +
    + + Upgrading the Operator + +
  • @@ -181,7 +181,7 @@
    -

    v2.6, 2018-06-04

    +

    v3.1, 2018-06-13

    Documentation

    diff --git a/docs/index.json b/docs/index.json index cbe3c03c99..a738e4dd64 100644 --- a/docs/index.json +++ b/docs/index.json @@ -4,77 +4,77 @@ "title": "Quick Installation", "tags": [], "description": "", - "content": "Table of Contents Overview Quickstart GKE/PKS Openshift Container Platform Next Steps v3.0, 2018-06-04\n Overview There are currently quickstart scripts that seek to automate the deployment to popular Kubernetes environments -\n quickstart-for-gke.sh\n quickstart-for-ocp.sh\n The quickstart-for-gke script will deploy the operator to a GKE Kube cluster.\n The quickstart-for-ocp script will deploy the operator to an Openshift Container Platform cluster.\n Both scripts assume you have a StorageClass defined for persistence.\n Pre-compiled versions of the Operator pgo client are provided for the x86_64, Mac OSX, and Windows hosts.\n Quickstart GKE/PKS The quickstart-for-gke.sh script will allow users to set up the Postgres Operator quickly on GKE including PKS. This script is tested on GKE but can be modified for use with other Kubernetes environments as well.\n The script requires a few things in order to work -\n wget utility installed\n kubectl utility installed\n StorageClass defined on your GKE instance\n Executing the script will give you a default Operator deployment that assumes dynamic storage and a storage class named standard, things that GKE provides.\n The script performs the following -\n downloads the Operator configuration files\n sets the $HOME/.pgouser file to default settings\n deploys the Operator Deployment\n sets your .bashrc to include the Operator environment variables\n sets your $HOME/.bash_completion file to be the pgo bash_completion file\n A tip, if you want to set your Kube context to some particular namespace you can run commands similar to this to set it to a demo namespace if that namespace has already been created on your GKE cluster:\n kubectl create -f $COROOT/examples/demo-namespace.json kubectl config set-context demo --cluster=gke_crunchy-a-test_us-central1-a_usera-quickstart --namespace=demo --user=gke_crunchy-a-test_us-central1-a_usera-quickstart kubectl config use-context demo For Mac and Windows users, pre-built pgo binaries are included in the operator release tar ball: * pgo-mac is the Mac binary * pgo.exe is the Windows binary * pgo is the Linux binary * expenv-mac is the expenv binary for Mac * expenv.exe is the expenv binary for Windows\n Currently the quickstart scripts are meant for Linux installs, you will need to modify this script for Windows or Mac installs until we support and provide Windows and Mac installation scripts.\n Openshift Container Platform A similar script for installing the operator on OCP is offered with similar features as the GKE script. 
This script is tested on OCP 3.7 with a StorageClass defined.\n Next Steps Next, visit the Deployment page to deploy the Operator, verify the installation, and view various storage configurations.\n " + "content": "Table of Contents Overview Quickstart GKE/PKS Openshift Container Platform Next Steps v3.1, 2018-06-13\n Overview There are currently quickstart scripts that seek to automate the deployment to popular Kubernetes environments -\n quickstart-for-gke.sh\n quickstart-for-ocp.sh\n The quickstart-for-gke script will deploy the operator to a GKE Kube cluster.\n The quickstart-for-ocp script will deploy the operator to an Openshift Container Platform cluster.\n Both scripts assume you have a StorageClass defined for persistence.\n Pre-compiled versions of the Operator pgo client are provided for the x86_64, Mac OSX, and Windows hosts.\n Quickstart GKE/PKS The quickstart-for-gke.sh script will allow users to set up the Postgres Operator quickly on GKE including PKS. This script is tested on GKE but can be modified for use with other Kubernetes environments as well.\n The script requires a few things in order to work -\n wget utility installed\n kubectl utility installed\n StorageClass defined on your GKE instance\n Executing the script will give you a default Operator deployment that assumes dynamic storage and a storage class named standard, things that GKE provides.\n The script performs the following -\n downloads the Operator configuration files\n sets the $HOME/.pgouser file to default settings\n deploys the Operator Deployment\n sets your .bashrc to include the Operator environment variables\n sets your $HOME/.bash_completion file to be the pgo bash_completion file\n A tip, if you want to set your Kube context to some particular namespace you can run commands similar to this to set it to a demo namespace if that namespace has already been created on your GKE cluster:\n kubectl create -f $COROOT/examples/demo-namespace.json kubectl config set-context demo --cluster=gke_crunchy-a-test_us-central1-a_usera-quickstart --namespace=demo --user=gke_crunchy-a-test_us-central1-a_usera-quickstart kubectl config use-context demo For Mac and Windows users, pre-built pgo binaries are included in the operator release tar ball: * pgo-mac is the Mac binary * pgo.exe is the Windows binary * pgo is the Linux binary * expenv-mac is the expenv binary for Mac * expenv.exe is the expenv binary for Windows\n Currently the quickstart scripts are meant for Linux installs, you will need to modify this script for Windows or Mac installs until we support and provide Windows and Mac installation scripts.\n Openshift Container Platform A similar script for installing the operator on OCP is offered with similar features as the GKE script. 
This script is tested on OCP 3.7 with a StorageClass defined.\n Next Steps Next, visit the Deployment page to deploy the Operator, verify the installation, and view various storage configurations.\n " }, { "uri": "https://crunchydata.github.io/postgres-operator/installation/manual-installation/", "title": "Manual Installation", "tags": [], "description": "", - "content": "Table of Contents Project Structure Installation Prerequsites Basic Installation Create HostPath Directory Build Images \u0026amp; Deploy Makefile Targets Next Steps v3.0, 2018-06-04\n Project Structure To perform an installation of the operator, first create the project structure as follows on your host, here we assume a local directory called odev -\n export GOPATH=$HOME/odev mkdir -p $HOME/odev/src $HOME/odev/bin $HOME/odev/pkg mkdir -p $GOPATH/src/github.com/crunchydata/ Next, get a tagged release of the source code -\n cd $GOPATH/src/github.com/crunchydata git clone https://github.com/CrunchyData/postgres-operator.git cd postgres-operator git checkout 3.0 Installation Prerequsites To run the operator and the pgo client, you will need the following -\n a running Kubernetes or OpenShift cluster\n the kubectl or oc clients installed in your PATH and configured to connect to the cluster (e.g. export KUBECONFIG=/etc/kubernetes/admin.conf)\n a Kubernetes namespace created and set to where you want the operator installed. For this install we assume a namespace of demo has been created.\n kubectl create -f examples/demo-namespace.json kubectl config set-context $(kubectl config current-context) --namespace=demo kubectl config view | grep namespace Warning If you are not using the demo namespace, it will be required to edit the following and change the namespace where the service account and cluster role bindings will be deployed.\n $COROOT/deploy/service-account.yaml\n $COROOT/deploy/cluster-role-binding.yaml\n Permissions are granted to the Operator by means of a Service Account called postgres-operator. That service account is added to the Operator deployment.\n The postgres-operator service account is granted cluster-admin priviledges using a cluster role binding postgres-operator-cluster-role-binding.\n See here for more details on how to enable RBAC roles and modify the scope of the permissions to suit your needs.\n Basic Installation The basic installation uses the default operator configuration settings, these settings assume you want to use HostPath storage on your Kube cluster for database persistence. Other persistent options are available but require the Advanced Installation below.\n Create HostPath Directory The default Persistent Volume script assumes a default HostPath directory be created called /data:\n sudo mkdir /data sudo chown 777 /data Create some sample Persistent Volumes using the following script:\n export CO_NAMESPACE=demo export CO_CMD=kubectl export COROOT=$GOPATH/src/github.com/crunchydata/postgres-operator go get github.com/blang/expenv $COROOT/pv/create-pv.sh Build Images \u0026amp; Deploy Packaged Images Packaged Images To pull prebuilt versions from Dockerhub of the postgres-operator containers, specify the image versions, and execute the following Makefile target -\n export CO_IMAGE_PREFIX=crunchydata export CO_IMAGE_TAG=centos7-3.0 make pull To pull down the prebuilt pgo binaries, download the tar.gz release file from the following link -\n Github Releases\n extract (e.g. 
tar xvzf postgres-operator.3.0.tar.gz)\n cd $HOME tar xvzf ./postgres-operator.3.0.tar.gz copy pgo client to somewhere in your path (e.g. cp pgo /usr/local/bin)\n Next, deploy the operator to your Kubernetes cluster -\n cd $COROOT make deployoperator Build from Source Build from Source The purpose of this section is to illustrate how to build the PostgreSQL Operator from source. These are considered advanced installation steps and should be primarily used by developers or those wishing a more precise installation method.\n Requirements The postgres-operator runs on any Kubernetes and Openshift platform that supports Custom Resource Definitions. The Operator is tested on Kubeadm and OpenShift Container Platform environments.\n The operator is developed with the Golang versions greater than or equal to version 1.8. See Golang website for details on installing golang.\n The Operator project builds and operates with the following containers -\n PVC Listing Container\n Remove Data Container\n postgres-operator Container\n apiserver Container\n file load Container\n This Operator is developed and tested on the following operating systems but is known to run on other operating systems -\n CentOS 7\n RHEL 7\n First, install the project library dependencies. The godep dependency manager is used for this purpose. -\n cd $COROOT make setup Then, compile the PostgreSQL Operator using the Makefile.\n cd $COROOT make all which pgo Finally, deploy the operator to your Kubernetes cluster.\n cd $COROOT make deployoperator Makefile Targets The following table describes the Makefile targets -\n Table 1. Makefile Targets Target Description all\n compile all binaries and build all images\n setup\n fetch the dependent packages required to build with\n deployoperator\n deploy the Operator (apiserver and postgers-operator) to Kubernetes\n main\n compile the postgres-operator\n runmain\n locally execute the postgres-operator\n pgo\n build the pgo binary\n runpgo\n run the pgo binary\n runapiserver\n run the apiserver binary outside of Kube\n clean\n remove binaries and compiled packages, restore dependencies\n operatorimage\n compile and build the postgres-operator Docker image\n apiserverimage\n compile and build the apiserver Docker image\n lsimage\n build the lspvc Docker image\n loadimage\n build the file load Docker image\n rmdataimage\n build the data deletion Docker image\n release\n build the postgres-operator release\n Next Steps Next, visit the Deployment page to deploy the Operator, verify the installation, and view various storage configurations.\n " + "content": "Table of Contents Project Structure Installation Prerequsites Basic Installation Create HostPath Directory Build Images \u0026amp; Deploy Makefile Targets Next Steps v3.1, 2018-06-13\n Project Structure First, define the following environment variables in .bashrc:\n export GOPATH=$HOME/odev export GOBIN=$GOPATH/bin export CO_NAMESPACE=demo export CO_CMD=kubectl export COROOT=$GOPATH/src/github.com/crunchydata/postgres-operator export CO_IMAGE_PREFIX=crunchydata export CO_IMAGE_TAG=centos7-3.1 export CO_BASEOS=centos7 When deploying on Openshift Container Platform, the CO_CMD environment variable should be:\n export CO_CMD=oc To perform an installation of the operator, first create the project structure as follows on your host, here we assume a local directory called odev -\n mkdir -p $HOME/odev/src $HOME/odev/bin $HOME/odev/pkg mkdir -p $GOPATH/src/github.com/crunchydata/ Next, get a tagged release of the source code -\n cd 
$GOPATH/src/github.com/crunchydata git clone https://github.com/CrunchyData/postgres-operator.git cd postgres-operator git checkout 3.1 Installation Prerequsites To run the operator and the pgo client, you will need the following -\n a running Kubernetes or OpenShift cluster\n the kubectl or oc clients installed in your PATH and configured to connect to the cluster (e.g. export KUBECONFIG=/etc/kubernetes/admin.conf)\n a Kubernetes namespace created and set to where you want the operator installed. For this install we assume a namespace of demo has been created.\n kubectl create -f examples/demo-namespace.json kubectl config set-context $(kubectl config current-context) --namespace=demo kubectl config view | grep namespace Warning The namespace used by the operator is determined by the CO_NAMESPACE environment variable setting.\n Permissions are granted to the Operator by means of a Service Account called postgres-operator. That service account is added to the Operator deployment.\n The postgres-operator service account is granted priviledges using a role binding pgo-role-binding.\n See here for more details on how to enable RBAC roles and modify the scope of the permissions to suit your needs.\n Basic Installation The basic installation uses the default operator configuration settings, these settings assume you want to use HostPath storage on your Kube cluster for database persistence. Other persistent options are available but require the Advanced Installation below.\n Create HostPath Directory The default Persistent Volume script assumes a default HostPath directory be created called /data:\n sudo mkdir /data sudo chown 777 /data Create some sample Persistent Volumes using the following script:\n go get github.com/blang/expenv $COROOT/pv/create-pv.sh Build Images \u0026amp; Deploy Packaged Images Packaged Images To pull prebuilt versions from Dockerhub of the postgres-operator containers, execute the following Makefile target -\n make pull To pull down the prebuilt pgo binaries, download the tar.gz release file from the following link -\n Github Releases\n extract (e.g. tar xvzf postgres-operator.3.1.tar.gz)\n cd $HOME tar xvzf ./postgres-operator.3.1.tar.gz copy pgo client to somewhere in your path (e.g. cp pgo /usr/local/bin)\n Next, deploy the operator to your Kubernetes cluster -\n cd $COROOT make deployoperator Build from Source Build from Source The purpose of this section is to illustrate how to build the PostgreSQL Operator from source. These are considered advanced installation steps and should be primarily used by developers or those wishing a more precise installation method.\n Requirements The postgres-operator runs on any Kubernetes and Openshift platform that supports Custom Resource Definitions. The Operator is tested on Kubeadm and OpenShift Container Platform environments.\n The operator is developed with the Golang versions greater than or equal to version 1.8. See Golang website for details on installing golang.\n The Operator project builds and operates with the following containers -\n PVC Listing Container\n Remove Data Container\n postgres-operator Container\n apiserver Container\n file load Container\n This Operator is developed and tested on the following operating systems but is known to run on other operating systems -\n CentOS 7\n RHEL 7\n Before compiling the Operator, it\u0026#8217;s necessary to install the Mercurial requirement.\n sudo yum -y install mercurial Then, install the project library dependencies. 
The godep dependency manager is used for this purpose. Then, compile the PostgreSQL Operator using the Makefile and deploy the operator to your Kubernetes cluster.\n cd $COROOT make setup make all make deployoperator Makefile Targets The following table describes the Makefile targets -\n Table 1. Makefile Targets Target Description all\n compile all binaries and build all images\n setup\n fetch the dependent packages required to build with\n deployoperator\n deploy the Operator (apiserver and postgers-operator) to Kubernetes\n main\n compile the postgres-operator\n runmain\n locally execute the postgres-operator\n pgo\n build the pgo binary\n runpgo\n run the pgo binary\n runapiserver\n run the apiserver binary outside of Kube\n clean\n remove binaries and compiled packages, restore dependencies\n operatorimage\n compile and build the postgres-operator Docker image\n apiserverimage\n compile and build the apiserver Docker image\n lsimage\n build the lspvc Docker image\n loadimage\n build the file load Docker image\n rmdataimage\n build the data deletion Docker image\n release\n build the postgres-operator release\n Next Steps Next, visit the Deployment page to deploy the Operator, verify the installation, and view various storage configurations.\n " }, { "uri": "https://crunchydata.github.io/postgres-operator/installation/helm-chart/", "title": "Helm Chart", "tags": [], "description": "", - "content": "v3.0, 2018-06-04\n Helm Chart First, pull prebuilt versions from Dockerhub of the postgres-operator containers, specify the image versions, and execute the following Makefile target -\n export CO_IMAGE_PREFIX=crunchydata export CO_IMAGE_TAG=centos7-3.0 make pull Then, build and deploy the operator using the provided Helm chart -\n cd $COROOT/chart helm install ./postgres-operator helm ls Next Steps Next, visit the Deployment page to deploy the Operator, verify the installation, and view various storage configurations.\n " + "content": "v3.1, 2018-06-13\n Helm Chart First, pull prebuilt versions from Dockerhub of the postgres-operator containers, specify the image versions, and execute the following Makefile target -\n export CO_IMAGE_PREFIX=crunchydata export CO_IMAGE_TAG=centos7-3.1 make pull Then, build and deploy the operator using the provided Helm chart -\n cd $COROOT/chart helm install ./postgres-operator helm ls Next Steps Next, visit the Deployment page to deploy the Operator, verify the installation, and view various storage configurations.\n " }, { - "uri": "https://crunchydata.github.io/postgres-operator/installation/upgrading-the-operator/", - "title": "Upgrading the Operator", + "uri": "https://crunchydata.github.io/postgres-operator/installation/deployment/", + "title": "Deployment", "tags": [], "description": "", - "content": "v3.0, 2018-06-04\n Upgrading from v2.4 to v2.5 For a full list of additions and revisions that occurred in the PostgreSQL Operator v2.5 release, please view the related release page here.\n Required Updates This section notes some required steps that will need to be taken in the process of upgrading from v2.4 to v2.5.\n Configuration File It will be necessary to update your existing pgo.yaml configuration file where the Storage Configuration sections are concerned. The updated file for v2.5 can be found here. 
The file contained within the local installation of the Operator is located by default in the following location -\n $COROOT/conf/apiserver/pgo.yaml Secrets 2.5 changed the names of the database credentials that are created by default in order to be consistent with the way new database credentials are named.\n It will be necessary to run the following script to update your existing clusters. This script will essentially copy the existing secrets values and create new secrets with those same values but named to the new standard. Run the script by passing in the name of an existing cluster as a parameter.\n $COROOT/bin/upgrade-secret.sh Upgrading from v2.5 to v2.6 For a full list of additions and revisions that occurred in the PostgreSQL Operator v2.5 release, please view the related release page here.\n Required Updates This section notes some required steps that will need to be taken in the process of upgrading from v2.5 to v2.6.\n Configuration File One update in v2.6 changed the pgo.yaml file through removing the Debug flag. The Pgo.Debug variable can now be removed from the pgo.yaml file as a result. The debug flag is now called CRUNCHY_DEBUG and is set in the deployment.json file as a default environment variable.\n Container Resources Release 2.6 added the concept of container resource configurations to the pgo.yaml file. In order to specify the optional container resource configurations, add a section as follows to your pgo.yaml file -\n DefaultContainerResource: small ContainerResources: small: RequestsMemory: 2Gi RequestsCPU: 0.5 LimitsMemory: 2Gi LimitsCPU: 1.0 large: RequestsMemory: 8Gi RequestsCPU: 2.0 LimitsMemory: 12Gi LimitsCPU: 4.0 \u0026lt;div class=\"notices warning\" \u0026gt;\u0026lt;div class=\"paragraph\"\u0026gt; \u0026lt;p\u0026gt;If these settings are set incorrectly or if the Kubernetes cluster cannot meet the defined memory and CPU requirements, deployments will go into a \u0026lt;strong\u0026gt;pending\u0026lt;/strong\u0026gt; state.\u0026lt;/p\u0026gt; \u0026lt;/div\u0026gt; \u0026lt;/div\u0026gt;\n Kube RBAC Release 2.6 added a rbac.yaml file to capture the Kube RBAC rules. These RBAC rules allow the apiserver and postgres-operator containers access to the Kubernetes resources required for the operator to work. As part of the deployment process, it is necessary to execute the rbac.yaml file to set the roles and bindings required by the operator. Adjust this file to suit local security requirements.\n Application RBAC Release 2.6 added an RBAC capability to secure the pgo application. The pgouser now has a role appended at the end of of each user definition as follows -\n username:password:pgoadmin testuser:testpass:pgoadmin readonlyuser:testpass:pgoreader These are defined in the following file -\n $COROOT/conf/apiserver/pgouser To match the behavior of the pre 2.6 releases, the pgadmin role is set on the previous user definitions, but a readonlyuser is now defined to test other role definitions. The roles are defined in a new file called pgorole. This file defines each role and the permissions for that role. 
By default, two roles are defined as samples -\n pgoadmin pgoreader Adjust these default settings to meet local security requirements.\n The format of this file is as follows -\n rolename: permissionA, permissionB These are defined in the following file -\n $COROOT/conf/apiserver/pgorole The complete set of permissions is documented in the Configuration document.\n User Creation Release 2.6 replaced the pgo user --add command with the pgo create user command to improve consistency across command usage. Any scripts written using the older style of command require an update to use the new command syntax.\n Replica CRD There is a new Kubernetes Custom Resource Definition that serves the purpose of holding replica information, called pgreplicas. This CRD is populated with the pgo scale command and is used to hold per-replica specific information such as the resource and storage configurations requested at run time.\n \n " + "content": "Table of Contents Verify Operator Status Configure pgo Client Verify pgo Client Storage Configuration Next Steps v3.1, 2018-06-13\n This document details verifying the installation of the PostgreSQL Operator is successful, in addition to detailing some different storage configurations that can be made.\n Verify Operator Status To verify that the operator is deployed and running, run the following:\n kubectl get pod --selector=name=postgres-operator You should see output similar to this:\n NAME READY STATUS RESTARTS AGE postgres-operator-56598999cd-tbg4w 2/2 Running 0 1m There are 2 containers in the operator pod, both should be ready as above.\n When you first run the operator, it will create the required CustomResourceDefinitions. You can view these as follows -\n kubectl get crd The operator creates the following Custom Resource Definitions over time as the associated commands are triggered.\n kubectl get crd NAME AGE pgbackups.cr.client-go.k8s.io 2d pgclusters.cr.client-go.k8s.io 2d pgingests.cr.client-go.k8s.io 2d pgpolicies.cr.client-go.k8s.io 2d pgreplicas.cr.client-go.k8s.io 2d pgtasks.cr.client-go.k8s.io 2d pgupgrades.cr.client-go.k8s.io 2d At this point, the server side of the operator is deployed and ready.\n The complete set of environment variables used in the installation so far are -\n export CO_IMAGE_PREFIX=crunchydata export CO_IMAGE_TAG=centos7-3.1 export GOPATH=$HOME/odev export GOBIN=$GOPATH/bin export PATH=$PATH:$GOBIN export COROOT=$GOPATH/src/github.com/crunchydata/postgres-operator export CO_CMD=kubectl You would normally add these into your .bashrc at this point to be used later on or if you want to redeploy the operator.\n Configure pgo Client The pgo command line client requires TLS for securing the connection to the operator\u0026#8217;s REST API. 
This configuration is performed as follows -\n export PGO_CA_CERT=$COROOT/conf/apiserver/server.crt export PGO_CLIENT_CERT=$COROOT/conf/apiserver/server.crt export PGO_CLIENT_KEY=$COROOT/conf/apiserver/server.key The pgo client uses Basic Authentication to authenticate to the operator REST API, for authentication, add the following .pgouser file to your $HOME -\n echo \"username:password\" \u0026gt; $HOME/.pgouser The pgo client needs the URL to connect to the operator.\n Depending on your Kubernetes environment this can be done the following ways.\n Running Kubernetes Locally Running Kubernetes Locally If your local host is not set up to resolve Kubernetes Service DNS names, you can specify the operator IP address as follows -\n kubectl get service postgres-operator NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE postgres-operator NodePort 10.109.184.8 \u0026lt;none\u0026gt; 8443:30894/TCP 5m export CO_APISERVER_URL=https://10.109.184.8:8443 pgo version It is also possible to define a bash alias such as -\n alias setip='export CO_APISERVER_URL=https://`kubectl get service postgres-operator -o=jsonpath=\"{.spec.clusterIP}\"`:8443' This alias will set the CO_APISERVER_URL IP address for you.\n Running Kubernetes Remotely Running Kubernetes Remotely Set up a port-forward tunnel from your host to the Kube remote host, specifying the operator pod -\n kubectl get pod --selector=name=postgres-operator NAME READY STATUS RESTARTS AGE postgres-operator-56598999cd-tbg4w 2/2 Running 0 8m kubectl port-forward postgres-operator-56598999cd-tbg4w 8443:8443 In another terminal -\n export CO_APISERVER_URL=https://127.0.0.1:8443 pgo version Verify pgo Client At this point you should be able to connect to the operator as follows -\n pgo version pgo client version 3.1 apiserver version 3.1 Operator commands are documented on the Getting Started page.\n Storage Configuration Most users after they try out the operator will want to create a more customized installation and deployment of the operator using specific storage types.\n The operator will work with HostPath, NFS, Dynamic, and GKE Storage.\n NFS NFS To configure the operator to use NFS for storage, a sample pgo.yaml.nfs file is provided. Overlay the default pgo.yaml file with that file -\n cp $COROOT/examples/pgo.yaml.nfs $COROOT/conf/apiserver/pgo.yaml Edit the pgo.yaml file to specify the NFS GID that is set for the NFS volume mount you will be using, the default value assumed is nfsnobody as the GID (65534). Update the value to meet your NFS security settings.\n There is currently no script available to create your NFS Persistent Volumes but you can typically modify the $COROOT/pv/create-pv.sh script to work with NFS.\n Dynamic Dynamic To configure the operator to use Dynamic Storage classes for storage, a sample pgo.yaml.storageclass file is provided. Overlay the default pgo.yaml file with that file -\n cp $COROOT/examples/pgo.yaml.storageclass $COROOT/conf/apiserver/pgo.yaml Edit the pgo.yaml file to specify the storage class you will be using, the default value assumed is standard which is the name used by default within a GKE Kube cluster deployment. 
Update the value to match your storage classes.\n Notice that the FsGroup setting is required for most block storage and is set to the value of 26 since the PostgreSQL container runs as UID 26.\n GKE GKE Some notes for setting up GKE for the Operator deployment.\n Install Kubectl On your host you will be working from, install the kubectl command -\n https://kubernetes.io/docs/tasks/tools/install-kubectl/\n GCP Select your project\n Create a Kube cluster in that project\n By default a storage class called standard is created.\n Install GCloud To access the Kubernetes cluster, install the gcloud utility -\n https://cloud.google.com/sdk/downloads cd google-cloud-sdk ./install.sh Configure Kubectl for Cluster Access gcloud auth login gcloud container clusters get-credentials jeff-quickstart --zone us-central1-a --project crunchy-dev-test kubectl get storageclass Next Steps There are many ways to configure the operator further. Some sample configurations are documented on the Configuration page.\n You may also want to find out more information on how the operator is designed to work and deploy. This information can be found in the How It Works page.\n Information can be found on the full scope of commands on the Getting Started page.\n " }, { "uri": "https://crunchydata.github.io/postgres-operator/installation/", "title": "Installation", "tags": [], "description": "", - "content": "v3.0, 2018-06-04\n Installation For a quick deployment on either a GKE or OpenShift environment, visit the Quick Installation page.\n For a manual installation of the Operator on either a Kubernetes or OpenShift environment, visit the Manual Installation page.\n A Helm Chart is also provided.\n If you\u0026#8217;re looking to upgrade a current PostgreSQL Operator installation, visit the Upgrading the Operator page.\n After completing the installation steps, ensure you visit the Deployment page to deploy the Operator to your environment.\n Next Steps There are many ways to configure the operator further. Some sample configurations are documented on the Configuration page.\n You may also want to find out more information on how the operator is designed to work and deploy. This information can be found in the How It Works page.\n Information can be found on the full scope of commands on the Getting Started page.\n " + "content": "v3.1, 2018-06-13\n Installation For a quick deployment on either a GKE or OpenShift environment, visit the Quick Installation page.\n For a manual installation of the Operator on either a Kubernetes or OpenShift environment, visit the Manual Installation page.\n A Helm Chart is also provided.\n If you\u0026#8217;re looking to upgrade a current PostgreSQL Operator installation, visit the Upgrading the Operator page.\n After completing the installation steps, ensure you visit the Deployment page to deploy the Operator to your environment.\n Next Steps There are many ways to configure the operator further. Some sample configurations are documented on the Configuration page.\n You may also want to find out more information on how the operator is designed to work and deploy. 
This information can be found in the How It Works page.\n Information can be found on the full scope of commands on the Getting Started page.\n " }, { - "uri": "https://crunchydata.github.io/postgres-operator/installation/deployment/", - "title": "Deployment", + "uri": "https://crunchydata.github.io/postgres-operator/installation/configuration/", + "title": "Configuration", "tags": [], "description": "", - "content": "Table of Contents Verify Operator Status Configure pgo Client Verify pgo Client Storage Configuration Next Steps v3.0, 2018-06-04\n This document details verifying the installation of the PostgreSQL Operator is successful, in addition to detailing some different storage configurations that can be made.\n Verify Operator Status To verify that the operator is deployed and running, run the following:\n kubectl get pod --selector=name=postgres-operator You should see output similar to this:\n NAME READY STATUS RESTARTS AGE postgres-operator-56598999cd-tbg4w 2/2 Running 0 1m There are 2 containers in the operator pod, both should be ready as above.\n When you first run the operator, it will create the required CustomResourceDefinitions. You can view these as follows -\n kubectl get crd The operator creates the following Custom Resource Definitions over time as the associated commands are triggered.\n kubectl get crd NAME AGE pgbackups.cr.client-go.k8s.io 2d pgclusters.cr.client-go.k8s.io 2d pgingests.cr.client-go.k8s.io 2d pgpolicies.cr.client-go.k8s.io 2d pgreplicas.cr.client-go.k8s.io 2d pgtasks.cr.client-go.k8s.io 2d pgupgrades.cr.client-go.k8s.io 2d At this point, the server side of the operator is deployed and ready.\n The complete set of environment variables used in the installation so far are -\n export CO_IMAGE_PREFIX=crunchydata export CO_IMAGE_TAG=centos7-3.0 export GOPATH=$HOME/odev export GOBIN=$GOPATH/bin export PATH=$PATH:$GOBIN export COROOT=$GOPATH/src/github.com/crunchydata/postgres-operator export CO_CMD=kubectl You would normally add these into your .bashrc at this point to be used later on or if you want to redeploy the operator.\n Configure pgo Client The pgo command line client requires TLS for securing the connection to the operator\u0026#8217;s REST API. 
This configuration is performed as follows -\n export PGO_CA_CERT=$COROOT/conf/apiserver/server.crt export PGO_CLIENT_CERT=$COROOT/conf/apiserver/server.crt export PGO_CLIENT_KEY=$COROOT/conf/apiserver/server.key The pgo client uses Basic Authentication to authenticate to the operator REST API, for authentication, add the following .pgouser file to your $HOME -\n echo \"username:password\" \u0026gt; $HOME/.pgouser The pgo client needs the URL to connect to the operator.\n Depending on your Kubernetes environment this can be done the following ways.\n Running Kubernetes Locally Running Kubernetes Locally If your local host is not set up to resolve Kubernetes Service DNS names, you can specify the operator IP address as follows -\n kubectl get service postgres-operator NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE postgres-operator NodePort 10.109.184.8 \u0026lt;none\u0026gt; 8443:30894/TCP 5m export CO_APISERVER_URL=https://10.109.184.8:8443 pgo version It is also possible to define a bash alias such as -\n alias setip='export CO_APISERVER_URL=https://`kubectl get service postgres-operator -o=jsonpath=\"{.spec.clusterIP}\"`:8443' This alias will set the CO_APISERVER_URL IP address for you.\n Running Kubernetes Remotely Running Kubernetes Remotely Set up a port-forward tunnel from your host to the Kube remote host, specifying the operator pod -\n kubectl get pod --selector=name=postgres-operator NAME READY STATUS RESTARTS AGE postgres-operator-56598999cd-tbg4w 2/2 Running 0 8m kubectl port-forward postgres-operator-56598999cd-tbg4w 8443:8443 In another terminal -\n export CO_APISERVER_URL=https://127.0.0.1:8443 pgo version Verify pgo Client At this point you should be able to connect to the operator as follows -\n pgo version pgo client version 3.0 apiserver version 3.0 Operator commands are documented on the Getting Started page.\n Storage Configuration Most users after they try out the operator will want to create a more customized installation and deployment of the operator using specific storage types.\n The operator will work with HostPath, NFS, Dynamic, and GKE Storage.\n NFS NFS To configure the operator to use NFS for storage, a sample pgo.yaml.nfs file is provided. Overlay the default pgo.yaml file with that file -\n cp $COROOT/examples/pgo.yaml.nfs $COROOT/conf/apiserver/pgo.yaml Edit the pgo.yaml file to specify the NFS GID that is set for the NFS volume mount you will be using, the default value assumed is nfsnobody as the GID (65534). Update the value to meet your NFS security settings.\n There is currently no script available to create your NFS Persistent Volumes but you can typically modify the $COROOT/pv/create-pv.sh script to work with NFS.\n Dynamic Dynamic To configure the operator to use Dynamic Storage classes for storage, a sample pgo.yaml.storageclass file is provided. Overlay the default pgo.yaml file with that file -\n cp $COROOT/examples/pgo.yaml.storageclass $COROOT/conf/apiserver/pgo.yaml Edit the pgo.yaml file to specify the storage class you will be using, the default value assumed is standard which is the name used by default within a GKE Kube cluster deployment. 
Update the value to match your storage classes.\n Notice that the FsGroup setting is required for most block storage and is set to the value of 26 since the PostgreSQL container runs as UID 26.\n GKE GKE Some notes for setting up GKE for the Operator deployment.\n Install Kubectl On your host you will be working from, install the kubectl command -\n https://kubernetes.io/docs/tasks/tools/install-kubectl/\n GCP Select your project\n Create a Kube cluster in that project\n By default a storage class called standard is created.\n Install GCloud To access the Kubernetes cluster, install the gcloud utility -\n https://cloud.google.com/sdk/downloads cd google-cloud-sdk ./install.sh Configure Kubectl for Cluster Access gcloud auth login gcloud container clusters get-credentials jeff-quickstart --zone us-central1-a --project crunchy-dev-test kubectl get storageclass Next Steps There are many ways to configure the operator further. Some sample configurations are documented on the Configuration page.\n You may also want to find out more information on how the operator is designed to work and deploy. This information can be found in the How It Works page.\n Information can be found on the full scope of commands on the Getting Started page.\n " + "content": "Table of Contents Overview Openshift Container Platform Security Configuration Kube RBAC Basic Authentication Configure TLS pgo RBAC apiserver Configuration postgres-operator Container Configuration bash Completion REST API Deploying pgpool v3.1, 2018-06-13\n Overview This document describes how to configure the operator beyond the default configurations in addition to detailing what the configuration settings mean.\n Openshift Container Platform To run the Operator on Openshift Container Platform note the following requirements -\n Openshift Container Platform 3.7 or greater is required due to the dependence on Custom Resource Definitions.\n The CO_CMD environment variable should be set to oc when operating in an Openshift environment.\n Security Configuration Kube RBAC The apiserver and postgres-operator containers access Kubernetes resources and therefore require privileges for successful Kubernetes interactions. The rbac.yaml file includes a set of roles and bindings that allow the operator to work. These are fine grained controls that you can adjust to your local Kubernetes cluster depending on specific security requirements. The rbac.yaml file is executed when the operator is first deployed to the Kubernetes cluster.\n Permissions are granted to the Operator by means of a Service Account called postgres-operator. That service account is added through the Operator deployment.\n Warning The CO_NAMESPACE environment variable determines the namespace that is used within the deployment of the operator. If you are deploying to the demo namespace, the following should setting should be defined in your .bashrc: export CO_NAMESPACE=demo\n See here for more details on how to enable RBAC roles and modify the scope of the permissions to suit your needs.\n Basic Authentication Basic authentication between the host and the apiserver is required. It will be necessary to configure the pgo client to specify a basic authentication username and password through the creation a file in the user\u0026#8217;s home directory named .pgouser. It will look similar to this, and contain only a single line -\n username:password The above excerpt specifies a username of username and a password of password. 
These values will be read by the pgo client and passed to the apiserver on each REST API call.\n For the apiserver, a list of usernames and passwords is specified in the apiserver-conf-secret Secret. The values specified in a deployment are found in the following location -\n $COROOT/conf/apiserver/pgouser The sample configuration for pgouser is as follows -\n username:password testuser:testpass Modify these values to be unique to your environment.\n If the username and password passed by clients to the apiserver do not match, the REST call will fail and a log message will be produced in the apiserver container log. The client will receive a 401 HTTP status code if they are not able to authenticate.\n If the pgouser file is not found in the home directory of the pgo user then the next searched location is /etc/pgo/pgouser. If the file is not found in either of the locations, the pgo client searches for the existence of a PGOUSER environment variable in order to locate a path to the basic authentication file.\n Basic authentication can be entirely disabled by setting the BasicAuth setting in the pgo.yaml configuration file to false.\n Configure TLS TLS is used to secure communications to the apiserver. Sample keys and certifications that can be used by TLS are found here -\n $COROOT/conf/apiserver/server.crt $COROOT/conf/apiserver/server.key If you want to generate your own keys, you can use the script found in -\n $COROOT/bin/make-certs.sh The pgo client is required to use keys to connect to the apiserver. Specify the keys for pgo by setting the following environment variables -\n export PGO_CA_CERT=$COROOT/conf/apiserver/server.crt export PGO_CLIENT_CERT=$COROOT/conf/apiserver/server.crt export PGO_CLIENT_KEY=$COROOT/conf/apiserver/server.key The sample server keys are used as the client keys; adjust to suit security requirements.\n For the apiserver TLS configuration, the keys are included in the apiserver-conf-secret Secret when the apiserver is deployed. See the $COROOT/deploy/deploy.sh script which is where the secret is created.\n The apiserver listens on port 8443 (e.g. https://postgres-operator:8443) by default.\n You can set InsecureSkipVerify to true by setting the NO_TLS_VERIFY environment variable in the deployment.json file to true. By default this value is set to false if you do not specify a value.\n pgo RBAC The pgo command line utility talks to the apiserver REST API instead of the Kubernetes API. It is therefore necessary for the pgo client to make use of RBAC configuration.\n Starting in Release 3.0, the /conf/apiserver/pgorole is used to define some sample pgo roles, pgadmin and pgoreader.\n These roles are meant as examples that you can configure to suit security requirements as necessary. The pgadmin role grants a user authorization to all pgo commands. The pgoreader only grants access to pgo commands that display information such as pgo show cluster.\n The pgorole file is read at start up time when the operator is deployed to the Kubernetes cluster.\n Also, the pgouser file now includes the role that is assigned to a specific user as follows -\n username:password:pgoadmin testuser:testpass:pgoadmin readonlyuser:testpass:pgoreader The following list shows the current complete list of possible pgo permissions -\n Table 1. 
pgo Permissions Permission Description ShowCluster\n allow pgo show cluster\n CreateCluster\n allow pgo create cluster\n TestCluster\n allow pgo test mycluster\n ShowBackup\n allow pgo show backup\n CreateBackup\n allow pgo backup mycluster\n DeleteBackup\n allow pgo delete backup mycluster\n Label\n allow pgo label\n Load\n allow pgo load\n CreatePolicy\n allow pgo create policy\n DeletePolicy\n allow pgo delete policy\n ShowPolicy\n allow pgo show policy\n ApplyPolicy\n allow pgo apply policy\n ShowPVC\n allow pgo show pvc\n CreateUpgrade\n allow pgo upgrade\n ShowUpgrade\n allow pgo show upgrade\n DeleteUpgrade\n allow pgo delete upgrade\n CreateUser\n allow pgo create user\n CreateFailover\n allow pgo failover\n User\n allow pgo user\n Version\n allow pgo version\n If the user is unauthorized for a pgo command, the user will get back this response -\n FATA[0000] Authentication Failed: 40 apiserver Configuration The postgres-operator pod includes the apiserver which is a REST API that pgo users are able to communicate with.\n The apiserver uses the following configuration files found in $COROOT/conf/apiserver to determine how the Operator will provision PostgreSQL containers -\n $COROOT/conf/apiserver/pgo.yaml $COROOT/conf/apiserver/pgo.lspvc-template.json $COROOT/conf/apiserver/pgo.load-template.json Note that the default pgo.yaml file assumes you are going to use HostPath Persistent Volumes for your storage configuration. It will be necessary to adjust this file for NFS or other storage configurations. Some examples of how are listed in the manual installation document.\n The version of PostgreSQL container the Operator will deploy is determined by the CCPImageTag setting in the $COROOT/conf/apiserver/pgo.yaml configuration file. By default, this value is set to the latest release of the Crunchy Container Suite.\n pgo.yaml The default pgo.yaml configuration file, included in $COROOT/conf/apiserver/pgo.yaml, looks like this -\n BasicAuth: true Cluster: CCPImageTag: centos7-10.4-1.8.3 Port: 5432 User: testuser Database: userdb PasswordAgeDays: 60 PasswordLength: 8 Strategy: 1 Replicas: 0 ArchiveMode: false ArchiveTimeout: 60 PrimaryStorage: storage1 BackupStorage: storage1 ReplicaStorage: storage1 Storage: storage1: AccessMode: ReadWriteMany Size: 200M StorageType: create storage2: AccessMode: ReadWriteMany Size: 333M StorageType: create storage3: AccessMode: ReadWriteMany Size: 440M StorageType: create DefaultContainerResource: small ContainerResources: small: RequestsMemory: 2Gi RequestsCPU: 0.5 LimitsMemory: 2Gi LimitsCPU: 1.0 large: RequestsMemory: 8Gi RequestsCPU: 2.0 LimitsMemory: 12Gi LimitsCPU: 4.0 Pgo: Audit: false Metrics: false LSPVCTemplate: /config/pgo.lspvc-template.json CSVLoadTemplate: /config/pgo.load-template.json COImagePrefix: crunchydata COImageTag: centos7-2.7 Values in the pgo configuration file have the following meaning:\n Table 2. pgo Configuration File Definitions Setting Definition BasicAuth\n if set to true will enable Basic Authentication\n Cluster.CCPImageTag\n newly created containers will be based on this image version (e.g. centos7-10.4-1.8.3), unless you override it using the --ccp-image-tag command line flag\n Cluster.Port\n the PostgreSQL port to use for new containers (e.g. 
5432)\n Cluster.User\n the PostgreSQL normal user name\n Cluster.Strategy\n sets the deployment strategy to be used for deploying a cluster, currently there is only strategy 1\n Cluster.Replicas\n the number of cluster replicas to create for newly created clusters\n Cluster.Policies\n optional, list of policies to apply to a newly created cluster, comma separated, must be valid policies in the catalog\n Cluster.PasswordAgeDays\n optional, if set, will set the VALID UNTIL date on passwords to this many days in the future when creating users or setting passwords, defaults to 60 days\n Cluster.PasswordLength\n optional, if set, will determine the password length used when creating passwords, defaults to 8\n Cluster.ArchiveMode\n optional, if set to true will enable archive logging for all clusters created, default is false.\n Cluster.ArchiveTimeout\n optional, if set, will determine the archive timeout setting used when ArchiveMode is true, defaults to 60 seconds\n PrimaryStorage\n required, the value of the storage configuration to use for the primary PostgreSQL deployment\n BackupStorage\n required, the value of the storage configuration to use for backups\n ReplicaStorage\n required, the value of the storage configuration to use for the replica PostgreSQL deployments\n Storage.storage1.StorageClass\n for a dynamic storage type, you can specify the storage class used for storage provisioning(e.g. standard, gold, fast)\n Storage.storage1.AccessMode\n the access mode for new PVCs (e.g. ReadWriteMany, ReadWriteOnce, ReadOnlyMany). See below for descriptions of these.\n Storage.storage1.Size\n the size to use when creating new PVCs (e.g. 100M, 1Gi)\n Storage.storage1.StorageType\n supported values are either dynamic, existing, create, or emptydir, if not supplied, emptydir is used\n Storage.storage1.Fsgroup\n optional, if set, will cause a SecurityContext and fsGroup attributes to be added to generated Pod and Deployment definitions\n Storage.storage1.SupplementalGroups\n optional, if set, will cause a SecurityContext to be added to generated Pod and Deployment definitions\n DefaultContainerResource\n optional, the value of the container resources configuration to use for all database containers, if not set, no resource limits or requests are added on the database container\n ContainerResources.small.RequestsMemory\n request size of memory in bytes\n ContainerResources.small.RequestsCPU\n request size of CPU cores\n ContainerResources.small.LimitsMemory\n request size of memory in bytes\n ContainerResources.small.LimitsCPU\n request size of CPU cores\n ContainerResources.large.RequestsMemory\n request size of memory in bytes\n ContainerResources.large.RequestsCPU\n request size of CPU cores\n ContainerResources.large.LimitsMemory\n request size of memory in bytes\n ContainerResources.large.LimitsCPU\n request size of CPU cores\n Pgo.LSPVCTemplate\n the PVC lspvc template file that lists PVC contents\n Pgo.LoadTemplate\n the load template file used for load jobs\n Pgo.COImagePrefix\n image tag prefix to use for the Operator containers\n Pgo.COImageTag\n image tag to use for the Operator containers\n Pgo.Audit\n boolean, if set to true will cause each apiserver call to be logged with an audit marking\n Pgo.Metrics\n boolean, if set to true will cause each new cluster to include crunchy-collect as a sidecar container for metrics collection, if set to false (default), users can still add metrics on a cluster-by-cluster basis using the pgo command flag --metrics\n Storage Configurations You can now 
define n-number of Storage configurations within the pgo.yaml file. Those Storage configurations follow these conventions -\n they must have lowercase name (e.g. storage1)\n they must be unique names (e.g. mydrstorage, faststorage, slowstorage)\n These Storage configurations are referenced in the BackupStorage, ReplicaStorage, and PrimaryStorage configuration values. However, there are command line options in the pgo client that will let a user override these default global values to offer you the user a way to specify very targeted storage configurations when needed (e.g. disaster recovery storage for certain backups).\n You can set the storage AccessMode values to the following -\n ReadWriteMany - mounts the volume as read-write by many nodes\n ReadWriteOnce - mounts the PVC as read-write by a single node\n ReadOnlyMany - mounts the PVC as read-only by many nodes\n These Storage configurations are validated when the pgo-apiserver starts, if a non-valid configuration is found, the apiserver will abort. These Storage values are only read at apiserver start time.\n The following StorageType values are possible -\n dynamic - this will allow for dynamic provisioning of storage using a StorageClass.\n existing - This setting allows you to use a PVC that already exists. For example, if you have a NFS volume mounted to a PVC, all PostgreSQL clusters can write to that NFS volume mount via a common PVC. When set, the Name setting is used for the PVC.\n create - This setting allows for the creation of a new PVC for each PostgreSQL cluster using a naming convention of clustername. When set, the Size, AccessMode settings are used in constructing the new PVC.\n emptydir - If a StorageType value is not defined, emptydir is used by default. This is a volume type that’s created when a pod is assigned to a node and exists as long as that pod remains running on that node; it is deleted as soon as the pod is manually deleted or removed from the node.\n The operator will create new PVCs using this naming convention: dbname where dbname is the database name you have specified. For example, if you run:\n pgo create cluster example1 It will result in a PVC being created named example1 and in the case of a backup job, the pvc is named example1-backup\n There are currently 3 sample pgo configuration files provided for users to use as a starting configuration -\n pgo.yaml.emptydir - this configuration specifies emptydir storage to be used for databases\n pgo.yaml.nfs - this configuration specifies create storage to be used, this is used for NFS storage for example where you want to have a unique PVC created for each database\n pgo.yaml.dynamic - this configuration specifies dynamic storage to be used, namely a storageclass that refers to a dynamic provisioning strorage such as StorageOS or Portworx, or GCE.\n Overriding Container Resources Configuration Defaults In the pgo.yaml configuration file you have the option to configure a default container resources configuration that when set will add CPU and memory resource limits and requests values into each database container when the container is created.\n You can also override the default value using the --resources-config command flag when creating a new cluster -\n pgo create cluster testcluster --resources-config=large Note, if you try to allocate more resources than your host or Kube cluster has available then you will see your pods wait in a Pending status. 
The output from a kubectl describe pod command will show output like this in this event -\n Events: Type Reason Age From Message ---- ------ ---- ---- ------- Warning FailedScheduling 49s (x8 over 1m) default-scheduler No nodes are available that match all of the predicates: Insufficient memory (1). Overriding Storage Configuration Defaults pgo create cluster testcluster --storage-config=bigdisk That example will create a cluster and specify a storage configuration of bigdisk to be used for the primary database storage. The replica storage will default to the value of ReplicaStorage as specified in pgo.yaml.\n pgo create cluster testcluster2 --storage-config=fastdisk --replica-storage-config=slowdisk That example will create a cluster and specify a storage configuration of fastdisk to be used for the primary database storage, while the replica storage will use the storage configuration slowdisk.\n pgo backup testcluster --storage-config=offsitestorage That example will create a backup and use the offsitestorage storage configuration for persisting the backup.\n Disaster Recovery Using Storage Configurations A simple mechanism for partial disaster recovery can be obtained by leveraging network storage, Kubernetes storage classes, and the storage configuration options within the Operator.\n For example, if you define a Kubernetes storage class that refers to a storage backend that is running within your disaster recovery site, and then use that storage class as a storage configuration for your backups, you essentially have moved your backup files automatically to your disaster recovery site thanks to network storage.\n postgres-operator Container Configuration To enable debug level messages from the operator pod, set the CRUNCHY_DEBUG environment variable to true within its deployment file deployment.json.\n Operator Templates The database and cluster Kubernetes objects that get created by the operator are based on JSON templates that are added into the operator deployment by means of a mounted volume.\n The templates are located in the $COROOT/conf/postgres-operator directory and are added into a config map which is mounted by the operator deployment.\n bash Completion There is a bash completion file that is included for users to try located in the repository at examples/pgo-bash-completion. To use it -\n cp $COROOT/examples/pgo-bash-completion /etc/bash_completion.d/pgo su - $USER REST API Because the apiserver implements a REST API, it is possible to integrate with it using your own application code. 
To demonstrate this, the following curl commands show the API usage -\n pgo version\n curl -v -X GET -u readonlyuser:testpass -H \"Content-Type: application/json\" --insecure https://10.101.155.218:8443/version pgo show policy all\n curl -v -X GET -u readonlyuser:testpass -H \"Content-Type: application/json\" --insecure https://10.101.155.218:8443/policies/all pgo show pvc danger\n curl -v -X GET -u readonlyuser:testpass -H \"Content-Type: application/json\" --insecure https://10.101.155.218:8443/pvc/danger pgo show cluster mycluster\n curl -v -X GET -u readonlyuser:testpass -H \"Content-Type: application/json\" --insecure https://10.101.155.218:8443/clusters/mycluster pgo show upgrade mycluster\n curl -v -X GET -u readonlyuser:testpass -H \"Content-Type: application/json\" --insecure https://10.101.155.218:8443/upgrades/mycluster pgo test mycluster\n curl -v -X GET -u readonlyuser:testpass -H \"Content-Type: application/json\" --insecure https://10.101.155.218:8443/clusters/test/mycluster pgo show backup mycluster\n curl -v -X GET -u readonlyuser:testpass -H \"Content-Type: application/json\" --insecure https://10.101.155.218:8443/backups/mycluster Deploying pgpool One option with pgo is enabling the creation of a pgpool deployment in addition to the PostgreSQL cluster. Running pgpool is a logical inclusion when the Kubernetes cluster includes both a primary database in addition to some number of replicas deployed. The current pgpool configuration deployed by the operator only works when both a primary and a replica are running.\n When a user creates the cluster a command flag can be passed as follows to enable the creation of the pgpool deployment.\n pgo create cluster cluster1 --pgpool pgo scale cluster1 This will cause the operator to create a Deployment that includes the crunchy-pgpool container along with a replica. That container will create a configuration that will perform SQL routing to your cluster services, both for the primary and replica services.\n Pgpool examines the SQL it receives and routes the SQL statement to either the primary or replica based on the SQL action. Specifically, it will send writes and updates to only the primary service. It will send read-only statements to the replica service.\n When the operator deploys the pgpool container, it creates a secret (e.g. mycluster-pgpool-secret) that contains pgpool configuration files. It fills out templated versions of these configuration files specifically for this PostgreSQL cluster.\n Part of the pgpool deployment also includes creating a pool_passwd file that will allow the testuser credential to authenticate to pgpool. Adding additional users to the pgpool configuration currently requires human intervention specifically creating a new pgpool secret and bouncing the pgpool pod to pick up the updated secret. 
Future operator releases will attempt to provide pgo commands to let you automate the addition or removal of a pgpool user.\n Currently to update a pgpool user within the pool_passwd configuration file, it is necessary to copy the existing files from the secret to your local system, update the credentials in pool_passwd with the new user credentials, recreate the pgpool secret, and finally restart the pgpool pod to pick up the updated configuration files.\n As an example -\n kubectl cp demo/wed10-pgpool-6cc6f6598d-wcnmf:/pgconf/ /tmp/foo That command gets a running set of secret pgpool configuration files and places them locally on your system for you to edit.\n pgpool requires a specially formatted password credential to be placed into pool_passwd. There is a golang program included in $COROOT/golang-examples/gen-pgpool-pass.go that, when run, will generate the value to use within the pgpool_passwd configuration file.\n go run $COROOT/golang-examples/gen-pgpool-pass.go Enter Username: testuser Enter Password: Password typed: e99Mjt1dLz hash of password is [md59c4017667828b33762665dc4558fbd76] The value md59c4017667828b33762665dc4558fbd76 is what you will use in the pool_passwd file.\n Then, create the new secrets file based on those updated files -\n $COROOT/bin/create-pgpool-secrets.sh Lastly for pgpool to pick up the new secret file, delete the existing deployment pod -\n kubectl get deployment wed-pgpool kubectl delete pod wed10-pgpool-6cc6f6598d-wcnmf The pgpool deployment will spin up another pgpool which will pick up the updated secret file.\n " }, { - "uri": "https://crunchydata.github.io/postgres-operator/installation/configuration/", - "title": "Configuration", + "uri": "https://crunchydata.github.io/postgres-operator/installation/upgrading-the-operator/", + "title": "Upgrading the Operator", "tags": [], "description": "", - "content": "Table of Contents Overview Openshift Container Platform Security Configuration Kube RBAC Basic Authentication Configure TLS pgo RBAC apiserver Configuration postgres-operator Container Configuration bash Completion REST API Deploying pgpool v3.0, 2018-06-04\n Overview This document describes how to configure the operator beyond the default configurations in addition to detailing what the configuration settings mean.\n Openshift Container Platform To run the Operator on Openshift Container Platform note the following requirements -\n Openshift Container Platform 3.7 or greater is required due to the dependence on Custom Resource Definitions.\n The CO_CMD environment variable should be set to oc when operating in an Openshift environment.\n Security Configuration Kube RBAC The apiserver and postgres-operator containers access Kubernetes resources and therefore require privileges for successful Kubernetes interactions. The rbac.yaml file includes a set of roles and bindings that allow the operator to work. These are fine grained controls that you can adjust to your local Kubernetes cluster depending on specific security requirements. The rbac.yaml file is executed when the operator is first deployed to the Kubernetes cluster.\n Permissions are granted to the Operator by means of a Service Account called postgres-operator. 
That service account is added through the Operator deployment.\n Warning If you are not using the demo namespace, it will be required to edit the following and change the namespace where the service account and cluster role bindings will be deployed.\n $COROOT/deploy/service-account.yaml\n $COROOT/deploy/cluster-role-binding.yaml\n See here for more details on how to enable RBAC roles and modify the scope of the permissions to suit your needs.\n Basic Authentication Basic authentication between the host and the apiserver is required. It will be necessary to configure the pgo client to specify a basic authentication username and password through the creation a file in the user\u0026#8217;s home directory named .pgouser. It will look similar to this, and contain only a single line -\n username:password The above excerpt specifies a username of username and a password of password. These values will be read by the pgo client and passed to the apiserver on each REST API call.\n For the apiserver, a list of usernames and passwords is specified in the apiserver-conf-secret Secret. The values specified in a deployment are found in the following location -\n $COROOT/conf/apiserver/pgouser The sample configuration for pgouser is as follows -\n username:password testuser:testpass Modify these values to be unique to your environment.\n If the username and password passed by clients to the apiserver do not match, the REST call will fail and a log message will be produced in the apiserver container log. The client will receive a 401 HTTP status code if they are not able to authenticate.\n If the pgouser file is not found in the home directory of the pgo user then the next searched location is /etc/pgo/pgouser. If the file is not found in either of the locations, the pgo client searches for the existence of a PGOUSER environment variable in order to locate a path to the basic authentication file.\n Basic authentication can be entirely disabled by setting the BasicAuth setting in the pgo.yaml configuration file to false.\n Configure TLS TLS is used to secure communications to the apiserver. Sample keys and certifications that can be used by TLS are found here -\n $COROOT/conf/apiserver/server.crt $COROOT/conf/apiserver/server.key If you want to generate your own keys, you can use the script found in -\n $COROOT/bin/make-certs.sh The pgo client is required to use keys to connect to the apiserver. Specify the keys for pgo by setting the following environment variables -\n export PGO_CA_CERT=$COROOT/conf/apiserver/server.crt export PGO_CLIENT_CERT=$COROOT/conf/apiserver/server.crt export PGO_CLIENT_KEY=$COROOT/conf/apiserver/server.key The sample server keys are used as the client keys; adjust to suit security requirements.\n For the apiserver TLS configuration, the keys are included in the apiserver-conf-secret Secret when the apiserver is deployed. See the $COROOT/deploy/deploy.sh script which is where the secret is created.\n The apiserver listens on port 8443 (e.g. https://postgres-operator:8443) by default.\n You can set InsecureSkipVerify to true by setting the NO_TLS_VERIFY environment variable in the deployment.json file to true. By default this value is set to false if you do not specify a value.\n pgo RBAC The pgo command line utility talks to the apiserver REST API instead of the Kubernetes API. 
It is therefore necessary for the pgo client to make use of RBAC configuration.\n Starting in Release 3.0, the /conf/apiserver/pgorole is used to define some sample pgo roles, pgadmin and pgoreader.\n These roles are meant as examples that you can configure to suit security requirements as necessary. The pgadmin role grants a user authorization to all pgo commands. The pgoreader only grants access to pgo commands that display information such as pgo show cluster.\n The pgorole file is read at start up time when the operator is deployed to the Kubernetes cluster.\n Also, the pgouser file now includes the role that is assigned to a specific user as follows -\n username:password:pgoadmin testuser:testpass:pgoadmin readonlyuser:testpass:pgoreader The following list shows the current complete list of possible pgo permissions -\n Table 1. pgo Permissions Permission Description ShowCluster\n allow pgo show cluster\n CreateCluster\n allow pgo create cluster\n TestCluster\n allow pgo test mycluster\n ShowBackup\n allow pgo show backup\n CreateBackup\n allow pgo backup mycluster\n DeleteBackup\n allow pgo delete backup mycluster\n Label\n allow pgo label\n Load\n allow pgo load\n CreatePolicy\n allow pgo create policy\n DeletePolicy\n allow pgo delete policy\n ShowPolicy\n allow pgo show policy\n ApplyPolicy\n allow pgo apply policy\n ShowPVC\n allow pgo show pvc\n CreateUpgrade\n allow pgo upgrade\n ShowUpgrade\n allow pgo show upgrade\n DeleteUpgrade\n allow pgo delete upgrade\n CreateUser\n allow pgo create user\n CreateFailover\n allow pgo failover\n User\n allow pgo user\n Version\n allow pgo version\n If the user is unauthorized for a pgo command, the user will get back this response -\n FATA[0000] Authentication Failed: 40 apiserver Configuration The postgres-operator pod includes the apiserver which is a REST API that pgo users are able to communicate with.\n The apiserver uses the following configuration files found in $COROOT/conf/apiserver to determine how the Operator will provision PostgreSQL containers -\n $COROOT/conf/apiserver/pgo.yaml $COROOT/conf/apiserver/pgo.lspvc-template.json $COROOT/conf/apiserver/pgo.load-template.json Note that the default pgo.yaml file assumes you are going to use HostPath Persistent Volumes for your storage configuration. It will be necessary to adjust this file for NFS or other storage configurations. Some examples of how are listed in the manual installation document.\n The version of PostgreSQL container the Operator will deploy is determined by the CCPImageTag setting in the $COROOT/conf/apiserver/pgo.yaml configuration file. 
By default, this value is set to the latest release of the Crunchy Container Suite.\n pgo.yaml The default pgo.yaml configuration file, included in $COROOT/conf/apiserver/pgo.yaml, looks like this -\n BasicAuth: true Cluster: CCPImageTag: centos7-10.4-1.8.3 Port: 5432 User: testuser Database: userdb PasswordAgeDays: 60 PasswordLength: 8 Strategy: 1 Replicas: 0 ArchiveMode: false ArchiveTimeout: 60 PrimaryStorage: storage1 BackupStorage: storage1 ReplicaStorage: storage1 Storage: storage1: AccessMode: ReadWriteMany Size: 200M StorageType: create storage2: AccessMode: ReadWriteMany Size: 333M StorageType: create storage3: AccessMode: ReadWriteMany Size: 440M StorageType: create DefaultContainerResource: small ContainerResources: small: RequestsMemory: 2Gi RequestsCPU: 0.5 LimitsMemory: 2Gi LimitsCPU: 1.0 large: RequestsMemory: 8Gi RequestsCPU: 2.0 LimitsMemory: 12Gi LimitsCPU: 4.0 Pgo: Audit: false Metrics: false LSPVCTemplate: /config/pgo.lspvc-template.json CSVLoadTemplate: /config/pgo.load-template.json COImagePrefix: crunchydata COImageTag: centos7-2.7 Values in the pgo configuration file have the following meaning:\n Table 2. pgo Configuration File Definitions Setting Definition BasicAuth\n if set to true will enable Basic Authentication\n Cluster.CCPImageTag\n newly created containers will be based on this image version (e.g. centos7-10.4-1.8.3), unless you override it using the --ccp-image-tag command line flag\n Cluster.Port\n the PostgreSQL port to use for new containers (e.g. 5432)\n Cluster.User\n the PostgreSQL normal user name\n Cluster.Strategy\n sets the deployment strategy to be used for deploying a cluster, currently there is only strategy 1\n Cluster.Replicas\n the number of cluster replicas to create for newly created clusters\n Cluster.Policies\n optional, list of policies to apply to a newly created cluster, comma separated, must be valid policies in the catalog\n Cluster.PasswordAgeDays\n optional, if set, will set the VALID UNTIL date on passwords to this many days in the future when creating users or setting passwords, defaults to 60 days\n Cluster.PasswordLength\n optional, if set, will determine the password length used when creating passwords, defaults to 8\n Cluster.ArchiveMode\n optional, if set to true will enable archive logging for all clusters created, default is false.\n Cluster.ArchiveTimeout\n optional, if set, will determine the archive timeout setting used when ArchiveMode is true, defaults to 60 seconds\n PrimaryStorage\n required, the value of the storage configuration to use for the primary PostgreSQL deployment\n BackupStorage\n required, the value of the storage configuration to use for backups\n ReplicaStorage\n required, the value of the storage configuration to use for the replica PostgreSQL deployments\n Storage.storage1.StorageClass\n for a dynamic storage type, you can specify the storage class used for storage provisioning(e.g. standard, gold, fast)\n Storage.storage1.AccessMode\n the access mode for new PVCs (e.g. ReadWriteMany, ReadWriteOnce, ReadOnlyMany). See below for descriptions of these.\n Storage.storage1.Size\n the size to use when creating new PVCs (e.g. 
100M, 1Gi)\n Storage.storage1.StorageType\n supported values are either dynamic, existing, create, or emptydir, if not supplied, emptydir is used\n Storage.storage1.Fsgroup\n optional, if set, will cause a SecurityContext and fsGroup attributes to be added to generated Pod and Deployment definitions\n Storage.storage1.SupplementalGroups\n optional, if set, will cause a SecurityContext to be added to generated Pod and Deployment definitions\n DefaultContainerResource\n optional, the value of the container resources configuration to use for all database containers, if not set, no resource limits or requests are added on the database container\n ContainerResources.small.RequestsMemory\n request size of memory in bytes\n ContainerResources.small.RequestsCPU\n request size of CPU cores\n ContainerResources.small.LimitsMemory\n request size of memory in bytes\n ContainerResources.small.LimitsCPU\n request size of CPU cores\n ContainerResources.large.RequestsMemory\n request size of memory in bytes\n ContainerResources.large.RequestsCPU\n request size of CPU cores\n ContainerResources.large.LimitsMemory\n request size of memory in bytes\n ContainerResources.large.LimitsCPU\n request size of CPU cores\n Pgo.LSPVCTemplate\n the PVC lspvc template file that lists PVC contents\n Pgo.LoadTemplate\n the load template file used for load jobs\n Pgo.COImagePrefix\n image tag prefix to use for the Operator containers\n Pgo.COImageTag\n image tag to use for the Operator containers\n Pgo.Audit\n boolean, if set to true will cause each apiserver call to be logged with an audit marking\n Pgo.Metrics\n boolean, if set to true will cause each new cluster to include crunchy-collect as a sidecar container for metrics collection, if set to false (default), users can still add metrics on a cluster-by-cluster basis using the pgo command flag --metrics\n Storage Configurations You can now define n-number of Storage configurations within the pgo.yaml file. Those Storage configurations follow these conventions -\n they must have lowercase name (e.g. storage1)\n they must be unique names (e.g. mydrstorage, faststorage, slowstorage)\n These Storage configurations are referenced in the BackupStorage, ReplicaStorage, and PrimaryStorage configuration values. However, there are command line options in the pgo client that will let a user override these default global values to offer you the user a way to specify very targeted storage configurations when needed (e.g. disaster recovery storage for certain backups).\n You can set the storage AccessMode values to the following -\n ReadWriteMany - mounts the volume as read-write by many nodes\n ReadWriteOnce - mounts the PVC as read-write by a single node\n ReadOnlyMany - mounts the PVC as read-only by many nodes\n These Storage configurations are validated when the pgo-apiserver starts, if a non-valid configuration is found, the apiserver will abort. These Storage values are only read at apiserver start time.\n The following StorageType values are possible -\n dynamic - this will allow for dynamic provisioning of storage using a StorageClass.\n existing - This setting allows you to use a PVC that already exists. For example, if you have a NFS volume mounted to a PVC, all PostgreSQL clusters can write to that NFS volume mount via a common PVC. When set, the Name setting is used for the PVC.\n create - This setting allows for the creation of a new PVC for each PostgreSQL cluster using a naming convention of clustername. 
When set, the Size, AccessMode settings are used in constructing the new PVC.\n emptydir - If a StorageType value is not defined, emptydir is used by default. This is a volume type that’s created when a pod is assigned to a node and exists as long as that pod remains running on that node; it is deleted as soon as the pod is manually deleted or removed from the node.\n The operator will create new PVCs using this naming convention: dbname where dbname is the database name you have specified. For example, if you run:\n pgo create cluster example1 It will result in a PVC being created named example1 and in the case of a backup job, the pvc is named example1-backup\n There are currently 3 sample pgo configuration files provided for users to use as a starting configuration -\n pgo.yaml.emptydir - this configuration specifies emptydir storage to be used for databases\n pgo.yaml.nfs - this configuration specifies create storage to be used, this is used for NFS storage for example where you want to have a unique PVC created for each database\n pgo.yaml.dynamic - this configuration specifies dynamic storage to be used, namely a storageclass that refers to a dynamic provisioning strorage such as StorageOS or Portworx, or GCE.\n Overriding Container Resources Configuration Defaults In the pgo.yaml configuration file you have the option to configure a default container resources configuration that when set will add CPU and memory resource limits and requests values into each database container when the container is created.\n You can also override the default value using the --resources-config command flag when creating a new cluster -\n pgo create cluster testcluster --resources-config=large Note, if you try to allocate more resources than your host or Kube cluster has available then you will see your pods wait in a Pending status. The output from a kubectl describe pod command will show output like this in this event -\n Events: Type Reason Age From Message ---- ------ ---- ---- ------- Warning FailedScheduling 49s (x8 over 1m) default-scheduler No nodes are available that match all of the predicates: Insufficient memory (1). Overriding Storage Configuration Defaults pgo create cluster testcluster --storage-config=bigdisk That example will create a cluster and specify a storage configuration of bigdisk to be used for the primary database storage. 
The replica storage will default to the value of ReplicaStorage as specified in pgo.yaml.\n pgo create cluster testcluster2 --storage-config=fastdisk --replica-storage-config=slowdisk That example will create a cluster and specify a storage configuration of fastdisk to be used for the primary database storage, while the replica storage will use the storage configuration slowdisk.\n pgo backup testcluster --storage-config=offsitestorage That example will create a backup and use the offsitestorage storage configuration for persisting the backup.\n Disaster Recovery Using Storage Configurations A simple mechanism for partial disaster recovery can be obtained by leveraging network storage, Kubernetes storage classes, and the storage configuration options within the Operator.\n For example, if you define a Kubernetes storage class that refers to a storage backend that is running within your disaster recovery site, and then use that storage class as a storage configuration for your backups, you essentially have moved your backup files automatically to your disaster recovery site thanks to network storage.\n postgres-operator Container Configuration To enable debug level messages from the operator pod, set the CRUNCHY_DEBUG environment variable to true within its deployment file deployment.json.\n Operator Templates The database and cluster Kubernetes objects that get created by the operator are based on JSON templates that are added into the operator deployment by means of a mounted volume.\n The templates are located in the $COROOT/conf/postgres-operator directory and are added into a config map which is mounted by the operator deployment.\n bash Completion There is a bash completion file that is included for users to try located in the repository at examples/pgo-bash-completion. To use it -\n cp $COROOT/examples/pgo-bash-completion /etc/bash_completion.d/pgo su - $USER REST API Because the apiserver implements a REST API, it is possible to integrate with it using your own application code. To demonstrate this, the following curl commands show the API usage -\n pgo version\n curl -v -X GET -u readonlyuser:testpass -H \"Content-Type: application/json\" --insecure https://10.101.155.218:8443/version pgo show policy all\n curl -v -X GET -u readonlyuser:testpass -H \"Content-Type: application/json\" --insecure https://10.101.155.218:8443/policies/all pgo show pvc danger\n curl -v -X GET -u readonlyuser:testpass -H \"Content-Type: application/json\" --insecure https://10.101.155.218:8443/pvc/danger pgo show cluster mycluster\n curl -v -X GET -u readonlyuser:testpass -H \"Content-Type: application/json\" --insecure https://10.101.155.218:8443/clusters/mycluster pgo show upgrade mycluster\n curl -v -X GET -u readonlyuser:testpass -H \"Content-Type: application/json\" --insecure https://10.101.155.218:8443/upgrades/mycluster pgo test mycluster\n curl -v -X GET -u readonlyuser:testpass -H \"Content-Type: application/json\" --insecure https://10.101.155.218:8443/clusters/test/mycluster pgo show backup mycluster\n curl -v -X GET -u readonlyuser:testpass -H \"Content-Type: application/json\" --insecure https://10.101.155.218:8443/backups/mycluster Deploying pgpool One option with pgo is enabling the creation of a pgpool deployment in addition to the PostgreSQL cluster. Running pgpool is a logical inclusion when the Kubernetes cluster includes both a primary database in addition to some number of replicas deployed. 
The current pgpool configuration deployed by the operator only works when both a primary and a replica are running.\n When a user creates the cluster a command flag can be passed as follows to enable the creation of the pgpool deployment.\n pgo create cluster cluster1 --pgpool pgo scale cluster1 This will cause the operator to create a Deployment that includes the crunchy-pgpool container along with a replica. That container will create a configuration that will perform SQL routing to your cluster services, both for the primary and replica services.\n Pgpool examines the SQL it receives and routes the SQL statement to either the primary or replica based on the SQL action. Specifically, it will send writes and updates to only the primary service. It will send read-only statements to the replica service.\n When the operator deploys the pgpool container, it creates a secret (e.g. mycluster-pgpool-secret) that contains pgpool configuration files. It fills out templated versions of these configuration files specifically for this PostgreSQL cluster.\n Part of the pgpool deployment also includes creating a pool_passwd file that will allow the testuser credential to authenticate to pgpool. Adding additional users to the pgpool configuration currently requires human intervention specifically creating a new pgpool secret and bouncing the pgpool pod to pick up the updated secret. Future operator releases will attempt to provide pgo commands to let you automate the addition or removal of a pgpool user.\n Currently to update a pgpool user within the pool_passwd configuration file, it is necessary to copy the existing files from the secret to your local system, update the credentials in pool_passwd with the new user credentials, recreate the pgpool secret, and finally restart the pgpool pod to pick up the updated configuration files.\n As an example -\n kubectl cp demo/wed10-pgpool-6cc6f6598d-wcnmf:/pgconf/ /tmp/foo That command gets a running set of secret pgpool configuration files and places them locally on your system for you to edit.\n pgpool requires a specially formatted password credential to be placed into pool_passwd. There is a golang program included in $COROOT/golang-examples/gen-pgpool-pass.go that, when run, will generate the value to use within the pgpool_passwd configuration file.\n go run $COROOT/golang-examples/gen-pgpool-pass.go Enter Username: testuser Enter Password: Password typed: e99Mjt1dLz hash of password is [md59c4017667828b33762665dc4558fbd76] The value md59c4017667828b33762665dc4558fbd76 is what you will use in the pool_passwd file.\n Then, create the new secrets file based on those updated files -\n $COROOT/bin/create-pgpool-secrets.sh Lastly for pgpool to pick up the new secret file, delete the existing deployment pod -\n kubectl get deployment wed-pgpool kubectl delete pod wed10-pgpool-6cc6f6598d-wcnmf The pgpool deployment will spin up another pgpool which will pick up the updated secret file.\n " + "content": "v3.1, 2018-06-13\n Upgrading from v2.4 to v2.5 For a full list of additions and revisions that occurred in the PostgreSQL Operator v2.5 release, please view the related release page here.\n Required Updates This section notes some required steps that will need to be taken in the process of upgrading from v2.4 to v2.5.\n Configuration File It will be necessary to update your existing pgo.yaml configuration file where the Storage Configuration sections are concerned. The updated file for v2.5 can be found here. 
The file contained within the local installation of the Operator is located by default in the following location -\n $COROOT/conf/apiserver/pgo.yaml Secrets 2.5 changed the names of the database credentials that are created by default in order to be consistent with the way new database credentials are named.\n It will be necessary to run the following script to update your existing clusters. This script will essentially copy the existing secrets values and create new secrets with those same values but named to the new standard. Run the script by passing in the name of an existing cluster as a parameter.\n $COROOT/bin/upgrade-secret.sh Upgrading from v2.5 to v2.6 For a full list of additions and revisions that occurred in the PostgreSQL Operator v2.6 release, please view the related release page here.\n Required Updates This section notes some required steps that will need to be taken in the process of upgrading from v2.5 to v2.6.\n Configuration File One update in v2.6 changed the pgo.yaml file through removing the Debug flag. The Pgo.Debug variable can now be removed from the pgo.yaml file as a result. The debug flag is now called CRUNCHY_DEBUG and is set in the deployment.json file as a default environment variable.\n Container Resources Release 2.6 added the concept of container resource configurations to the pgo.yaml file. In order to specify the optional container resource configurations, add a section as follows to your pgo.yaml file -\n DefaultContainerResource: small ContainerResources: small: RequestsMemory: 2Gi RequestsCPU: 0.5 LimitsMemory: 2Gi LimitsCPU: 1.0 large: RequestsMemory: 8Gi RequestsCPU: 2.0 LimitsMemory: 12Gi LimitsCPU: 4.0 Warning: if these settings are set incorrectly, or if the Kubernetes cluster cannot meet the defined memory and CPU requirements, deployments will go into a pending state.\n Kube RBAC Release 2.6 added an rbac.yaml file to capture the Kube RBAC rules. These RBAC rules allow the apiserver and postgres-operator containers access to the Kubernetes resources required for the operator to work. As part of the deployment process, it is necessary to execute the rbac.yaml file to set the roles and bindings required by the operator. Adjust this file to suit local security requirements.\n Application RBAC Release 2.6 added an RBAC capability to secure the pgo application. The pgouser file now has a role appended at the end of each user definition as follows -\n username:password:pgoadmin testuser:testpass:pgoadmin readonlyuser:testpass:pgoreader These are defined in the following file -\n $COROOT/conf/apiserver/pgouser To match the behavior of the pre-2.6 releases, the pgoadmin role is set on the previous user definitions, but a readonlyuser is now defined to test other role definitions. The roles are defined in a new file called pgorole. This file defines each role and the permissions for that role. 
By default, two roles are defined as samples -\n pgoadmin pgoreader Adjust these default settings to meet local security requirements.\n The format of this file is as follows -\n rolename: permissionA, permissionB These are defined in the following file -\n $COROOT/conf/apiserver/pgorole The complete set of permissions is documented in the Configuration document.\n User Creation Release 2.6 replaced the pgo user --add command with the pgo create user command to improve consistency across command usage. Any scripts written using the older style of command require an update to use the new command syntax.\n Replica CRD There is a new Kubernetes Custom Resource Definition that serves the purpose of holding replica information, called pgreplicas. This CRD is populated with the pgo scale command and is used to hold per-replica specific information such as the resource and storage configurations requested at run time.\n \n " }, { "uri": "https://crunchydata.github.io/postgres-operator/contributing/", "title": "Contributing", "tags": [], "description": "", - "content": "v3.0, 2018-06-04\n Getting Started Welcome! Thank you for your interest in contributing. Before submitting a new issue or pull request to the Crunchy Data PostgreSQL Operator project on GitHub, please review any open or closed issues here in addition to any existing open pull requests.\n Documentation The documentation website (located at https://crunchydata.github.io/postgres-operator/) is generated using Hugo and GitHub Pages. If you would like to build the documentation locally, view the official Installing Hugo guide to set up Hugo locally. You can then start the server by running the following commands -\n cd $COROOT/hugo/ vi config.toml hugo server When you edit config.toml, you\u0026#8217;ll set baseURL = \"/\". This will make the local version of the Hugo server accessible by default from localhost:1313. Once you\u0026#8217;ve run hugo server, that will let you interactively make changes to the documentation as desired and view the updates in real-time.\n When you\u0026#8217;re ready to commit a change, please view and run the script located at $COROOT/generate-docs.sh which will automatically generate a new set of webpages using Hugo that will update the live website after the change has been committed to the repository.\n " + "content": "v3.1, 2018-06-13\n Getting Started Welcome! Thank you for your interest in contributing. Before submitting a new issue or pull request to the Crunchy Data PostgreSQL Operator project on GitHub, please review any open or closed issues here in addition to any existing open pull requests.\n Documentation The documentation website (located at https://crunchydata.github.io/postgres-operator/) is generated using Hugo and GitHub Pages. If you would like to build the documentation locally, view the official Installing Hugo guide to set up Hugo locally. You can then start the server by running the following commands -\n cd $COROOT/hugo/ vi config.toml hugo server When you edit config.toml, you\u0026#8217;ll set baseURL = \"/\". This will make the local version of the Hugo server accessible by default from localhost:1313. 
Once you\u0026#8217;ve run hugo server, that will let you interactively make changes to the documentation as desired and view the updates in real-time.\n When you\u0026#8217;re ready to commit a change, please view and run the script located at $COROOT/generate-docs.sh which will automatically generate a new set of webpages using Hugo that will update the live website after the change has been committed to the repository.\n " }, { "uri": "https://crunchydata.github.io/postgres-operator/how-it-works/", "title": "How it Works", "tags": [], "description": "", - "content": "Table of Contents Reference Architecture Custom Resource Definitions Command Line Interface Operator Deployment CLI Design Verbs Affinity Debugging Persistent Volumes PostgreSQL Operator Deployment Strategies Strategies Specifying a Strategy Strategy Template Files Default Cluster Deployment Strategy (1) Cluster Deletion Custom Postgres Configurations Metrics Collection v3.0, 2018-06-04\n Reference Architecture So, what does the Postgres Operator actually deploy when you create a cluster?\n On this diagram, objects with dashed lines are components that are optionally deployed as part of a PostgreSQL Cluster by the operator. Objects with solid lines are the fundamental and required components.\n For example, within the Primary Deployment, the metrics container is completely optional. That component can be deployed using either the operator configuration or command line arguments if you want to cause metrics to be collected from the Postgres container.\n Replica deployments are similar to the primary deployment but are optional. A replica is not required to be created unless the capability for one is necessary. As you scale up the Postgres cluster, the standard set of components gets deployed and replication to the primary is started.\n Notice that each cluster deployment gets its own unique Persistent Volumes. Each volume can use different storage configurations which is quite powerful.\n Custom Resource Definitions Kubernetes Custom Resource Definitions are used in the design of the PostgreSQL Operator to define the following -\n Cluster - pgclusters\n Backup - pgbackups\n Upgrade - pgupgrades\n Policy - pgpolicies\n Tasks - pgtasks\n Command Line Interface The pgo command line interface (CLI) is used by a normal end-user to create databases or clusters, or make changes to existing databases.\n The CLI interacts with the apiserver REST API deployed within the postgres-operator deployment.\n From the CLI, users can view existing clusters that were deployed using the CLI and Operator. Objects that were not previously created by the Crunchy Operator are now viewable from the CLI.\n Operator Deployment The PostgreSQL Operator runs within a Deployment in the Kubernetes cluster. An administrator will deploy the operator deployment using the provided script. Once installed and running, the Operator pod will start watching for certain defined events.\n The operator watches for create/update/delete actions on the pgcluster custom resource definitions. When the CLI creates for example a new pgcluster custom resource definition, the operator catches that event and creates pods and services for that new cluster request.\n CLI Design The CLI uses the cobra package to implement CLI functionality like help text, config file processing, and command line parsing.\n The pgo client is essentially a REST client which communicates to the pgo-apiserver REST server running within the Operator pod. 
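As a rough illustration of that interaction (this simply mirrors the curl examples shown in the Configuration documentation and uses the documented default listener address rather than a real deployment), a command such as pgo show cluster mycluster translates into a single REST call against the apiserver of the form -\n curl -X GET -u username:password -H \"Content-Type: application/json\" --insecure https://postgres-operator:8443/clusters/mycluster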
In some cases you might want to split the apiserver out into its own Deployment but the default deployment has a consolidated pod that contains both the apiserver and operator containers simply for convenience of deployment and updates.\n Verbs A user works with the CLI by entering verbs to indicate what they want to do, as follows.\n pgo show cluster all pgo delete cluster db1 db2 db3 pgo create cluster mycluster In the above example, the show, delete, and create verbs are used. The CLI is case sensitive and supports only lowercase.\n Affinity You can have the Operator add an affinity section to a new Cluster Deployment if you want to cause Kubernetes to attempt to schedule a primary cluster to a specific Kubernetes node.\n You can see the nodes on your Kube cluster by running the following -\n kubectl get nodes You can then specify one of those names (e.g. kubeadm-node2) when creating a cluster -\n pgo create cluster thatcluster --node-name=kubeadm-node2 The affinity rule inserted in the Deployment will use a preferred strategy so that if the node were down or not available, Kube would go ahead and schedule the Pod on another node.\n You can always view the actual node your cluster pod is scheduled on through the following command.\n kubectl get pod -o wide When you scale up a Cluster and add a replica, the scaling will take into account the use of --node-name. If it sees that a cluster was created with a specific node name, then the replica Deployment will add an affinity rule to attempt to schedule the replica on a different node than the node the primary is scheduled on. This provides a simple version of high availability and causes the primary and replicas to not live on the same Kubernetes node.\n Debugging To see if the operator pod is running, enter the following -\n kubectl get pod -l 'name=postgres-operator' To verify the operator is running and has deployed the Custom Resources, execute the following -\n kubectl get crd The full list of CRDs that are created over time is shown below.\n NAME KIND pgbackups.cr.client-go.k8s.io CustomResourceDefinition.v1beta1.apiextensions.k8s.io pgclusters.cr.client-go.k8s.io CustomResourceDefinition.v1beta1.apiextensions.k8s.io pgpolicies.cr.client-go.k8s.io CustomResourceDefinition.v1beta1.apiextensions.k8s.io pgpolicylogs.cr.client-go.k8s.io CustomResourceDefinition.v1beta1.apiextensions.k8s.io pgupgrades.cr.client-go.k8s.io CustomResourceDefinition.v1beta1.apiextensions.k8s.io pgtasks.cr.client-go.k8s.io CustomResourceDefinition.v1beta1.apiextensions.k8s.io Persistent Volumes Currently, the operator does not delete persistent volumes by default. Instead, it deletes the claims on the volumes. Starting with release 2.4, the Operator will create Jobs that run rm commands on the data volumes before removing the Persistent Volumes if the user passes a --delete-data flag when deleting a database cluster.\n Likewise, if the user passes --delete-backups during cluster deletion, a Job is created to remove all the backups for a cluster, including the related Persistent Volume.\n PostgreSQL Operator Deployment Strategies This section describes the various deployment strategies offered by the operator. A deployment in this case is the set of objects created in Kubernetes when a custom resource definition of type pgcluster is created. CRDs are created by the pgo client command and acted upon by the postgres operator.\n Strategies To support different types of deployments, the operator supports multiple strategy implementations. 
Currently there is only a default cluster strategy.\n In the future, more deployment strategies will be supported to offer users more customization of what they see deployed in their Kubernetes cluster.\n Being open source, users can also write their own strategy!\n Specifying a Strategy In the pgo client configuration file, there is a CLUSTER.STRATEGY setting. The current value of the default strategy is 1. If you don\u0026#8217;t set that value, the default strategy is assumed. If you set that value to something not supported, the operator will log an error.\n Strategy Template Files Each strategy supplies its set of templates used by the operator to create new pods, services, etc.\n When the operator is deployed, part of the deployment process is to copy the required strategy templates into a ConfigMap (operator-conf) that gets mounted into /operator-conf within the operator pod.\n The directory structure of the strategy templates is as follows -\n |-- backup-job.json |-- cluster | |-- 1 | |-- cluster-deployment-1.json | |-- cluster-replica-deployment-1.json | |-- cluster-service-1.json | |-- pvc.json In this structure, each strategy\u0026#8217;s templates live in a subdirectory that matches the strategy identifier. The default strategy templates are denoted by the value of 1 in the directory structure above.\n If you add another strategy, the file names must be unique within the entire strategy directory. This is due to the way the templates are stored within the ConfigMap.\n Default Cluster Deployment Strategy (1) Using the default cluster strategy, a cluster created by the operator will result in the following on a Kubernetes cluster -\n deployment running a Postgres primary container with replica count of 1\n service mapped to the primary Postgres database\n service mapped to the replica Postgres database\n PVC for the primary will be created if not specified in configuration, this assumes you are using a non-shared volume technology (e.g. Amazon EBS), if the CLUSTER.PVC_NAME value is set in your configuration then a shared volume technology is assumed (e.g. HostPath or NFS), if a PVC is created for the primary, the naming convention is clustername where clustername is the name of your cluster.\n If you want to add a Postgres replica to a cluster, you will scale the cluster. For each replica-count, a Deployment will be created that acts as a PostgreSQL replica.\n This is very different from using a StatefulSet to scale up PostgreSQL. Why would you do it this way? Imagine a case where you want different parts of your PostgreSQL cluster to use different storage configurations. With this method, it can be done through specific placement and deployments of each part of the cluster.\n This same concept applies to node selection for the PostgreSQL cluster components. The Operator will let you define precisely which node the PostgreSQL component should be placed upon, using node affinity rules.\n Cluster Deletion When you run the following, the cluster and its services will be deleted. 
However, the data files and backup files will remain, as well as the PVCs for this cluster.\n pgo delete cluster mycluster However, to remove the data files from the PVC you can pass the following flag -\n --delete-data This causes a workflow to be started to remove the data files on the primary cluster deployment PVC.\n The following flag will cause all of the backup files to be removed.\n --delete-backups The data removal workflow includes the following steps -\n create a pgtask CRD to hold the PVC name and cluster name to be removed\n the CRD is watched, and on an ADD will cause a Job to be created that will run the rmdata container using the PVC name and cluster name as parameters which determine the PVC to mount, and the file path to remove under that PVC\n the rmdata Job is watched by the Operator, and upon a successful status completion the actual PVC is removed\n This workflow ensures that a PVC is not removed until all the data files are removed. Also, a Job is used for the removal of files since that can be a time-consuming task.\n The files are removed by the rmdata container which essentially issues the following command to remove the files -\n rm -rf /pgdata/\u0026lt;some path\u0026gt; Custom Postgres Configurations Starting in release 2.5, users and administrators can specify a custom set of Postgres configuration files to be used when creating a new Postgres cluster. The configuration files you can change include -\n postgresql.conf\n pg_hba.conf\n setup.sql\n Different configurations for PostgreSQL might be defined for the following -\n OLTP types of databases\n OLAP types of databases\n High Memory\n Minimal Configuration for Development\n Project Specific configurations\n Special Security Requirements\n Global ConfigMap If you create a configMap called pgo-custom-pg-config with any of the above files within it, new clusters will use those configuration files when setting up a new database instance. You do NOT have to specify all of the configuration files. It is entirely up to your use case to determine which to use.\n An example set of configuration files and a script to create the global configMap is found at -\n $COROOT/examples/custom-config If you run the create.sh script there, it will create the configMap that will include the PostgreSQL configuration files within that directory.\n Config Files Purpose The postgresql.conf file is the main PostgreSQL configuration file that allows the definition of a wide variety of tuning parameters and features.\n The pg_hba.conf file is the way PostgreSQL secures client access.\n The setup.sql file is a Crunchy Container Suite configuration file used to initially populate the database after the initial initdb is run when the database is first created. Changes would be made to this if you wanted to define which database objects are created by default.\n Granular Config Maps Granular config maps can be defined if it is necessary to use a different set of configuration files for different clusters rather than having a single configuration (e.g. Global Config Map). A specific set of ConfigMaps with their own set of PostgreSQL configuration files can be created. When creating new clusters, a --custom-config flag can be passed along with the name of the ConfigMap which will be used for that specific cluster or set of clusters.\n Defaults If there\u0026#8217;s no reason to change the default PostgreSQL configuration files that ship with the Crunchy Postgres container, there\u0026#8217;s no requirement to. 
In this event, continue using the Operator as usual and avoid defining a global configMap.\n Labeling When a custom configMap is used in cluster creation, the Operator labels the primary Postgres Deployment with a label of custom-config and a value of what configMap was used when creating the database.\n Commands coming in future releases will take advantage of this labeling.\n Metrics Collection If you add a --metrics flag to pgo create cluster it will cause the crunchy-collect container to be added to your Postgres cluster.\n That container requires you run the crunchy-metrics containers as defined within the crunchy-containers project.\n The prometheus push gateway that is deployed as part of the crunchy-metrics example is a current requirement for the metrics solution. This will change in an upcoming release of the crunchy-containers project and there will no longer be a requirement for the push gateway to be deployed.\n See the crunchy-containers Metrics example for more details on setting up the crunchy-metrics solution.\n " + "content": "Table of Contents Reference Architecture Custom Resource Definitions Command Line Interface Operator Deployment CLI Design Verbs Affinity Debugging Persistent Volumes PostgreSQL Operator Deployment Strategies Strategies Specifying a Strategy Strategy Template Files Default Cluster Deployment Strategy (1) Cluster Deletion Custom Postgres Configurations Metrics Collection Manual Failover Auto Failover v3.1, 2018-06-13\n Reference Architecture So, what does the Postgres Operator actually deploy when you create a cluster?\n On this diagram, objects with dashed lines are components that are optionally deployed as part of a PostgreSQL Cluster by the operator. Objects with solid lines are the fundamental and required components.\n For example, within the Primary Deployment, the metrics container is completely optional. That component can be deployed using either the operator configuration or command line arguments if you want to cause metrics to be collected from the Postgres container.\n Replica deployments are similar to the primary deployment but are optional. A replica is not required to be created unless the capability for one is necessary. As you scale up the Postgres cluster, the standard set of components gets deployed and replication to the primary is started.\n Notice that each cluster deployment gets its own unique Persistent Volumes. Each volume can use different storage configurations which is quite powerful.\n Custom Resource Definitions Kubernetes Custom Resource Definitions are used in the design of the PostgreSQL Operator to define the following -\n Cluster - pgclusters\n Backup - pgbackups\n Upgrade - pgupgrades\n Policy - pgpolicies\n Tasks - pgtasks\n Command Line Interface The pgo command line interface (CLI) is used by a normal end-user to create databases or clusters, or make changes to existing databases.\n The CLI interacts with the apiserver REST API deployed within the postgres-operator deployment.\n From the CLI, users can view existing clusters that were deployed using the CLI and Operator. Objects that were not previously created by the Crunchy Operator are now viewable from the CLI.\n Operator Deployment The PostgreSQL Operator runs within a Deployment in the Kubernetes cluster. An administrator will deploy the operator deployment using the provided script. 
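A quick way to confirm the deploy script did its job is to look for the objects it creates - a sketch, assuming the default postgres-operator names and labels used elsewhere in this document -\n kubectl get deployment postgres-operator kubectl get pod -l 'name=postgres-operator' kubectl get service postgres-operator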
Once installed and running, the Operator pod will start watching for certain defined events.\n The operator watches for create/update/delete actions on the pgcluster custom resource definitions. When the CLI creates, for example, a new pgcluster custom resource definition, the operator catches that event and creates pods and services for that new cluster request.\n CLI Design The CLI uses the cobra package to implement CLI functionality like help text, config file processing, and command line parsing.\n The pgo client is essentially a REST client which communicates to the pgo-apiserver REST server running within the Operator pod. In some cases you might want to split the apiserver out into its own Deployment, but the default deployment has a consolidated pod that contains both the apiserver and operator containers simply for convenience of deployment and updates.\n Verbs A user works with the CLI by entering verbs to indicate what they want to do, as follows.\n pgo show cluster all pgo delete cluster db1 db2 db3 pgo create cluster mycluster In the above example, the show, delete, and create verbs are used. The CLI is case sensitive and supports only lowercase.\n Affinity You can have the Operator add an affinity section to a new Cluster Deployment if you want to cause Kubernetes to attempt to schedule a primary cluster to a specific Kubernetes node.\n You can see the nodes on your Kube cluster by running the following -\n kubectl get nodes You can then specify one of those names (e.g. kubeadm-node2) when creating a cluster -\n pgo create cluster thatcluster --node-name=kubeadm-node2 The affinity rule inserted in the Deployment uses a preferred strategy so that if the node is down or not available, Kubernetes will still schedule the Pod on another node.\n You can always view the actual node your cluster pod is scheduled on through the following command.\n kubectl get pod -o wide When you scale up a Cluster and add a replica, the scaling will take into account the use of --node-name. If it sees that a cluster was created with a specific node name, then the replica Deployment will add an affinity rule to attempt to schedule the replica on a different node than the node the primary is scheduled on. This provides a simple version of high availability and causes the primary and replicas to not live on the same Kubernetes node.\n Debugging To see if the operator pod is running, enter the following -\n kubectl get pod -l 'name=postgres-operator' To verify the operator is running and has deployed the Custom Resources, execute the following -\n kubectl get crd The full list of CRDs that are created over time is shown below.\n NAME KIND pgbackups.cr.client-go.k8s.io CustomResourceDefinition.v1beta1.apiextensions.k8s.io pgclusters.cr.client-go.k8s.io CustomResourceDefinition.v1beta1.apiextensions.k8s.io pgpolicies.cr.client-go.k8s.io CustomResourceDefinition.v1beta1.apiextensions.k8s.io pgpolicylogs.cr.client-go.k8s.io CustomResourceDefinition.v1beta1.apiextensions.k8s.io pgupgrades.cr.client-go.k8s.io CustomResourceDefinition.v1beta1.apiextensions.k8s.io pgtasks.cr.client-go.k8s.io CustomResourceDefinition.v1beta1.apiextensions.k8s.io Persistent Volumes Currently, the operator does not delete persistent volumes by default. Instead, it deletes the claims on the volumes. 
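If you want to see what remains after a cluster is deleted, the standard listings are enough - for example -\n kubectl get pvc kubectl get pv You can also run pgo show pvc all to list the PVCs the operator knows about.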
Starting with release 2.4, the Operator will create Jobs that run rm commands on the data volumes before actually removing the Persistent Volumes if the user passes a --delete-data flag when deleting a database cluster.\n Likewise, if the user passes --delete-backups during cluster deletion, a Job is created to remove all the backups for a cluster, including the related Persistent Volume.\n PostgreSQL Operator Deployment Strategies This section describes the various deployment strategies offered by the operator. A deployment in this case is the set of objects created in Kubernetes when a custom resource definition of type pgcluster is created. CRDs are created by the pgo client command and acted upon by the postgres operator.\n Strategies To support different types of deployments, the operator supports multiple strategy implementations. Currently there is only a default cluster strategy.\n In the future, more deployment strategies will be supported to offer users more customization of what they see deployed in their Kubernetes cluster.\n Being open source, users can also write their own strategy!\n Specifying a Strategy In the pgo client configuration file, there is a CLUSTER.STRATEGY setting. The current value of the default strategy is 1. If you don\u0026#8217;t set that value, the default strategy is assumed. If you set that value to something not supported, the operator will log an error.\n Strategy Template Files Each strategy supplies its set of templates used by the operator to create new pods, services, etc.\n When the operator is deployed, part of the deployment process is to copy the required strategy templates into a ConfigMap (operator-conf) that gets mounted into /operator-conf within the operator pod.\n The directory structure of the strategy templates is as follows -\n |-- backup-job.json |-- cluster | |-- 1 | |-- cluster-deployment-1.json | |-- cluster-replica-deployment-1.json | |-- cluster-service-1.json | |-- pvc.json In this structure, each strategy\u0026#8217;s templates live in a subdirectory that matches the strategy identifier. The default strategy templates are denoted by the value of 1 in the directory structure above.\n If you add another strategy, the file names must be unique within the entire strategy directory. This is due to the way the templates are stored within the ConfigMap.\n Default Cluster Deployment Strategy (1) Using the default cluster strategy, a cluster created by the operator will result in the following on a Kubernetes cluster -\n deployment running a Postgres primary container with replica count of 1\n service mapped to the primary Postgres database\n service mapped to the replica Postgres database\n PVC for the primary will be created if not specified in configuration; this assumes you are using a non-shared volume technology (e.g. Amazon EBS); if the CLUSTER.PVC_NAME value is set in your configuration then a shared volume technology is assumed (e.g. HostPath or NFS); if a PVC is created for the primary, the naming convention is clustername where clustername is the name of your cluster.\n If you want to add a Postgres replica to a cluster, you will scale the cluster. For each replica-count, a Deployment will be created that acts as a PostgreSQL replica.\n This is very different from using a StatefulSet to scale up PostgreSQL. Why would you do it this way? Imagine a case where you want different parts of your PostgreSQL cluster to use different storage configurations. 
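For example, a replica can be scaled into its own storage and resource configuration - a sketch reusing the flags shown in the Getting Started examples -\n pgo scale mycluster --storage-config=storage1 --resources-config=small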
With this method, it can be done through using specific placement and deployments of each part of the cluster.\n This same concept applies to node selection for the PostgreSQL cluster components. The Operator will let you define precisely which node the PostgreSQL component should be placed upon using node affinity rules.\n Cluster Deletion When you run the following, the cluster and its services will be deleted. However, the data files and backup files will remain as well as the PVCs for this cluster.\n pgo delete cluster mycluster To remove the data files from the PVC, you can pass the following flag -\n --delete-data This causes a workflow to be started to remove the data files on the primary cluster deployment PVC.\n The following flag will cause all of the backup files to be removed.\n --delete-backups The data removal workflow includes the following steps -\n create a pgtask CRD to hold the PVC name and cluster name to be removed\n the CRD is watched, and on an ADD will cause a Job to be created that will run the rmdata container using the PVC name and cluster name as parameters which determine the PVC to mount, and the file path to remove under that PVC\n the rmdata Job is watched by the Operator, and upon a successful status completion the actual PVC is removed\n This workflow ensures that a PVC is not removed until all the data files are removed. Also, a Job is used for the removal of files since that can be a time-consuming task.\n The files are removed by the rmdata container which essentially issues the following command to remove the files -\n rm -rf /pgdata/\u0026lt;some path\u0026gt; Custom Postgres Configurations Starting in release 2.5, users and administrators can specify a custom set of Postgres configuration files to be used when creating a new Postgres cluster. The configuration files you can change include -\n postgresql.conf\n pg_hba.conf\n setup.sql\n Different configurations for PostgreSQL might be defined for the following -\n OLTP types of databases\n OLAP types of databases\n High Memory\n Minimal Configuration for Development\n Project Specific configurations\n Special Security Requirements\n Global ConfigMap If you create a configMap called pgo-custom-pg-config with any of the above files within it, new clusters will use those configuration files when setting up a new database instance. You do NOT have to specify all of the configuration files. It is entirely up to your use case to determine which to use.\n An example set of configuration files and a script to create the global configMap is found at -\n $COROOT/examples/custom-config If you run the create.sh script there, it will create the configMap that will include the PostgreSQL configuration files within that directory.\n Config Files Purpose The postgresql.conf file is the main PostgreSQL configuration file that allows the definition of a wide variety of tuning parameters and features.\n The pg_hba.conf file is the way PostgreSQL secures client access.\n The setup.sql file is a Crunchy Container Suite configuration file used to initially populate the database after the initial initdb is run when the database is first created. Changes would be made to this if you wanted to define which database objects are created by default.\n Granular Config Maps Granular config maps can be defined if it is necessary to use a different set of configuration files for different clusters rather than having a single configuration (e.g. Global Config Map). 
A specific set of ConfigMaps with their own set of PostgreSQL configuration files can be created. When creating new clusters, a --custom-config flag can be passed along with the name of the ConfigMap which will be used for that specific cluster or set of clusters.\n Defaults If there\u0026#8217;s no reason to change the default PostgreSQL configuration files that ship with the Crunchy Postgres container, there\u0026#8217;s no requirement to. In this event, continue using the Operator as usual and avoid defining a global configMap.\n Labeling When a custom configMap is used in cluster creation, the Operator labels the primary Postgres Deployment with a label of custom-config and a value of what configMap was used when creating the database.\n Commands coming in future releases will take advantage of this labeling.\n Metrics Collection If you add a --metrics flag to pgo create cluster it will cause the crunchy-collect container to be added to your Postgres cluster.\n That container requires you run the crunchy-metrics containers as defined within the crunchy-containers project.\n The prometheus push gateway that is deployed as part of the crunchy-metrics example is a current requirement for the metrics solution. This will change in an upcoming release of the crunchy-containers project and there will no longer be a requirement for the push gateway to be deployed.\n See the crunchy-containers Metrics example for more details on setting up the crunchy-metrics solution.\n Manual Failover With manual failover some key features include:\n when you perform a failover, a new replica is created to replace the replica that was promoted to even out the cluster to the original number of replicas\n when you perform a failover, the promoted replica is removed from the pgreplica CRD to represent the current truth\n The pgo failover --query command will return a list of replica targets which you can select from. That list include the Ready status of the database as well as the Kube node name it is running on.\n Auto Failover Starting with release 3.1, there is an auto failover mechanism that can be leveraged by pgo users if enabled.\n This feature will cause the operator to start a timer on a database primary that has received a NotReady status after the database has started. This can happen if for instance the primary database loses the connection to its database storage (e.g. gluster, NFS).\n Once the timer is started, if the primary database does not get back to a Ready status within that timer period, a failover is triggered for this cluster. 
The failover target is selected by the auto failover logic.\n The amount of time (in seconds) the auto failover timer will wait before triggering a failover is determined by the following pgo.yaml setting:\n AutofailSleepSeconds: 9 If the above setting is not configured a default value of 30 seconds is chose.\n The logic of auto failover works like this:\n the readiness probe on the primary database container is executed every few seonds to check the readiness of the database, this is what tells Kubernetes whether or not the container is Ready or NotReady.\n if a NotReady state is detected then that event is caught by the operator which is watching for database containers created by the operator\n upon a NotReady event, a timer is started for that database which acts as the final check as to if a failover is required for that database\n if the timer expires and the state is still Not Ready then the manual failover logic is executed for this cluster which causes a promotion of a replica to primary, and also creates a replacement replica\n only replica targets with a status of Ready will be used to select the target to promote\n The readiness probe settings are defined in the following template:\n conf/postgres-operator/cluster/1/cluster-deployment-1.json The readiness probe settings determine how often the database check is performed. See the Kubernetes documentation on readiness probes for more details on these settings.\n " }, { "uri": "https://crunchydata.github.io/postgres-operator/getting-started/", "title": "Getting Started", "tags": [], "description": "", - "content": "Table of Contents pgo Commands pgo version pgo create cluster pgo backup pgo delete backup pgo delete cluster pgo scale pgo upgrade pgo delete upgrade pgo show pvc pgo show cluster pgo test pgo create policy pgo delete policy pgo apply pgo user pgo label pgo load pgo failover pgo df pgo status v3.0, 2018-06-04\n pgo Commands Prior to using pgo, users will need to specify the postgres-operator URL as follows:\n kubectl get service postgres-operator NAME CLUSTER-IP EXTERNAL-IP PORT(S) AGE postgres-operator 10.104.47.110 \u0026lt;none\u0026gt; 8443/TCP 7m export CO_APISERVER_URL=https://10.104.47.110:8443 pgo version pgo version To see what version of pgo client and postgres-operator you are running, use the following -\n pgo version pgo create cluster To create a database, use the following -\n pgo create cluster mycluster A more complex example is to create a series of clusters such as -\n pgo create cluster xraydb --series=3 --labels=project=xray --policies=xrayapp,rlspolicy In the example above, we provision 3 clusters that have a number appended into their resulting cluster name, apply a user defined label to each cluster, and also apply user defined policies to each cluster after they are created.\n You can then view that database as -\n pgo show cluster mycluster Also, if you like to see JSON formatted output, add the -o json flag -\n pgo show cluster mycluster -o json The output will give you the current status of the database pod and the IP address of the database service. If you have psql installed on your test system you can connect to the database using the service IP address -\n psql -h 10.105.121.12 -U postgres postgres User credentials are generated through Kubernetes Secrets automatically for the testuser, primaryuser and postgres accounts. The generated passwords can be viewed by running the pgo show cluster command with the --show-secrets flag. 
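For example, reusing the form shown later under pgo show cluster -\n pgo show cluster mycluster --show-secrets=true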
More details are available on user management below.\n You can view all databases using the special keyword all -\n pgo show cluster all You can filter the results based on the Postgres Version -\n pgo show cluster all --version=9.6.2 You can also add metrics collection to a cluster by using the --metrics command flag as follows -\n pgo create cluster testcluster --metrics This command flag causes a crunchy-collect container to be added to the database cluster pod and enables metrics collection on that database pod. For this to work, you will need to configure the Crunchy metrics example as found in the Crunchy Container Suite.\n New clusters typically pick up the container image version to use based on the pgo configuration file\u0026#8217;s CCP_IMAGE_TAG setting. You can override this value using the --ccp-image-tag command line flag -\n pgo create cluster mycluster --ccp-image-tag=centos7-9.6.5-1.6.0 You can also add a pgpool deployment into a cluster by using the --pgpool command flag as follows:\n pgo create cluster testcluster --pgpool This will cause a crunchy-pgpool container to be started and initially configured for a cluster and the testuser cluster credential. See below for more details on running a pgpool deployment as part of your cluster.\n You can also enable archive logging into a dedicated PVC by using the --archive command flag as follows:\n pgo create cluster testcluster --archive This will cause a new PVC to be created to hold archive logs. Space is consumed by these logs but archives enable you to perform Point-In-Time-Recovery.\n pgo backup You can start a backup job for a cluster as follows:\n pgo backup mycluster You can view the backup:\n pgo show backup mycluster View the PVC folder and the backups contained therein:\n pgo show pvc mycluster-backup pgo show pvc mycluster-backup --pvc-root=mycluster-backups The output from this command is important in that it can let you copy/paste a backup snapshot path and use it for restoring a database or essentially cloning a database with an existing backup archive.\n For example, to restore a database from a backup archive:\n pgo create cluster restoredb --backup-path=mycluster-backups/2017-03-27-13-56-49 --backup-pvc=mycluster-backup --secret-from=mycluster This will create a new database called restoredb based on the backup found in mycluster-backups/2017-03-27-13-56-49 and the secrets of the mycluster cluster.\n Selectors can be used to perform backups as well, for example:\n pgo backup --selector=project=xray In this example, any cluster that matches the selector will cause a backup job to be created.\n When you request a backup, pgo will prompt you if you want to proceed because this action will delete any existing backup job for this cluster that might exist. 
The backup files will still be left intact but the actual Kubernetes Job will be removed prior to creating a new Job with the same name.\n pgo delete backup To delete a backup enter the following:\n pgo delete backup mycluster pgo delete cluster You can remove a cluster by running:\n pgo delete cluster restoredb Note, that this command will not remove the PVC associated with this cluster.\n Selectors also apply to the delete command as follows:\n pgo delete cluster --selector=project=xray This command will cause any cluster matching the selector to be removed.\n You can remove a cluster and it\u0026#8217;s data files by running:\n pgo delete cluster restoredb --delete-data You can remove a cluster, it\u0026#8217;s data files, and all backups by running:\n pgo delete cluster restoredb --delete-data --delete-backups When you specify a destructive delete like above, you will be prompted to make sure this is what you want to do. If you don\u0026#8217;t want to be prompted you can enter the --no-prompt command line flag.\n pgo scale When you create a Cluster, you will see in the output a variety of Kubernetes objects were created including:\n a Deployment holding the primary PostgreSQL database\n a Deployment holding the replica PostgreSQL database\n a service for the primary database\n a service for the replica databases\n Since Postgres is a single-primary database by design, the primary Deployment is set to a replica count of 1, it can not scale beyond 1.\n With Postgres, you can any n-number of replicas each of which connect to the primary forming a streaming replication postgres cluster. The Postgres replicas are read-only, whereas the primary is read-write. To create a Postgres replica enter a command such as:\n pgo scale mycluster The pgo scale command is additive, in that each time you execute it, it will create another replica which is added to the Postgres cluster.\n There are 2 service connections available to the PostgreSQL cluster. One is to the primary database which allows read-write SQL processing, and the other is to the set of read-only replica databases. 
The replica service performs round-robin load balancing to the replica databases.\n You can connect to the primary database and verify that it is replicating to the replica databases as follows:\n psql -h 10.107.180.159 -U postgres postgres -c 'table pg_stat_replication' You can view all clusters using the special keyword all:\n pgo show cluster all You can filter the results by Postgres version:\n pgo show cluster all --version=9.6.2 The scale command will let you specify a --node-label flag which can be used to influence what Kube node the replica will be scheduled upon.\n pgo scale mycluster --node-label=speed=fast If you don\u0026#8217;t specify a --node-label flag, a node affinity rule of NotIn will be specified to prefer that the replica be schedule on a node that the primary is not running on.\n You can also dictate what container resource and storage configurations will be used for a replica by passing in extra command flags:\n pgo scale mycluster --storage-config=storage1 --resources-config=small pgo upgrade You can perform a minor Postgres version upgrade of either a database or cluster as follows:\n pgo upgrade mycluster When you run this command, it will cause the operator to delete the existing containers of the database or cluster and recreate them using the currently defined Postgres container image specified in your pgo configuration file.\n The database data files remain untouched, only the container is updated, this will upgrade your Postgres server version only.\n You can perform a major Postgres version upgrade of either a database or cluster as follows:\n pgo upgrade mycluster --upgrade-type=major When you run this command, it will cause the operator to delete the existing containers of the database or cluster and recreate them using the currently defined Postgres container image specified in your pgo configuration file.\n The database data files are converted to the new major Postgres version as specified by the current Postgres image version in your pgo configuration file.\n In this scenario, the upgrade is performed by the Postgres pg_upgrade utility which is containerized in the crunchydata/crunchy-upgrade container. The operator will create a Job which runs the upgrade container, using the existing Postgres database files as input, and output the updated database files to a new PVC.\n Once the upgrade job is completed, the operator will create the original database or cluster container mounted with the new PVC which contains the upgraded database files.\n As the upgrade is processed, the status of the pgupgrade CRD is updated to give the user some insight into how the upgrade is proceeding. Upgrades like this can take a long time if your database is large. The operator creates a watch on the upgrade job to know when and how to proceed.\n Likewise, you can upgrade the cluster using a command line flag:\n pgo upgrade mycluster --ccp-image-tag=centos7-9.6.9-1.8.3 pgo upgrade mycluster --upgrade-type=major --ccp-image-tag=centos7-9.6.9-1.8.3 pgo delete upgrade To remove an upgrade CRD, issue the following:\n pgo delete upgrade pgo show pvc You can view the files on a PVC as follows:\n pgo show pvc mycluster In this example, the PVC is mycluster. 
This command is useful in some cases to examine what files are on a given PVC.\n In the case where you want to list a specific path on a PVC you can specify the path option as follows:\n pgo show pvc mycluster --pvc-root=mycluster-backups You can also list all PVCs that are created by the operator using:\n pgo show pvc all pgo show cluster You can view the passwords used by the cluster as follows:\n pgo show cluster mycluster --show-secrets=true Passwords are generated if not specified in your pgo configuration.\n pgo test You can test the database connections to a cluster:\n pgo test mycluster This command will test each service defined for the cluster using the postgres, primary, and normal user accounts defined for the cluster. The cluster credentials are accessed and used to test the database connections. The equivalent psql command is printed out as connections are tried, along with the connection status.\n Like other commands, you can use the selector to test a series of clusters:\n pgo test --selector=env=research pgo test all You can get output using the --output flag:\n pgo test all -o json pgo create policy To create a policy use the following syntax:\n pgo create policy policy1 --in-file=/tmp/policy1.sql pgo create policy policy1 --url=https://someurl/policy1.sql When you execute this command, it will create a policy named policy1 using the input file /tmp/policy1.sql as input. It will create on the server a PgPolicy CRD with the name policy1 that you can examine as follows:\n kubectl get pgpolicies policy1 -o json Policies get automatically applied to any cluster you create if you define in your pgo.yaml configuration a CLUSTER.POLICIES value. Policy SQL is executed as the postgres user.\n To view policies:\n pgo show policy all pgo delete policy To delete a policy use the following form:\n pgo delete policy policy1 pgo apply To apply an existing policy to a set of clusters, issue a command like this:\n pgo apply policy1 --selector=name=mycluster When you execute this command, it will look up clusters that have a label value of name=mycluster and then it will apply the policy1 label to that cluster and execute the policy SQL against that cluster using the postgres user account.\n Policies are executed as the superuser or postgres user in PostgreSQL. These should therefore be exercised with caution.\n If you want to view the clusters than have a specific policy applied to them, you can use the --selector flag as follows to filter on a policy name (e.g. policy1):\n pgo show cluster --selector=policy1=pgpolicy pgo user To create a new Postgres user to the mycluster cluster, execute:\n pgo create user sally --selector=name=mycluster To delete a Postgres user in the mycluster cluster, execute:\n pgo user --delete-user=sally --selector=name=mycluster To delete that user in all clusters:\n pgo user --delete-user=sally To change the password for a user in the mycluster cluster:\n pgo user --change-password=sally --selector=name=mycluster The password is generated and applied to the user sally.\n To see user passwords that have expired past a certain number of days in the mycluster cluster:\n pgo user --expired=7 --selector=name=mycluster To assign users to a cluster:\n pgo create user user1 --valid-days=30 --managed --db=userdb --selector=name=xraydb1 In this example, a user named user1 is created with a valid until password date set to expire in 30 days. That user will be granted access to the userdb database. 
This user account also will have an associated secret created to hold the password that was generated for this user. Any clusters that match the selector value will have this user created on it.\n To change a user password:\n pgo user --change-password=user1 --valid-days=10 --selector=name=xray1 In this example, a user named user1 has its password changed to a generated value and the valid until expiration date set to 10 days from now, this command will take effect across all clusters that match the selector. If you specify valid-days=-1 it will mean the password will not expire (e.g. infinity).\n To drop a user:\n pgo user --delete-user=user3 --selector=project=xray To see which passwords are set to expire in a given number of days:\n pgo user --expired=10 --selector=project=xray In this example, any clusters that match the selector are queried to see if any users are set to expire in 10 days.\n To update expired passwords in a cluster:\n pgo user --update-passwords --selector=name=mycluster pgo label You can apply a user defined label to a cluster as follows:\n pgo label --label=env=research --selector=project=xray In this example, we apply a label of env=research to any clusters that have an existing label of project=xray applied.\n pgo load A CSV file loading capability is supported currently. You can test that by creating a SQL Policy which will create a database table that will be loaded with the CSV data. For example:\n pgo create policy xrayapp --in-file=$COROOT/examples/policy/xrayapp.sql Then you can load a sample CSV file into a database as follows:\n pgo load --load-config=$COROOT/examples/sample-load-config.json --selector=name=mycluster The loading is based on a load definition found in the sample-load-config.json file. In that file, the data to be loaded is specified. When the pgo load command is executed, Jobs will be created to perform the loading for each cluster that matches the selector filter.\n If you include the --policies flag, any specified policies will be applied prior to the data being loaded. For example:\n pgo load --policies=\"rlspolicy,xrayapp\" --load-config=$COROOT/examples/sample-load-config.json --selector=name=mycluster Likewise you can load a sample json file into a database as follows:\n pgo load --policies=jsonload --load-config=$COROOT/examples/sample-json-load-config.json --selector=name=mycluster The load configuration file has the following YAML attributes:\n Table 1. 
Load Configuration File Definitions Attribute Description COImagePrefix\n the pgo-load image prefix to use for the load job\n COImageTag\n the pgo-load image tag to use for the load job\n DbDatabase\n the database schema to use for loading the data\n DbUser\n the database user to use for loading the data\n DbPort\n the database port of the database to load\n TableToLoad\n the PostgreSQL table to load\n FilePath\n the name of the file to be loaded\n FileType\n either csv or json, determines the type of data to be loaded\n PVCName\n the name of the PVC that holds the data file to be loaded\n SecurityContext\n either fsGroup or SupplementalGroup values\n pgo failover Starting with Release 2.6, there is a manual failover command which can be used to promote a replica to a primary role in a PostgreSQL cluster.\n This process includes the following actions: * pick a target replica to become the new primary * delete the current primary deployment to avoid user requests from going to multiple primary databases (split brain) * promote the targeted replica using pg_ctl promote, this will cause PostgreSQL to go into read-write mode * re-label the targeted replica to use the primary labels, this will match the primary service selector and cause new requests to the primary to be routed to the new primary (targeted replica)\n The command works like this:\n pgo failover mycluster --query That command will show you a list of replica targets you can choose to failover to. You will select one of those for the following command:\n pgo failover mycluster --target=mycluster-abxq There is a CRD called pgtask that will hold the failover request and also the status of that request. You can view the status by viewing it:\n kubectl get pgtasks mycluster-failover -o yaml Once completed, you will see a new replica has been started to replace the promoted replica, this happens automatically due to the re-lable, the Deployment will recreate its pod because of this. The failover typically takes only a few seconds, however, the creation of the replacement replica can take longer depending on how much data is being replicated.\n pgo df You can use the pgo df command to see the disk capacity of a cluster\u0026#8217;s PVC versus that of the PostgreSQL data that has been written to disk. If the capacity is less than 50% then the output is printed in red to alert the user.\n Run the command as follows:\n pgo df mycluster pgo df --selector=name=mycluster pgo df --selector=name=hang CLUSTER STATUS PGSIZE CAPACITY PCTUSED mycluster up 30 MB 1Gi 2 pgo status You can use the pgo status command to see overall pgo status. 
Selective metrics are displayed to provide some insights to the pgo user and administrator as to what is running currently in this namespace related to pgo.\n Run the command as follows:\n pgo status Operator Start: 2018-05-02 15:59:41 +0000 UTC Databases: 2 Backups: 2 Claims: 18 Total Volume Size: 18Gi Database Images: 4\tcrunchydata/crunchy-postgres:centos7-10.4-1.8.3 Databases Not Ready: " + "content": "Table of Contents pgo Commands pgo version pgo create cluster pgo backup pgo delete backup pgo delete cluster pgo scale pgo upgrade pgo delete upgrade pgo show pvc pgo show cluster pgo test pgo create policy pgo delete policy pgo apply pgo user pgo label pgo load pgo failover pgo df pgo status v3.1, 2018-06-13\n pgo Commands Prior to using pgo, users will need to specify the postgres-operator URL as follows:\n kubectl get service postgres-operator NAME CLUSTER-IP EXTERNAL-IP PORT(S) AGE postgres-operator 10.104.47.110 \u0026lt;none\u0026gt; 8443/TCP 7m export CO_APISERVER_URL=https://10.104.47.110:8443 pgo version pgo version To see what version of pgo client and postgres-operator you are running, use the following -\n pgo version pgo create cluster To create a database, use the following -\n pgo create cluster mycluster A more complex example is to create a series of clusters such as -\n pgo create cluster xraydb --series=3 --labels=project=xray --policies=xrayapp,rlspolicy In the example above, we provision 3 clusters that have a number appended into their resulting cluster name, apply a user defined label to each cluster, and also apply user defined policies to each cluster after they are created.\n You can then view that database as -\n pgo show cluster mycluster Also, if you like to see JSON formatted output, add the -o json flag -\n pgo show cluster mycluster -o json The output will give you the current status of the database pod and the IP address of the database service. If you have psql installed on your test system you can connect to the database using the service IP address -\n psql -h 10.105.121.12 -U postgres postgres User credentials are generated through Kubernetes Secrets automatically for the testuser, primaryuser and postgres accounts. The generated passwords can be viewed by running the pgo show cluster command with the --show-secrets flag. More details are available on user management below.\n You can view all databases using the special keyword all -\n pgo show cluster all You can filter the results based on the Postgres Version -\n pgo show cluster all --version=9.6.2 You can also add metrics collection to a cluster by using the --metrics command flag as follows -\n pgo create cluster testcluster --metrics This command flag causes a crunchy-collect container to be added to the database cluster pod and enables metrics collection on that database pod. For this to work, you will need to configure the Crunchy metrics example as found in the Crunchy Container Suite.\n New clusters typically pick up the container image version to use based on the pgo configuration file\u0026#8217;s CCP_IMAGE_TAG setting. You can override this value using the --ccp-image-tag command line flag -\n pgo create cluster mycluster --ccp-image-tag=centos7-9.6.5-1.6.0 You can also add a pgpool deployment into a cluster by using the --pgpool command flag as follows:\n pgo create cluster testcluster --pgpool This will cause a crunchy-pgpool container to be started and initially configured for a cluster and the testuser cluster credential. 
See below for more details on running a pgpool deployment as part of your cluster.\n You can also enable archive logging into a dedicated PVC by using the --archive command flag as follows:\n pgo create cluster testcluster --archive This will cause a new PVC to be created to hold archive logs. Space is consumed by these logs but archives enable you to perform Point-In-Time-Recovery.\n To enable auto failover on this cluster, use the following flag:\n pgo create cluster testcluster --autofail Auto failover, when set on the cluster, informs the operator to watch for NotReady events on this cluster and, when one occurs, to create a failover state machine which acts as a timer for the cluster. If the timer expires, then a failover is triggered on the cluster, turning one of the cluster replica pods into the replacement primary pod. See the How It Works documentation for more details on auto failover.\n pgo backup You can start a backup job for a cluster as follows:\n pgo backup mycluster You can view the backup:\n pgo show backup mycluster View the PVC folder and the backups contained therein:\n pgo show pvc mycluster-backup pgo show pvc mycluster-backup --pvc-root=mycluster-backups The output from this command is important in that it can let you copy/paste a backup snapshot path and use it for restoring a database or essentially cloning a database with an existing backup archive.\n For example, to restore a database from a backup archive:\n pgo create cluster restoredb --backup-path=mycluster-backups/2017-03-27-13-56-49 --backup-pvc=mycluster-backup --secret-from=mycluster This will create a new database called restoredb based on the backup found in mycluster-backups/2017-03-27-13-56-49 and the secrets of the mycluster cluster.\n Selectors can be used to perform backups as well, for example:\n pgo backup --selector=project=xray In this example, any cluster that matches the selector will cause a backup job to be created.\n When you request a backup, pgo will prompt you if you want to proceed because this action will delete any existing backup job for this cluster that might exist. The backup files will still be left intact but the actual Kubernetes Job will be removed prior to creating a new Job with the same name.\n You can override the PVC used by the backup job with the following:\n pgo backup mycluster --pvc-name=myremotepvc This might be useful for special backup cases, perhaps to create a backup on a disaster recovery PVC.\n pgo delete backup To delete a backup enter the following:\n pgo delete backup mycluster pgo delete cluster You can remove a cluster by running:\n pgo delete cluster restoredb Note that this command will not remove the PVC associated with this cluster.\n Selectors also apply to the delete command as follows:\n pgo delete cluster --selector=project=xray This command will cause any cluster matching the selector to be removed.\n You can remove a cluster and its data files by running:\n pgo delete cluster restoredb --delete-data You can remove a cluster, its data files, and all backups by running:\n pgo delete cluster restoredb --delete-data --delete-backups When you specify a destructive delete like above, you will be prompted to make sure this is what you want to do. 
If you don\u0026#8217;t want to be prompted, you can enter the --no-prompt command line flag.\n pgo scale When you create a Cluster, you will see in the output that a variety of Kubernetes objects were created, including:\n a Deployment holding the primary PostgreSQL database\n a Deployment holding the replica PostgreSQL database\n a service for the primary database\n a service for the replica databases\n Since Postgres is a single-primary database by design, the primary Deployment is set to a replica count of 1; it cannot scale beyond 1.\n With Postgres, you can have any number of replicas, each of which connects to the primary, forming a streaming replication Postgres cluster. The Postgres replicas are read-only, whereas the primary is read-write. To create a Postgres replica, enter a command such as:\n pgo scale mycluster The pgo scale command is additive, in that each time you execute it, it will create another replica which is added to the Postgres cluster.\n There are 2 service connections available to the PostgreSQL cluster. One is to the primary database which allows read-write SQL processing, and the other is to the set of read-only replica databases. The replica service performs round-robin load balancing to the replica databases.\n You can connect to the primary database and verify that it is replicating to the replica databases as follows:\n psql -h 10.107.180.159 -U postgres postgres -c 'table pg_stat_replication' You can view all clusters using the special keyword all:\n pgo show cluster all You can filter the results by Postgres version:\n pgo show cluster all --version=9.6.2 The scale command will let you specify a --node-label flag which can be used to influence what Kube node the replica will be scheduled upon.\n pgo scale mycluster --node-label=speed=fast If you don\u0026#8217;t specify a --node-label flag, a node affinity rule of NotIn will be specified to prefer that the replica be scheduled on a node that the primary is not running on.\n You can also dictate what container resource and storage configurations will be used for a replica by passing in extra command flags:\n pgo scale mycluster --storage-config=storage1 --resources-config=small pgo upgrade You can perform a minor Postgres version upgrade of either a database or cluster as follows:\n pgo upgrade mycluster When you run this command, it will cause the operator to delete the existing containers of the database or cluster and recreate them using the currently defined Postgres container image specified in your pgo configuration file.\n The database data files remain untouched; only the container is updated, and this will upgrade your Postgres server version only.\n You can perform a major Postgres version upgrade of either a database or cluster as follows:\n pgo upgrade mycluster --upgrade-type=major When you run this command, it will cause the operator to delete the existing containers of the database or cluster and recreate them using the currently defined Postgres container image specified in your pgo configuration file.\n The database data files are converted to the new major Postgres version as specified by the current Postgres image version in your pgo configuration file.\n In this scenario, the upgrade is performed by the Postgres pg_upgrade utility which is containerized in the crunchydata/crunchy-upgrade container. 
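While a major upgrade runs you can also follow it from the Kubernetes side - a sketch, assuming the pgupgrades CRD listed in the Debugging section of How It Works is installed -\n kubectl get jobs kubectl get pgupgrades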
The operator will create a Job which runs the upgrade container, using the existing Postgres database files as input, and output the updated database files to a new PVC.\n Once the upgrade job is completed, the operator will create the original database or cluster container mounted with the new PVC which contains the upgraded database files.\n As the upgrade is processed, the status of the pgupgrade CRD is updated to give the user some insight into how the upgrade is proceeding. Upgrades like this can take a long time if your database is large. The operator creates a watch on the upgrade job to know when and how to proceed.\n Likewise, you can upgrade the cluster using a command line flag:\n pgo upgrade mycluster --ccp-image-tag=centos7-9.6.9-1.8.3 pgo upgrade mycluster --upgrade-type=major --ccp-image-tag=centos7-9.6.9-1.8.3 pgo delete upgrade To remove an upgrade CRD, issue the following:\n pgo delete upgrade pgo show pvc You can view the files on a PVC as follows:\n pgo show pvc mycluster In this example, the PVC is mycluster. This command is useful in some cases to examine what files are on a given PVC.\n In the case where you want to list a specific path on a PVC you can specify the path option as follows:\n pgo show pvc mycluster --pvc-root=mycluster-backups You can also list all PVCs that are created by the operator using:\n pgo show pvc all pgo show cluster You can view the passwords used by the cluster as follows:\n pgo show cluster mycluster --show-secrets=true Passwords are generated if not specified in your pgo configuration.\n pgo test You can test the database connections to a cluster:\n pgo test mycluster This command will test each service defined for the cluster using the postgres, primary, and normal user accounts defined for the cluster. The cluster credentials are accessed and used to test the database connections. The equivalent psql command is printed out as connections are tried, along with the connection status.\n Like other commands, you can use the selector to test a series of clusters:\n pgo test --selector=env=research pgo test all You can get output using the --output flag:\n pgo test all -o json pgo create policy To create a policy use the following syntax:\n pgo create policy policy1 --in-file=/tmp/policy1.sql pgo create policy policy1 --url=https://someurl/policy1.sql When you execute this command, it will create a policy named policy1 using the input file /tmp/policy1.sql as input. It will create on the server a PgPolicy CRD with the name policy1 that you can examine as follows:\n kubectl get pgpolicies policy1 -o json Policies get automatically applied to any cluster you create if you define in your pgo.yaml configuration a CLUSTER.POLICIES value. Policy SQL is executed as the postgres user.\n To view policies:\n pgo show policy all pgo delete policy To delete a policy use the following form:\n pgo delete policy policy1 pgo apply To apply an existing policy to a set of clusters, issue a command like this:\n pgo apply policy1 --selector=name=mycluster When you execute this command, it will look up clusters that have a label value of name=mycluster and then it will apply the policy1 label to that cluster and execute the policy SQL against that cluster using the postgres user account.\n Policies are executed as the superuser or postgres user in PostgreSQL. These should therefore be exercised with caution.\n If you want to view the clusters than have a specific policy applied to them, you can use the --selector flag as follows to filter on a policy name (e.g. 
policy1):\n pgo show cluster --selector=policy1=pgpolicy pgo user To create a new Postgres user to the mycluster cluster, execute:\n pgo create user sally --selector=name=mycluster To delete a Postgres user in the mycluster cluster, execute:\n pgo user delete user sally --selector=name=mycluster To change the password for a user in the mycluster cluster:\n pgo user --change-password=sally --selector=name=mycluster The password is generated and applied to the user sally.\n To see user passwords that have expired past a certain number of days in the mycluster cluster:\n pgo user --expired=7 --selector=name=mycluster To assign users to a cluster:\n pgo create user user1 --valid-days=30 --managed --db=userdb --selector=name=xraydb1 In this example, a user named user1 is created with a valid until password date set to expire in 30 days. That user will be granted access to the userdb database. This user account also will have an associated secret created to hold the password that was generated for this user. Any clusters that match the selector value will have this user created on it.\n To change a user password:\n pgo user --change-password=user1 --valid-days=10 --selector=name=xray1 In this example, a user named user1 has its password changed to a generated value and the valid until expiration date set to 10 days from now, this command will take effect across all clusters that match the selector. If you specify valid-days=-1 it will mean the password will not expire (e.g. infinity).\n To see which passwords are set to expire in a given number of days:\n pgo user --expired=10 --selector=project=xray In this example, any clusters that match the selector are queried to see if any users are set to expire in 10 days.\n To update expired passwords in a cluster:\n pgo user --update-passwords --selector=name=mycluster pgo label You can apply a user defined label to a cluster as follows:\n pgo label --label=env=research --selector=project=xray In this example, we apply a label of env=research to any clusters that have an existing label of project=xray applied.\n pgo load A CSV file loading capability is supported currently. You can test that by creating a SQL Policy which will create a database table that will be loaded with the CSV data. For example:\n pgo create policy xrayapp --in-file=$COROOT/examples/policy/xrayapp.sql Then you can load a sample CSV file into a database as follows:\n pgo load --load-config=$COROOT/examples/sample-load-config.json --selector=name=mycluster The loading is based on a load definition found in the sample-load-config.json file. In that file, the data to be loaded is specified. When the pgo load command is executed, Jobs will be created to perform the loading for each cluster that matches the selector filter.\n If you include the --policies flag, any specified policies will be applied prior to the data being loaded. For example:\n pgo load --policies=\"rlspolicy,xrayapp\" --load-config=$COROOT/examples/sample-load-config.json --selector=name=mycluster Likewise you can load a sample json file into a database as follows:\n pgo load --policies=jsonload --load-config=$COROOT/examples/sample-json-load-config.json --selector=name=mycluster The load configuration file has the following YAML attributes:\n Table 1. 
Load Configuration File Definitions Attribute Description COImagePrefix\n the pgo-load image prefix to use for the load job\n COImageTag\n the pgo-load image tag to use for the load job\n DbDatabase\n the database schema to use for loading the data\n DbUser\n the database user to use for loading the data\n DbPort\n the database port of the database to load\n TableToLoad\n the PostgreSQL table to load\n FilePath\n the name of the file to be loaded\n FileType\n either csv or json, determines the type of data to be loaded\n PVCName\n the name of the PVC that holds the data file to be loaded\n SecurityContext\n either fsGroup or SupplementalGroup values\n pgo failover Starting with Release 2.6, there is a manual failover command which can be used to promote a replica to a primary role in a PostgreSQL cluster.\n This process includes the following actions:\n pick a target replica to become the new primary\n delete the current primary deployment to avoid user requests from going to multiple primary databases (split brain)\n promote the targeted replica using pg_ctl promote, this will cause PostgreSQL to go into read-write mode\n re-label the targeted replica to use the primary labels, this will match the primary service selector and cause new requests to the primary to be routed to the new primary (targeted replica)\n The command works like this:\n pgo failover mycluster --query That command will show you a list of replica targets you can choose to failover to. You will select one of those for the following command:\n pgo failover mycluster --target=mycluster-abxq There is a CRD called pgtask that will hold the failover request and also the status of that request. You can view the status by viewing it:\n kubectl get pgtasks mycluster-failover -o yaml Once completed, you will see a new replica has been started to replace the promoted replica, this happens automatically due to the re-lable, the Deployment will recreate its pod because of this. The failover typically takes only a few seconds, however, the creation of the replacement replica can take longer depending on how much data is being replicated.\n pgo df You can use the pgo df command to see the disk capacity of a cluster\u0026#8217;s PVC versus that of the PostgreSQL data that has been written to disk. If the capacity is less than 50% then the output is printed in red to alert the user.\n Run the command as follows:\n pgo df mycluster pgo df --selector=name=mycluster pgo df --selector=name=hang CLUSTER STATUS PGSIZE CAPACITY PCTUSED mycluster up 30 MB 1Gi 2 pgo status You can use the pgo status command to see overall pgo status. Selective metrics are displayed to provide some insights to the pgo user and administrator as to what is running currently in this namespace related to pgo.\n Run the command as follows:\n pgo status Operator Start: 2018-05-02 15:59:41 +0000 UTC Databases: 2 Backups: 2 Claims: 18 Total Volume Size: 18Gi Database Images: 4\tcrunchydata/crunchy-postgres:centos7-10.4-1.8.3 Databases Not Ready: " }, { "uri": "https://crunchydata.github.io/postgres-operator/", "title": "Crunchy Data PostgreSQL Operator", "tags": [], "description": "", - "content": " v2.6, 2018-06-04\n Documentation Please view the official Crunchy Data PostgreSQL Operator documentation here. If you are interested in contributing or making an update to the documentation, please view the Contributing Guidelines.\n What is the Operator? 
The postgres-operator is a controller that runs within a Kubernetes cluster that provides a means to deploy and manage PostgreSQL clusters.\n Use the postgres-operator to -\n deploy PostgreSQL containers including streaming replication clusters\n scale up PostgreSQL clusters with extra replicas\n add pgpool and metrics sidecars to PostgreSQL clusters\n apply SQL policies to PostgreSQL clusters\n assign metadata tags to PostgreSQL clusters\n maintain PostgreSQL users and passwords\n perform minor and major upgrades to PostgreSQL clusters\n load simple CSV and JSON files into PostgreSQL clusters\n perform database backups\n Design The postgres-operator design incorporates the following concepts -\n adds Custom Resource Definitions for PostgreSQL to Kubernetes\n adds controller logic that watches events on PostgreSQL resources\n provides a command line client (pgo) and REST API for interfacing with the postgres-operator\n provides for very customized deployments including container resources, storage configurations, and PostgreSQL custom configurations\n More design information is found on the How It Works page.\n Requirements The postgres-operator runs on any Kubernetes and Openshift platform that supports Custom Resource Definitions.\n The Operator project builds and operates with the following containers -\n PVC Listing Container\n Remove Data Container\n postgres-operator Container\n apiserver Container\n file load Container\n This Operator is developed and tested on the following operating systems but is known to run on other operating systems -\n CentOS 7\n RHEL 7\n Installation To build and deploy the Operator on your Kubernetes system, follow the instructions documented on the Installation page.\n If you\u0026#8217;re seeking to upgrade your existing Operator installation, please visit the Upgrading the Operator page.\n Configuration The operator is template-driven; this makes it simple to configure both the client and the operator. The configuration options are documented on the Configuration page.\n Getting Started postgres-operator commands are documented on the Getting Started page.\n " + "content": " v3.1, 2018-06-13\n Documentation Please view the official Crunchy Data PostgreSQL Operator documentation here. If you are interested in contributing or making an update to the documentation, please view the Contributing Guidelines.\n What is the Operator? 
The postgres-operator is a controller that runs within a Kubernetes cluster that provides a means to deploy and manage PostgreSQL clusters.\n Use the postgres-operator to -\n deploy PostgreSQL containers including streaming replication clusters\n scale up PostgreSQL clusters with extra replicas\n add pgpool and metrics sidecars to PostgreSQL clusters\n apply SQL policies to PostgreSQL clusters\n assign metadata tags to PostgreSQL clusters\n maintain PostgreSQL users and passwords\n perform minor and major upgrades to PostgreSQL clusters\n load simple CSV and JSON files into PostgreSQL clusters\n perform database backups\n Design The postgres-operator design incorporates the following concepts -\n adds Custom Resource Definitions for PostgreSQL to Kubernetes\n adds controller logic that watches events on PostgreSQL resources\n provides a command line client (pgo) and REST API for interfacing with the postgres-operator\n provides for very customized deployments including container resources, storage configurations, and PostgreSQL custom configurations\n More design information is found on the How It Works page.\n Requirements The postgres-operator runs on any Kubernetes and Openshift platform that supports Custom Resource Definitions.\n The Operator project builds and operates with the following containers -\n PVC Listing Container\n Remove Data Container\n postgres-operator Container\n apiserver Container\n file load Container\n This Operator is developed and tested on the following operating systems but is known to run on other operating systems -\n CentOS 7\n RHEL 7\n Installation To build and deploy the Operator on your Kubernetes system, follow the instructions documented on the Installation page.\n If you\u0026#8217;re seeking to upgrade your existing Operator installation, please visit the Upgrading the Operator page.\n Configuration The operator is template-driven; this makes it simple to configure both the client and the operator. 
The configuration options are documented on the Configuration page.\n Getting Started postgres-operator commands are documented on the Getting Started page.\n " }, { "uri": "https://crunchydata.github.io/postgres-operator/categories/", diff --git a/docs/index.xml b/docs/index.xml index 9bbb5f56ce..e79220b4c4 100644 --- a/docs/index.xml +++ b/docs/index.xml @@ -17,7 +17,7 @@ Thu, 26 Apr 2018 15:22:14 -0700 https://crunchydata.github.io/postgres-operator/installation/quick-installation/ - Table of Contents Overview Quickstart GKE/PKS Openshift Container Platform Next Steps v3.0, 2018-06-04 + Table of Contents Overview Quickstart GKE/PKS Openshift Container Platform Next Steps v3.1, 2018-06-13 Overview There are currently quickstart scripts that seek to automate the deployment to popular Kubernetes environments - quickstart-for-gke.sh quickstart-for-ocp.sh @@ -31,9 +31,10 @@ Thu, 26 Apr 2018 15:22:21 -0700 https://crunchydata.github.io/postgres-operator/installation/manual-installation/ - Table of Contents Project Structure Installation Prerequsites Basic Installation Create HostPath Directory Build Images & Deploy Makefile Targets Next Steps v3.0, 2018-06-04 - Project Structure To perform an installation of the operator, first create the project structure as follows on your host, here we assume a local directory called odev - - export GOPATH=$HOME/odev mkdir -p $HOME/odev/src $HOME/odev/bin $HOME/odev/pkg mkdir -p $GOPATH/src/github.com/crunchydata/ Next, get a tagged release of the source code - + Table of Contents Project Structure Installation Prerequsites Basic Installation Create HostPath Directory Build Images & Deploy Makefile Targets Next Steps v3.1, 2018-06-13 + Project Structure First, define the following environment variables in .bashrc: + export GOPATH=$HOME/odev export GOBIN=$GOPATH/bin export CO_NAMESPACE=demo export CO_CMD=kubectl export COROOT=$GOPATH/src/github.com/crunchydata/postgres-operator export CO_IMAGE_PREFIX=crunchydata export CO_IMAGE_TAG=centos7-3.1 export CO_BASEOS=centos7 When deploying on Openshift Container Platform, the CO_CMD environment variable should be: + export CO_CMD=oc To perform an installation of the operator, first create the project structure as follows on your host, here we assume a local directory called odev - @@ -42,31 +43,19 @@ Thu, 26 Apr 2018 15:24:16 -0700 https://crunchydata.github.io/postgres-operator/installation/helm-chart/ - v3.0, 2018-06-04 + v3.1, 2018-06-13 Helm Chart First, pull prebuilt versions from Dockerhub of the postgres-operator containers, specify the image versions, and execute the following Makefile target - - export CO_IMAGE_PREFIX=crunchydata export CO_IMAGE_TAG=centos7-3.0 make pull Then, build and deploy the operator using the provided Helm chart - + export CO_IMAGE_PREFIX=crunchydata export CO_IMAGE_TAG=centos7-3.1 make pull Then, build and deploy the operator using the provided Helm chart - cd $COROOT/chart helm install ./postgres-operator helm ls Next Steps Next, visit the Deployment page to deploy the Operator, verify the installation, and view various storage configurations. - - Upgrading the Operator - https://crunchydata.github.io/postgres-operator/installation/upgrading-the-operator/ - Tue, 24 Apr 2018 18:27:30 -0700 - - https://crunchydata.github.io/postgres-operator/installation/upgrading-the-operator/ - v3.0, 2018-06-04 - Upgrading from v2.4 to v2.5 For a full list of additions and revisions that occurred in the PostgreSQL Operator v2.5 release, please view the related release page here. 
- Required Updates This section notes some required steps that will need to be taken in the process of upgrading from v2.4 to v2.5. - Configuration File It will be necessary to update your existing pgo. - - Deployment https://crunchydata.github.io/postgres-operator/installation/deployment/ Thu, 26 Apr 2018 15:26:40 -0700 https://crunchydata.github.io/postgres-operator/installation/deployment/ - Table of Contents Verify Operator Status Configure pgo Client Verify pgo Client Storage Configuration Next Steps v3.0, 2018-06-04 + Table of Contents Verify Operator Status Configure pgo Client Verify pgo Client Storage Configuration Next Steps v3.1, 2018-06-13 This document details verifying the installation of the PostgreSQL Operator is successful, in addition to detailing some different storage configurations that can be made. Verify Operator Status To verify that the operator is deployed and running, run the following: kubectl get pod --selector=name=postgres-operator You should see output similar to this: @@ -78,10 +67,22 @@ Tue, 24 Apr 2018 18:26:56 -0700 https://crunchydata.github.io/postgres-operator/installation/configuration/ - Table of Contents Overview Openshift Container Platform Security Configuration Kube RBAC Basic Authentication Configure TLS pgo RBAC apiserver Configuration postgres-operator Container Configuration bash Completion REST API Deploying pgpool v3.0, 2018-06-04 + Table of Contents Overview Openshift Container Platform Security Configuration Kube RBAC Basic Authentication Configure TLS pgo RBAC apiserver Configuration postgres-operator Container Configuration bash Completion REST API Deploying pgpool v3.1, 2018-06-13 Overview This document describes how to configure the operator beyond the default configurations in addition to detailing what the configuration settings mean. Openshift Container Platform To run the Operator on Openshift Container Platform note the following requirements - + + Upgrading the Operator + https://crunchydata.github.io/postgres-operator/installation/upgrading-the-operator/ + Tue, 24 Apr 2018 18:27:30 -0700 + + https://crunchydata.github.io/postgres-operator/installation/upgrading-the-operator/ + v3.1, 2018-06-13 + Upgrading from v2.4 to v2.5 For a full list of additions and revisions that occurred in the PostgreSQL Operator v2.5 release, please view the related release page here. + Required Updates This section notes some required steps that will need to be taken in the process of upgrading from v2.4 to v2.5. + Configuration File It will be necessary to update your existing pgo. + + \ No newline at end of file diff --git a/docs/installation/configuration/index.html b/docs/installation/configuration/index.html index 916f2ad092..9859e14e3a 100644 --- a/docs/installation/configuration/index.html +++ b/docs/installation/configuration/index.html @@ -5,7 +5,7 @@ - + Configuration :: Crunchy Data PostgreSQL Operator Documentation @@ -92,13 +92,6 @@ Helm Chart
    - -
  • -
    - - Upgrading the Operator - -
  • @@ -113,6 +106,13 @@ Configuration
    +
  • +
  • +
    + + Upgrading the Operator + +
  • @@ -235,8 +235,8 @@

    Configuration

  • Basic Authentication
  • Configure TLS
  • pgo RBAC
  • -
  • apiserver Configuration
  • -
  • postgres-operator Container Configuration
  • +
  • apiserver Configuration
  • +
  • postgres-operator Container Configuration
  • bash Completion
  • @@ -245,7 +245,7 @@

    Configuration

    -

    v3.0, 2018-06-04

    +

    v3.1, 2018-06-13

    Overview

    @@ -300,13 +300,11 @@

    Kube RBAC

    -

    If you are not using the demo namespace, it will be required to edit the following and change the namespace where the service account and cluster role bindings will be deployed.

    -
    -
    -

    $COROOT/deploy/service-account.yaml

    -
    -
    -

    $COROOT/deploy/cluster-role-binding.yaml

    +

    The CO_NAMESPACE environment variable determines the namespace +that is used within the deployment of the operator. If you +are deploying to the demo namespace, the following +setting should be defined in your .bashrc: +export CO_NAMESPACE=demo
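If the target namespace does not already exist, it can be created before the operator is deployed; a minimal sketch, assuming kubectl is already pointed at the target cluster:

# create the namespace named by CO_NAMESPACE (demo in this example); skip if it already exists
kubectl create namespace demo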

    @@ -571,7 +569,7 @@

    pgo RBAC

    -

    apiserver Configuration

    +

    apiserver Configuration

    The postgres-operator pod includes the apiserver which is a REST API that pgo users are able to communicate with.

    @@ -599,7 +597,7 @@

    apiserver Configuration

    set to the latest release of the Crunchy Container Suite.

    -

    pgo.yaml

    +

    pgo.yaml

    The default pgo.yaml configuration file, included in $COROOT/conf/apiserver/pgo.yaml, looks like this -

    @@ -988,7 +986,7 @@

    Disaster Recovery Using

    -

    postgres-operator Container Configuration

    +

    postgres-operator Container Configuration

    To enable debug level messages from the operator pod, set the CRUNCHY_DEBUG environment variable to true within its deployment file deployment.json.
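The documented approach is to set the variable in deployment.json before deploying. As an alternative sketch for an operator that is already running, the same variable can be toggled with kubectl set env (assumes a reasonably recent kubectl, a deployment named postgres-operator, and the demo namespace):

# enable debug logging on a running operator deployment
kubectl -n demo set env deployment/postgres-operator CRUNCHY_DEBUG=true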

    @@ -1198,7 +1196,7 @@

    Deploying pgpool

    +
    diff --git a/docs/installation/deployment/index.html b/docs/installation/deployment/index.html index 70f40d3b9a..00bc243c0b 100644 --- a/docs/installation/deployment/index.html +++ b/docs/installation/deployment/index.html @@ -5,7 +5,7 @@ - + Deployment :: Crunchy Data PostgreSQL Operator Documentation @@ -92,13 +92,6 @@ Helm Chart
    - -
  • -
  • @@ -113,6 +106,13 @@ Configuration
    +
  • +
  • +
  • @@ -228,14 +228,14 @@

    Deployment

    Table of Contents
    -

    v3.0, 2018-06-04

    +

    v3.1, 2018-06-13

    This document details verifying the installation of the PostgreSQL Operator @@ -301,7 +301,7 @@

    Verify Operator Status

    export CO_IMAGE_PREFIX=crunchydata
    -export CO_IMAGE_TAG=centos7-3.0
    +export CO_IMAGE_TAG=centos7-3.1
     export GOPATH=$HOME/odev
     export GOBIN=$GOPATH/bin
     export PATH=$PATH:$GOBIN
    @@ -315,7 +315,7 @@ 

    Verify Operator Status

    -

    Configure pgo Client

    +

    Configure pgo Client

    The pgo command line client requires TLS for securing the connection to the operator’s REST API. This configuration is performed as follows -

    @@ -435,8 +435,8 @@

    Verify pgo Client

    pgo version
    -pgo client version 3.0
    -apiserver version 3.0
    +pgo client version 3.1 +apiserver version 3.1
    @@ -621,7 +621,7 @@

    Next Steps

    diff --git a/docs/installation/helm-chart/index.html b/docs/installation/helm-chart/index.html index 29b691adde..f61f2da263 100644 --- a/docs/installation/helm-chart/index.html +++ b/docs/installation/helm-chart/index.html @@ -5,7 +5,7 @@ - + Helm Chart :: Crunchy Data PostgreSQL Operator Documentation @@ -92,13 +92,6 @@ Helm Chart
    - -
  • -
  • @@ -113,6 +106,13 @@ Configuration
    +
  • +
  • +
  • @@ -225,7 +225,7 @@

    Helm Chart

    -

    v3.0, 2018-06-04

    +

    v3.1, 2018-06-13

    Helm Chart

    @@ -237,7 +237,7 @@

    Helm Chart

    export CO_IMAGE_PREFIX=crunchydata
    -export CO_IMAGE_TAG=centos7-3.0
    +export CO_IMAGE_TAG=centos7-3.1
     make pull
    @@ -274,7 +274,7 @@

    Next Steps

    +
    diff --git a/docs/installation/index.html b/docs/installation/index.html index cec4958b03..3eff8bfeb8 100644 --- a/docs/installation/index.html +++ b/docs/installation/index.html @@ -5,7 +5,7 @@ - + Installation :: Crunchy Data PostgreSQL Operator Documentation @@ -92,13 +92,6 @@ Helm Chart
    - -
  • -
  • @@ -113,6 +106,13 @@ Configuration
    +
  • +
  • +
  • @@ -221,7 +221,7 @@

    Installation

    -

    v3.0, 2018-06-04

    +

    v3.1, 2018-06-13

    Installation

    diff --git a/docs/installation/index.xml b/docs/installation/index.xml index 2fd35301fc..c99fb55e76 100644 --- a/docs/installation/index.xml +++ b/docs/installation/index.xml @@ -17,7 +17,7 @@ Thu, 26 Apr 2018 15:22:14 -0700 https://crunchydata.github.io/postgres-operator/installation/quick-installation/ - Table of Contents Overview Quickstart GKE/PKS Openshift Container Platform Next Steps v3.0, 2018-06-04 + Table of Contents Overview Quickstart GKE/PKS Openshift Container Platform Next Steps v3.1, 2018-06-13 Overview There are currently quickstart scripts that seek to automate the deployment to popular Kubernetes environments - quickstart-for-gke.sh quickstart-for-ocp.sh @@ -31,9 +31,10 @@ Thu, 26 Apr 2018 15:22:21 -0700 https://crunchydata.github.io/postgres-operator/installation/manual-installation/ - Table of Contents Project Structure Installation Prerequsites Basic Installation Create HostPath Directory Build Images & Deploy Makefile Targets Next Steps v3.0, 2018-06-04 - Project Structure To perform an installation of the operator, first create the project structure as follows on your host, here we assume a local directory called odev - - export GOPATH=$HOME/odev mkdir -p $HOME/odev/src $HOME/odev/bin $HOME/odev/pkg mkdir -p $GOPATH/src/github.com/crunchydata/ Next, get a tagged release of the source code - + Table of Contents Project Structure Installation Prerequsites Basic Installation Create HostPath Directory Build Images & Deploy Makefile Targets Next Steps v3.1, 2018-06-13 + Project Structure First, define the following environment variables in .bashrc: + export GOPATH=$HOME/odev export GOBIN=$GOPATH/bin export CO_NAMESPACE=demo export CO_CMD=kubectl export COROOT=$GOPATH/src/github.com/crunchydata/postgres-operator export CO_IMAGE_PREFIX=crunchydata export CO_IMAGE_TAG=centos7-3.1 export CO_BASEOS=centos7 When deploying on Openshift Container Platform, the CO_CMD environment variable should be: + export CO_CMD=oc To perform an installation of the operator, first create the project structure as follows on your host, here we assume a local directory called odev - @@ -42,31 +43,19 @@ Thu, 26 Apr 2018 15:24:16 -0700 https://crunchydata.github.io/postgres-operator/installation/helm-chart/ - v3.0, 2018-06-04 + v3.1, 2018-06-13 Helm Chart First, pull prebuilt versions from Dockerhub of the postgres-operator containers, specify the image versions, and execute the following Makefile target - - export CO_IMAGE_PREFIX=crunchydata export CO_IMAGE_TAG=centos7-3.0 make pull Then, build and deploy the operator using the provided Helm chart - + export CO_IMAGE_PREFIX=crunchydata export CO_IMAGE_TAG=centos7-3.1 make pull Then, build and deploy the operator using the provided Helm chart - cd $COROOT/chart helm install ./postgres-operator helm ls Next Steps Next, visit the Deployment page to deploy the Operator, verify the installation, and view various storage configurations. - - Upgrading the Operator - https://crunchydata.github.io/postgres-operator/installation/upgrading-the-operator/ - Tue, 24 Apr 2018 18:27:30 -0700 - - https://crunchydata.github.io/postgres-operator/installation/upgrading-the-operator/ - v3.0, 2018-06-04 - Upgrading from v2.4 to v2.5 For a full list of additions and revisions that occurred in the PostgreSQL Operator v2.5 release, please view the related release page here. - Required Updates This section notes some required steps that will need to be taken in the process of upgrading from v2.4 to v2.5. 
- Configuration File It will be necessary to update your existing pgo. - - Deployment https://crunchydata.github.io/postgres-operator/installation/deployment/ Thu, 26 Apr 2018 15:26:40 -0700 https://crunchydata.github.io/postgres-operator/installation/deployment/ - Table of Contents Verify Operator Status Configure pgo Client Verify pgo Client Storage Configuration Next Steps v3.0, 2018-06-04 + Table of Contents Verify Operator Status Configure pgo Client Verify pgo Client Storage Configuration Next Steps v3.1, 2018-06-13 This document details verifying the installation of the PostgreSQL Operator is successful, in addition to detailing some different storage configurations that can be made. Verify Operator Status To verify that the operator is deployed and running, run the following: kubectl get pod --selector=name=postgres-operator You should see output similar to this: @@ -78,10 +67,22 @@ Tue, 24 Apr 2018 18:26:56 -0700 https://crunchydata.github.io/postgres-operator/installation/configuration/ - Table of Contents Overview Openshift Container Platform Security Configuration Kube RBAC Basic Authentication Configure TLS pgo RBAC apiserver Configuration postgres-operator Container Configuration bash Completion REST API Deploying pgpool v3.0, 2018-06-04 + Table of Contents Overview Openshift Container Platform Security Configuration Kube RBAC Basic Authentication Configure TLS pgo RBAC apiserver Configuration postgres-operator Container Configuration bash Completion REST API Deploying pgpool v3.1, 2018-06-13 Overview This document describes how to configure the operator beyond the default configurations in addition to detailing what the configuration settings mean. Openshift Container Platform To run the Operator on Openshift Container Platform note the following requirements - + + Upgrading the Operator + https://crunchydata.github.io/postgres-operator/installation/upgrading-the-operator/ + Tue, 24 Apr 2018 18:27:30 -0700 + + https://crunchydata.github.io/postgres-operator/installation/upgrading-the-operator/ + v3.1, 2018-06-13 + Upgrading from v2.4 to v2.5 For a full list of additions and revisions that occurred in the PostgreSQL Operator v2.5 release, please view the related release page here. + Required Updates This section notes some required steps that will need to be taken in the process of upgrading from v2.4 to v2.5. + Configuration File It will be necessary to update your existing pgo. + + \ No newline at end of file diff --git a/docs/installation/manual-installation/index.html b/docs/installation/manual-installation/index.html index c635cab95e..ef5e0785e7 100644 --- a/docs/installation/manual-installation/index.html +++ b/docs/installation/manual-installation/index.html @@ -5,7 +5,7 @@ - + Manual Installation :: Crunchy Data PostgreSQL Operator Documentation @@ -92,13 +92,6 @@ Helm Chart
    - -
  • -
  • @@ -113,6 +106,13 @@ Configuration
    +
  • +
  • +
  • @@ -240,18 +240,41 @@

    Manual Installation

    -

    v3.0, 2018-06-04

    +

    v3.1, 2018-06-13

    Project Structure

    -

    To perform an installation of the operator, first create the project structure as follows on your host, here we assume a local directory called odev -

    +

    First, define the following environment variables in .bashrc:

    export GOPATH=$HOME/odev
    -mkdir -p $HOME/odev/src $HOME/odev/bin $HOME/odev/pkg
    +export GOBIN=$GOPATH/bin
    +export CO_NAMESPACE=demo
    +export CO_CMD=kubectl
    +export COROOT=$GOPATH/src/github.com/crunchydata/postgres-operator
    +export CO_IMAGE_PREFIX=crunchydata
    +export CO_IMAGE_TAG=centos7-3.1
    +export CO_BASEOS=centos7
    +
    +
    +
    +
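After adding these variables, reload the shell environment so they take effect in the current session, for example:

source ~/.bashrc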

    When deploying on Openshift Container Platform, the CO_CMD environment +variable should be:

    +
    +
    +
    +
    export CO_CMD=oc
    +
    +
    +
    +

    To perform an installation of the operator, first create the project structure as follows on your host; here we assume a local directory called odev -

    +
    +
    +
    +
    mkdir -p $HOME/odev/src $HOME/odev/bin $HOME/odev/pkg
     mkdir -p $GOPATH/src/github.com/crunchydata/
    @@ -263,7 +286,7 @@

    Project Structure

    cd $GOPATH/src/github.com/crunchydata
     git clone https://github.com/CrunchyData/postgres-operator.git
     cd postgres-operator
    -git checkout 3.0
    +git checkout 3.1
    @@ -302,13 +325,8 @@

    Installation Prerequisites

    -

    If you are not using the demo namespace, it will be required to edit the following and change the namespace where the service account and cluster role bindings will be deployed.

    -
    -
    -

    $COROOT/deploy/service-account.yaml

    -
    -
    -

    $COROOT/deploy/cluster-role-binding.yaml

    +

    The namespace used by the operator is determined by the +CO_NAMESPACE environment variable setting.
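As a quick sanity check before deploying, it can be useful to confirm that the namespace exists; a sketch, assuming CO_NAMESPACE is exported as described above:

# verify the target namespace is present
kubectl get namespace "$CO_NAMESPACE"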

    @@ -318,7 +336,7 @@

    Installation Prerequisites

    Permissions are granted to the Operator by means of a Service Account called postgres-operator. That service account is added to the Operator deployment.

    -

    The postgres-operator service account is granted cluster-admin priviledges using a cluster role binding postgres-operator-cluster-role-binding.

    +

    The postgres-operator service account is granted privileges using a role binding pgo-role-binding.
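To see what was created, the role binding and service account can be inspected directly; a sketch, assuming the operator was deployed into the demo namespace and that pgo-role-binding is a namespaced RoleBinding as described:

kubectl -n demo get rolebinding pgo-role-binding -o yaml
kubectl -n demo get serviceaccount postgres-operator -o yaml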

    See here for more details on how to enable RBAC roles and modify the scope of the permissions to suit your needs.

    @@ -347,10 +365,7 @@

    Create HostPath Directory

    -
    export CO_NAMESPACE=demo
    -export CO_CMD=kubectl
    -export COROOT=$GOPATH/src/github.com/crunchydata/postgres-operator
    -go get github.com/blang/expenv
    +
    go get github.com/blang/expenv
     $COROOT/pv/create-pv.sh
    @@ -375,13 +390,11 @@

    Build Images & Deploy

    Packaged Images

    -

    To pull prebuilt versions from Dockerhub of the postgres-operator containers, specify the image versions, and execute the following Makefile target -

    +

    To pull prebuilt versions from Dockerhub of the postgres-operator containers, execute the following Makefile target -

    -
    export CO_IMAGE_PREFIX=crunchydata
    -export CO_IMAGE_TAG=centos7-3.0
    -make pull
    +
    make pull
    @@ -393,14 +406,14 @@

    Packaged Images

    Github Releases

  • -

    extract (e.g. tar xvzf postgres-operator.3.0.tar.gz)

    +

    extract (e.g. tar xvzf postgres-operator.3.1.tar.gz)

  • cd $HOME
    -tar xvzf ./postgres-operator.3.0.tar.gz
    +tar xvzf ./postgres-operator.3.1.tar.gz
    @@ -491,30 +504,21 @@

    Requirements

    -

    First, install the project library dependencies. The godep dependency manager is used for this purpose. -

    +

    Before compiling the Operator, it’s necessary to install Mercurial.

    -
    cd $COROOT
    -make setup
    +
    sudo yum -y install mercurial
    -

    Then, compile the PostgreSQL Operator using the Makefile.

    +

    Then, install the project library dependencies; the godep dependency manager is used for this purpose. Finally, compile the PostgreSQL Operator using the Makefile and deploy the operator to your Kubernetes cluster.

    cd $COROOT
    +make setup
     make all
    -which pgo
    -
    -
    -
    -

    Finally, deploy the operator to your Kubernetes cluster.

    -
    -
    -
    -
    cd $COROOT
     make deployoperator
    diff --git a/docs/installation/quick-installation/index.html b/docs/installation/quick-installation/index.html index 950837ce32..95d2c53c8e 100644 --- a/docs/installation/quick-installation/index.html +++ b/docs/installation/quick-installation/index.html @@ -5,7 +5,7 @@ - + Quick Installation :: Crunchy Data PostgreSQL Operator Documentation @@ -92,13 +92,6 @@ Helm Chart
    - -
  • -
    - - Upgrading the Operator - -
  • @@ -113,6 +106,13 @@ Configuration
    +
  • +
  • +
    + + Upgrading the Operator + +
  • @@ -230,7 +230,7 @@

    Quick Installation

  • Overview
  • Quickstart
  • @@ -238,7 +238,7 @@

    Quick Installation

    -

    v3.0, 2018-06-04

    +

    v3.1, 2018-06-13

    Overview

    @@ -277,7 +277,7 @@

    Overview

    Quickstart

    -

    GKE/PKS

    +

    GKE/PKS

    The quickstart-for-gke.sh script will allow users to set up the Postgres Operator quickly on GKE including PKS. This script is tested on GKE but can be modified for use with other Kubernetes environments as well.

    diff --git a/docs/installation/upgrading-the-operator/index.html b/docs/installation/upgrading-the-operator/index.html index 298a2968f7..298edfcab2 100644 --- a/docs/installation/upgrading-the-operator/index.html +++ b/docs/installation/upgrading-the-operator/index.html @@ -5,7 +5,7 @@ - + Upgrading the Operator :: Crunchy Data PostgreSQL Operator Documentation @@ -92,13 +92,6 @@ Helm Chart
    - -
  • -
  • @@ -113,6 +106,13 @@ Configuration
    +
  • +
  • +
  • @@ -225,7 +225,7 @@

    Upgrading the Operator

    -

    v3.0, 2018-06-04

    +

    v3.1, 2018-06-13

    @@ -308,7 +308,7 @@

    Secrets

    For a full list of additions and revisions that occurred in the PostgreSQL Operator v2.5 release, please view the related release -page here.

    +page here.

    Required Updates

    @@ -463,8 +463,8 @@

    Replica CRD

    + +
    diff --git a/docs/sitemap.xml b/docs/sitemap.xml index 1395000370..d8de2e43aa 100644 --- a/docs/sitemap.xml +++ b/docs/sitemap.xml @@ -18,8 +18,8 @@ - https://crunchydata.github.io/postgres-operator/installation/upgrading-the-operator/ - 2018-04-24T18:27:30-07:00 + https://crunchydata.github.io/postgres-operator/installation/deployment/ + 2018-04-26T15:26:40-07:00 @@ -28,13 +28,13 @@ - https://crunchydata.github.io/postgres-operator/installation/deployment/ - 2018-04-26T15:26:40-07:00 + https://crunchydata.github.io/postgres-operator/installation/configuration/ + 2018-04-24T18:26:56-07:00 - https://crunchydata.github.io/postgres-operator/installation/configuration/ - 2018-04-24T18:26:56-07:00 + https://crunchydata.github.io/postgres-operator/installation/upgrading-the-operator/ + 2018-04-24T18:27:30-07:00 diff --git a/docs/tags/index.html b/docs/tags/index.html index 938a4f18ea..b6332f41b3 100644 --- a/docs/tags/index.html +++ b/docs/tags/index.html @@ -5,7 +5,7 @@ - + Tags :: Crunchy Data PostgreSQL Operator Documentation @@ -91,13 +91,6 @@ Helm Chart
    - -
  • -
  • @@ -112,6 +105,13 @@ Configuration
    +
  • +
  • +
  • diff --git a/docs/theme-original/style.css b/docs/theme-original/style.css index 0049ad8541..7d35507c4e 100644 --- a/docs/theme-original/style.css +++ b/docs/theme-original/style.css @@ -670,22 +670,18 @@ code, kbd, pre, samp { font-size: 92%; } code { - border-radius: 2px; - white-space: nowrap; - background: #FFF7DD; - border: 1px solid #ffeeb8; - padding: 0px 2px; } + padding: .2rem .4rem; + font-size: 90%; + color: #bd4147; + background-color: #f8f9fa; + border-radius: .25rem; } .hljs { background: #1d1f21; } -code + .copy-to-clipboard { - margin-left: -2px; - border-left: 0 !important; - font-size: inherit !important; - vertical-align: middle; - height: 21px; - top: -1px; } +.copy-to-clipboard { + display:none; +} pre { padding: 1rem; @@ -1049,24 +1045,6 @@ a.github-link { #body #breadcrumbs span { padding: 0 0.1rem; } -.copy-to-clipboard { - display: inline-block; - position: relative; - color: #5e5e5e; - background-color: #f8efd1; - text-align: center; - cursor: pointer; - border-radius: 0 2px 2px 0; - padding: 0.1rem; - border: 1px solid #ffeeb8; - -webkit-transition: all 0.15s ease; - -moz-transition: all 0.15s ease; - -ms-transition: all 0.15s ease; - transition: all 0.15s ease; } - -.copy-to-clipboard:hover { - background-color: #f8efd1; } - .clippy-icon { padding-left: 6px; padding-right: 4px; @@ -1081,18 +1059,6 @@ a.github-link { fill: black !important; pointer-events: none; } -pre .copy-to-clipboard { - position: absolute; - right: 4px; - top: 4px; - background-color: #e4e4e4; - color: #ccc; - border-radius: 2px; } - -pre .copy-to-clipboard:hover { - background-color: #e4e4e4; - color: #fff; } - .parent-element { -webkit-transform-style: preserve-3d; -moz-transform-style: preserve-3d;