Merge pull request #330 from rook/release-1.8
Sync from upstream release-1.8 to downstream release-4.10
leseb committed Jan 24, 2022
2 parents aa13613 + b2937fb commit 327cedf
Showing 67 changed files with 1,052 additions and 505 deletions.
14 changes: 12 additions & 2 deletions .github/workflows/canary-integration-test.yml
@@ -57,6 +57,13 @@ jobs:
kubectl -n rook-ceph exec $toolbox -- mkdir -p /etc/ceph/test-data
kubectl -n rook-ceph cp tests/ceph-status-out $toolbox:/etc/ceph/test-data/
kubectl -n rook-ceph cp deploy/examples/create-external-cluster-resources.py $toolbox:/etc/ceph
# remove the existing client auth that will be re-created by external script
kubectl -n rook-ceph exec $toolbox -- ceph auth del client.csi-cephfs-node
kubectl -n rook-ceph exec $toolbox -- ceph auth del client.csi-cephfs-provisioner
kubectl -n rook-ceph exec $toolbox -- ceph auth del client.csi-rbd-node
kubectl -n rook-ceph exec $toolbox -- ceph auth del client.csi-rbd-provisioner
# print existing client auth
kubectl -n rook-ceph exec $toolbox -- ceph auth ls
timeout 10 sh -c "until kubectl -n rook-ceph exec $toolbox -- python3 /etc/ceph/create-external-cluster-resources.py --rbd-data-pool-name replicapool; do echo 'waiting for script to succeed' && sleep 1; done"
- name: dry run external script create-external-cluster-resources.py
@@ -831,9 +838,12 @@ jobs:
with:
fetch-depth: 0

- name: run Encryption KMS IBM Key Protect
- name: run encryption KMS IBM Key Protect
uses: ./.github/workflows/encryption-pvc-kms-ibm-kp
if: "env.IBM_KP_INSTANCE_ID != '' && env.IBM_KP_SERVICE_API_KEY != ''"
if: "env.IBM_KP_SERVICE_INSTANCE_ID != '' && env.IBM_KP_SERVICE_API_KEY != ''"
env:
IBM_KP_SERVICE_INSTANCE_ID: ${{ secrets.IBM_INSTANCE_ID }}
IBM_KP_SERVICE_API_KEY: ${{ secrets.IBM_SERVICE_API_KEY }}
with:
ibm-instance-id: ${{ secrets.IBM_INSTANCE_ID }}
ibm-service-api-key: ${{ secrets.IBM_SERVICE_API_KEY }}

5 changes: 4 additions & 1 deletion .github/workflows/daily-nightly-jobs.yml
@@ -326,7 +326,10 @@ jobs:

- name: run Encryption KMS IBM Key Protect
uses: ./.github/workflows/encryption-pvc-kms-ibm-kp
if: "env.IBM_KP_INSTANCE_ID != '' && env.IBM_KP_SERVICE_API_KEY != ''"
if: "env.IBM_KP_SERVICE_INSTANCE_ID != '' && env.IBM_KP_SERVICE_API_KEY != ''"
env:
IBM_KP_SERVICE_INSTANCE_ID: ${{ secrets.IBM_INSTANCE_ID }}
IBM_KP_SERVICE_API_KEY: ${{ secrets.IBM_SERVICE_API_KEY }}
with:
github-token: ${{ secrets.GITHUB_TOKEN }}
ibm-instance-id: ${{ secrets.IBM_KP_INSTANCE_ID }}

10 changes: 5 additions & 5 deletions .github/workflows/encryption-pvc-kms-ibm-kp/action.yml
@@ -2,7 +2,7 @@ name: Encryption KMS IBM Key Protect
description: Reusable workflow to test Encryption KMS IBM Key Protect
inputs:
ibm-instance-id:
description: IBM_KP_INSTANCE_ID from the calling workflow
description: IBM_KP_SERVICE_INSTANCE_ID from the calling workflow
required: true
ibm-service-api-key:
description: IBM_KP_SERVICE_API_KEY from the calling workflow
@@ -15,12 +15,12 @@ runs:
using: "composite"
steps:
- name: fail if env no present
if: "env.IBM_KP_INSTANCE_ID == '' || env.IBM_KP_SERVICE_API_KEY == ''"
if: "env.IBM_KP_SERVICE_INSTANCE_ID == '' || env.IBM_KP_SERVICE_API_KEY == ''"
env:
IBM_KP_INSTANCE_ID: ${{ inputs.ibm-instance-id }}
IBM_KP_SERVICE_INSTANCE_ID: ${{ inputs.ibm-instance-id }}
IBM_KP_SERVICE_API_KEY: ${{ inputs.ibm-service-api-key }}
shell: bash --noprofile --norc -eo pipefail -x {0}
run: echo "IBM_KP_INSTANCE_ID and IBM_KP_SERVICE_API_KEY must be set in the environment" && exit 0
run: echo "IBM_KP_SERVICE_INSTANCE_ID and IBM_KP_SERVICE_API_KEY must be set in the environment" && exit 0

- name: setup cluster resources
uses: ./.github/workflows/setup-cluster-resources
@@ -42,7 +42,7 @@ runs:
- name: deploy cluster
shell: bash --noprofile --norc -eo pipefail -x {0}
env:
IBM_KP_INSTANCE_ID: ${{ inputs.ibm-instance-id }}
IBM_KP_SERVICE_INSTANCE_ID: ${{ inputs.ibm-instance-id }}
IBM_KP_SERVICE_API_KEY: ${{ inputs.ibm-service-api-key }}
run: |
tests/scripts/github-action-helper.sh deploy_manifest_with_local_build deploy/examples/operator.yaml

7 changes: 5 additions & 2 deletions Documentation/ceph-cluster-crd.md
@@ -516,13 +516,15 @@ Annotations and Labels can be specified so that the Rook components will have th

You can set annotations / labels for Rook components for the list of key value pairs:

* `all`: Set annotations / labels for all components
* `all`: Set annotations / labels for all components except `clusterMetadata`.
* `mgr`: Set annotations / labels for MGRs
* `mon`: Set annotations / labels for mons
* `osd`: Set annotations / labels for OSDs
* `prepareosd`: Set annotations / labels for OSD Prepare Jobs
* `monitoring`: Set annotations / labels for service monitor
* `crashcollector`: Set annotations / labels for crash collectors
* `clusterMetadata`: Set annotations only to `rook-ceph-mon-endpoints` configmap and the `rook-ceph-mon` and `rook-ceph-admin-keyring` secrets. These annotations will not be merged with the `all` annotations. The common usage is for backing up these critical resources with `kubed`.
Note the clusterMetadata annotation will not be merged with the `all` annotation.
When other keys are set, `all` will be merged together with the specific component.
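As a rough sketch of how these keys fit into the CephCluster CR (the annotation and label values below are illustrative placeholders, not part of this change):

```yaml
apiVersion: ceph.rook.io/v1
kind: CephCluster
metadata:
  name: rook-ceph
  namespace: rook-ceph
spec:
  annotations:
    # applied to every component except clusterMetadata
    all:
      example.com/owner: storage-team
    # set only on the rook-ceph-mon-endpoints configmap and the
    # rook-ceph-mon / rook-ceph-admin-keyring secrets; not merged with `all`
    clusterMetadata:
      backup.example.com/enabled: "true"
  labels:
    # merged with `all` for the service monitor
    monitoring:
      team: storage
```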

### Placement Configuration Settings
@@ -601,10 +603,11 @@ Priority class names can be specified so that the Rook components will have thos

You can set priority class names for Rook components for the list of key value pairs:

* `all`: Set priority class names for MGRs, Mons, OSDs.
* `all`: Set priority class names for MGRs, Mons, OSDs, and crashcollectors.
* `mgr`: Set priority class names for MGRs.
* `mon`: Set priority class names for Mons.
* `osd`: Set priority class names for OSDs.
* `crashcollector`: Set priority class names for crashcollectors.

The specific component keys will act as overrides to `all`.
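A minimal sketch of the corresponding CephCluster snippet (the class names are placeholders and must exist as PriorityClass objects in the cluster):

```yaml
spec:
  priorityClassNames:
    all: rook-ceph-default-priority-class
    mgr: rook-ceph-mgr-priority-class                        # overrides `all` for MGRs only
    crashcollector: rook-ceph-crashcollector-priority-class
```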

4 changes: 2 additions & 2 deletions Documentation/ceph-kms.md
@@ -266,8 +266,8 @@ security:
kms:
# name of the k8s config map containing all the kms connection details
connectionDetails:
KMS_PROVIDER: ibm-kp
IBM_KP_INSTANCE_ID: <instance ID that was retrieved in the first paragraph>
KMS_PROVIDER: ibmkeyprotect
IBM_KP_SERVICE_INSTANCE_ID: <instance ID that was retrieved in the first paragraph>
# name of the k8s secret containing the service API Key
tokenSecretName: ibm-kp-svc-api-key
```
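The `tokenSecretName` above refers to a Kubernetes secret holding the service API key. A hedged sketch of creating it, assuming the key is stored under `IBM_KP_SERVICE_API_KEY` (matching the variable name used elsewhere in this change):

```console
kubectl -n rook-ceph create secret generic ibm-kp-svc-api-key \
  --from-literal=IBM_KP_SERVICE_API_KEY=<service API key>
```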

2 changes: 1 addition & 1 deletion Documentation/ceph-monitoring.md
@@ -38,7 +38,7 @@ With the Prometheus operator running, we can create a service monitor that will
From the root of your locally cloned Rook repo, go the monitoring directory:

```console
$ git clone --single-branch --branch v1.8.2 https://github.com/rook/rook.git
$ git clone --single-branch --branch v1.8.3 https://github.com/rook/rook.git
cd rook/deploy/examples/monitoring
```
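From that directory the service monitor manifests are applied; a minimal sketch of the usual next step (the manifest file name is assumed from the upstream examples and is not shown in this diff):

```console
kubectl create -f service-monitor.yaml
```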

38 changes: 19 additions & 19 deletions Documentation/ceph-upgrade.md
@@ -71,12 +71,12 @@ With this upgrade guide, there are a few notes to consider:

Unless otherwise noted due to extenuating requirements, upgrades from one patch release of Rook to
another are as simple as updating the common resources and the image of the Rook operator. For
example, when Rook v1.8.2 is released, the process of updating from v1.8.0 is as simple as running
example, when Rook v1.8.3 is released, the process of updating from v1.8.0 is as simple as running
the following:

First get the latest common resources manifests that contain the latest changes for Rook v1.8.
```sh
git clone --single-branch --depth=1 --branch v1.8.2 https://github.com/rook/rook.git
git clone --single-branch --depth=1 --branch v1.8.3 https://github.com/rook/rook.git
cd rook/deploy/examples
```

@@ -87,7 +87,7 @@ section for instructions on how to change the default namespaces in `common.yaml
Then apply the latest changes from v1.8 and update the Rook Operator image.
```console
kubectl apply -f common.yaml -f crds.yaml
kubectl -n rook-ceph set image deploy/rook-ceph-operator rook-ceph-operator=rook/ceph:v1.8.2
kubectl -n rook-ceph set image deploy/rook-ceph-operator rook-ceph-operator=rook/ceph:v1.8.3
```

As exemplified above, it is a good practice to update Rook-Ceph common resources from the example
@@ -266,7 +266,7 @@ Any pod that is using a Rook volume should also remain healthy:
## Rook Operator Upgrade Process

In the examples given in this guide, we will be upgrading a live Rook cluster running `v1.7.8` to
the version `v1.8.2`. This upgrade should work from any official patch release of Rook v1.7 to any
the version `v1.8.3`. This upgrade should work from any official patch release of Rook v1.7 to any
official patch release of v1.8.

**Rook release from `master` are expressly unsupported.** It is strongly recommended that you use
@@ -291,7 +291,7 @@ by the Operator. Also update the Custom Resource Definitions (CRDs).

Get the latest common resources manifests that contain the latest changes.
```sh
git clone --single-branch --depth=1 --branch v1.8.2 https://github.com/rook/rook.git
git clone --single-branch --depth=1 --branch v1.8.3 https://github.com/rook/rook.git
cd rook/deploy/examples
```

@@ -343,7 +343,7 @@ The largest portion of the upgrade is triggered when the operator's image is upd
When the operator is updated, it will proceed to update all of the Ceph daemons.

```sh
kubectl -n $ROOK_OPERATOR_NAMESPACE set image deploy/rook-ceph-operator rook-ceph-operator=rook/ceph:v1.8.2
kubectl -n $ROOK_OPERATOR_NAMESPACE set image deploy/rook-ceph-operator rook-ceph-operator=rook/ceph:v1.8.3
```

#### Admission controller
@@ -377,16 +377,16 @@ watch --exec kubectl -n $ROOK_CLUSTER_NAMESPACE get deployments -l rook_cluster=
```

As an example, this cluster is midway through updating the OSDs. When all deployments report `1/1/1`
availability and `rook-version=v1.8.2`, the Ceph cluster's core components are fully updated.
availability and `rook-version=v1.8.3`, the Ceph cluster's core components are fully updated.

>```
>Every 2.0s: kubectl -n rook-ceph get deployment -o j...
>
>rook-ceph-mgr-a req/upd/avl: 1/1/1 rook-version=v1.8.2
>rook-ceph-mon-a req/upd/avl: 1/1/1 rook-version=v1.8.2
>rook-ceph-mon-b req/upd/avl: 1/1/1 rook-version=v1.8.2
>rook-ceph-mon-c req/upd/avl: 1/1/1 rook-version=v1.8.2
>rook-ceph-osd-0 req/upd/avl: 1// rook-version=v1.8.2
>rook-ceph-mgr-a req/upd/avl: 1/1/1 rook-version=v1.8.3
>rook-ceph-mon-a req/upd/avl: 1/1/1 rook-version=v1.8.3
>rook-ceph-mon-b req/upd/avl: 1/1/1 rook-version=v1.8.3
>rook-ceph-mon-c req/upd/avl: 1/1/1 rook-version=v1.8.3
>rook-ceph-osd-0 req/upd/avl: 1// rook-version=v1.8.3
>rook-ceph-osd-1 req/upd/avl: 1/1/1 rook-version=v1.7.8
>rook-ceph-osd-2 req/upd/avl: 1/1/1 rook-version=v1.7.8
>```
@@ -398,14 +398,14 @@ An easy check to see if the upgrade is totally finished is to check that there i
# kubectl -n $ROOK_CLUSTER_NAMESPACE get deployment -l rook_cluster=$ROOK_CLUSTER_NAMESPACE -o jsonpath='{range .items[*]}{"rook-version="}{.metadata.labels.rook-version}{"\n"}{end}' | sort | uniq
This cluster is not yet finished:
rook-version=v1.7.8
rook-version=v1.8.2
rook-version=v1.8.3
This cluster is finished:
rook-version=v1.8.2
rook-version=v1.8.3
```

### **5. Verify the updated cluster**

At this point, your Rook operator should be running version `rook/ceph:v1.8.2`.
At this point, your Rook operator should be running version `rook/ceph:v1.8.3`.

Verify the Ceph cluster's health using the [health verification section](#health-verification).

@@ -545,14 +545,14 @@ kubectl -n $ROOK_OPERATOR_NAMESPACE edit configmap rook-ceph-operator-config
The default upstream images are included below, which you can change to your desired images.

```yaml
ROOK_CSI_CEPH_IMAGE: "quay.io/cephcsi/cephcsi:v3.5.0"
ROOK_CSI_CEPH_IMAGE: "quay.io/cephcsi/cephcsi:v3.5.1"
ROOK_CSI_REGISTRAR_IMAGE: "k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.4.0"
ROOK_CSI_PROVISIONER_IMAGE: "k8s.gcr.io/sig-storage/csi-provisioner:v3.1.0"
ROOK_CSI_ATTACHER_IMAGE: "k8s.gcr.io/sig-storage/csi-attacher:v3.4.0"
ROOK_CSI_RESIZER_IMAGE: "k8s.gcr.io/sig-storage/csi-resizer:v1.3.0"
ROOK_CSI_SNAPSHOTTER_IMAGE: "k8s.gcr.io/sig-storage/csi-snapshotter:v4.2.0"
CSI_VOLUME_REPLICATION_IMAGE: "quay.io/csiaddons/volumereplication-operator:v0.1.0"
ROOK_CSIADDONS_IMAGE: "quay.io/csiaddons/k8s-sidecar:v0.1.0"
ROOK_CSIADDONS_IMAGE: "quay.io/csiaddons/k8s-sidecar:v0.2.1"
```
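Instead of editing the configmap interactively, an individual image can also be pinned with a merge patch; a minimal sketch using one of the defaults listed above:

```console
kubectl -n $ROOK_OPERATOR_NAMESPACE patch configmap rook-ceph-operator-config --type merge \
  -p '{"data":{"ROOK_CSI_CEPH_IMAGE":"quay.io/cephcsi/cephcsi:v3.5.1"}}'
```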

### **Use default images**
@@ -576,7 +576,7 @@ k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.4.0
k8s.gcr.io/sig-storage/csi-provisioner:v3.1.0
k8s.gcr.io/sig-storage/csi-resizer:v1.3.0
k8s.gcr.io/sig-storage/csi-snapshotter:v4.2.0
quay.io/cephcsi/cephcsi:v3.5.0
quay.io/cephcsi/cephcsi:v3.5.1
quay.io/csiaddons/volumereplication-operator:v0.1.0
quay.io/csiaddons/k8s-sidecar:v0.1.0
quay.io/csiaddons/k8s-sidecar:v0.2.1
```
4 changes: 2 additions & 2 deletions Documentation/helm-operator.md
@@ -134,7 +134,7 @@ The following tables lists the configurable parameters of the rook-operator char
| `csi.rbdLivenessMetricsPort` | Ceph CSI RBD driver metrics port. | `8080` |
| `csi.forceCephFSKernelClient` | Enable Ceph Kernel clients on kernel < 4.17 which support quotas for Cephfs. | `true` |
| `csi.kubeletDirPath` | Kubelet root directory path (if the Kubelet uses a different path for the `--root-dir` flag) | `/var/lib/kubelet` |
| `csi.cephcsi.image` | Ceph CSI image. | `quay.io/cephcsi/cephcsi:v3.5.0` |
| `csi.cephcsi.image` | Ceph CSI image. | `quay.io/cephcsi/cephcsi:v3.5.1` |
| `csi.rbdPluginUpdateStrategy` | CSI Rbd plugin daemonset update strategy, supported values are OnDelete and RollingUpdate. | `OnDelete` |
| `csi.cephFSPluginUpdateStrategy` | CSI CephFS plugin daemonset update strategy, supported values are OnDelete and RollingUpdate. | `OnDelete` |
| `csi.registrar.image` | Kubernetes CSI registrar image. | `k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.4.0` |
@@ -147,7 +147,7 @@ The following tables lists the configurable parameters of the rook-operator char
| `csi.volumeReplication.enabled` | Enable Volume Replication. | `false` |
| `csi.volumeReplication.image` | Volume Replication Controller image. | `quay.io/csiaddons/volumereplication-operator:v0.1.0` |
| `csi.csiAddons.enabled` | Enable CSIAddons | `false` |
| `csi.csiAddons.image` | CSIAddons Sidecar image. | `quay.io/csiaddons/k8s-sidecar:v0.1.0` |
| `csi.csiAddons.image` | CSIAddons Sidecar image. | `quay.io/csiaddons/k8s-sidecar:v0.2.1` |
| `admissionController.tolerations` | Array of tolerations in YAML format which will be added to admission controller deployment. | <none> |
| `admissionController.nodeAffinity` | The node labels for affinity of the admission controller deployment (***) | <none> |
| `allowMultipleFilesystems` | **(experimental in Octopus (v15))** Allows multiple filesystems to be deployed to a Ceph cluster. | `false` |
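As a sketch of how these values are typically overridden at install or upgrade time (the repository alias, release name, and namespace are assumptions about a standard install, not part of this change):

```console
helm repo add rook-release https://charts.rook.io/release
helm upgrade --install rook-ceph rook-release/rook-ceph --namespace rook-ceph \
  --set csi.cephcsi.image=quay.io/cephcsi/cephcsi:v3.5.1 \
  --set csi.csiAddons.enabled=true \
  --set csi.csiAddons.image=quay.io/csiaddons/k8s-sidecar:v0.2.1
```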

2 changes: 1 addition & 1 deletion Documentation/quickstart.md
@@ -34,7 +34,7 @@ In order to configure the Ceph storage cluster, at least one of these local stor
A simple Rook cluster can be created with the following kubectl commands and [example manifests](https://github.com/rook/rook/blob/{{ branchName }}/deploy/examples).

```console
$ git clone --single-branch --branch v1.8.2 https://github.com/rook/rook.git
$ git clone --single-branch --branch v1.8.3 https://github.com/rook/rook.git
cd rook/deploy/examples
kubectl create -f crds.yaml -f common.yaml -f operator.yaml
kubectl create -f cluster.yaml

4 changes: 2 additions & 2 deletions build/rbac/rbac.yaml
@@ -758,8 +758,8 @@ spec:
- min: 9283
max: 9283
# port for CSIAddons
- min: 9061
max: 9079
- min: 9070
max: 9070
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1

48 changes: 48 additions & 0 deletions deploy/charts/rook-ceph-cluster/templates/volumesnapshotclass.yaml
@@ -0,0 +1,48 @@
{{- $filesystemvsc := .Values.cephFileSystemVolumeSnapshotClass -}}
{{- $blockpoolvsc := .Values.cephBlockPoolsVolumeSnapshotClass -}}

---
{{- if default false $filesystemvsc.enabled }}
{{- if .Capabilities.APIVersions.Has "snapshot.storage.k8s.io/v1" }}
apiVersion: snapshot.storage.k8s.io/v1
{{- else }}
apiVersion: snapshot.storage.k8s.io/v1beta1
{{- end }}
kind: VolumeSnapshotClass
metadata:
name: {{ $filesystemvsc.name }}
annotations:
snapshot.storage.kubernetes.io/is-default-class: "{{ if default false $filesystemvsc.isDefault }}true{{ else }}false{{ end }}"
driver: {{ .Values.operatorNamespace }}.cephfs.csi.ceph.com
parameters:
clusterID: {{ .Release.Namespace }}
csi.storage.k8s.io/snapshotter-secret-name: rook-csi-cephfs-provisioner
csi.storage.k8s.io/snapshotter-secret-namespace: {{ .Release.Namespace }}
{{- if $filesystemvsc.parameters }}
{{ toYaml $filesystemvsc.parameters | indent 2 }}
{{- end }}
deletionPolicy: {{ default "Delete" $filesystemvsc.deletionPolicy }}
{{- end }}

---
{{- if default false $blockpoolvsc.enabled }}
{{- if .Capabilities.APIVersions.Has "snapshot.storage.k8s.io/v1" }}
apiVersion: snapshot.storage.k8s.io/v1
{{- else }}
apiVersion: snapshot.storage.k8s.io/v1beta1
{{- end }}
kind: VolumeSnapshotClass
metadata:
name: {{ $blockpoolvsc.name }}
annotations:
snapshot.storage.kubernetes.io/is-default-class: "{{ if default false $blockpoolvsc.isDefault }}true{{ else }}false{{ end }}"
driver: {{ .Values.operatorNamespace }}.rbd.csi.ceph.com
parameters:
clusterID: {{ .Release.Namespace }}
csi.storage.k8s.io/snapshotter-secret-name: rook-csi-rbd-provisioner
csi.storage.k8s.io/snapshotter-secret-namespace: {{ .Release.Namespace }}
{{- if $blockpoolvsc.parameters }}
{{ toYaml $blockpoolvsc.parameters | indent 2 }}
{{- end }}
deletionPolicy: {{ default "Delete" $blockpoolvsc.deletionPolicy }}
{{- end }}
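With the template above rendered (see the new chart values further down for enabling it), workloads can request snapshots against the generated class. A minimal sketch, assuming an existing RBD-backed PVC named `rbd-pvc` and the default `ceph-block` class name:

```yaml
apiVersion: snapshot.storage.k8s.io/v1
kind: VolumeSnapshot
metadata:
  name: rbd-pvc-snapshot
spec:
  volumeSnapshotClassName: ceph-block      # default name from cephBlockPoolsVolumeSnapshotClass
  source:
    persistentVolumeClaimName: rbd-pvc     # placeholder PVC name
```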
17 changes: 17 additions & 0 deletions deploy/charts/rook-ceph-cluster/values.yaml
@@ -238,6 +238,7 @@ cephClusterSpec:
# mon: rook-ceph-mon-priority-class
# osd: rook-ceph-osd-priority-class
# mgr: rook-ceph-mgr-priority-class
# crashcollector: rook-ceph-crashcollector-priority-class

storage: # cluster level storage configuration and selection
useAllNodes: true
@@ -397,6 +398,22 @@ cephFileSystems:
# in hyperconverged settings where the volume is mounted on the same node as the osds.
csi.storage.k8s.io/fstype: ext4

cephFileSystemVolumeSnapshotClass:
enabled: false
name: ceph-filesystem
isDefault: true
deletionPolicy: Delete
# see https://rook.io/docs/rook/latest/ceph-csi-snapshot.html#cephfs-snapshots for available configuration
parameters: {}

cephBlockPoolsVolumeSnapshotClass:
enabled: false
name: ceph-block
isDefault: false
deletionPolicy: Delete
# see https://rook.io/docs/rook/latest/ceph-csi-snapshot.html#rbd-snapshots for available configuration
parameters: {}

cephObjectStores:
- name: ceph-objectstore
# see https://github.com/rook/rook/blob/master/Documentation/ceph-object-store-crd.md#object-store-settings for available configuration