Skip to content

Commit

Permalink
Merge pull request #202 from shiftstack/sync-4.13
Browse files Browse the repository at this point in the history
  • Loading branch information
openshift-merge-robot committed Jul 19, 2023
2 parents af5c48d + 840ea68 commit c59255a
Show file tree
Hide file tree
Showing 339 changed files with 8,646 additions and 4,152 deletions.
22 changes: 11 additions & 11 deletions .github/workflows/pr.yaml
Expand Up @@ -10,23 +10,23 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v2

- name: Fetch history
run: git fetch --prune --unshallow
uses: actions/checkout@v3
with:
fetch-depth: 0

- name: Set up Helm
uses: azure/setup-helm@v1
uses: azure/setup-helm@v3
with:
version: v3.6.1
version: v3.10.0

- uses: actions/setup-python@v2
- uses: actions/setup-python@v4
with:
python-version: 3.7
python-version: '3.9'
check-latest: true

# see example https://github.com/helm/chart-testing-action
- name: Set up chart-testing
uses: helm/chart-testing-action@v2.1.0
uses: helm/chart-testing-action@v2.3.1

# https://github.com/helm/chart-testing/blob/main/doc/ct_lint.md
- name: Run chart-testing (lint)
run: ct lint --target-branch=${GITHUB_BASE_REF} --check-version-increment=false
run: ct lint --target-branch ${GITHUB_BASE_REF}
7 changes: 3 additions & 4 deletions .github/workflows/release.yaml
Expand Up @@ -10,10 +10,9 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v2

- name: Fetch history
run: git fetch --prune --unshallow
uses: actions/checkout@v3
with:
fetch-depth: 0

- name: Configure Git
run: |
Expand Down
2 changes: 1 addition & 1 deletion Makefile
Expand Up @@ -155,7 +155,7 @@ build-cmd-%: work $(SOURCES)
test: unit functional

check: work
go run github.com/golangci/golangci-lint/cmd/golangci-lint@v1.46.1 run ./...
go run github.com/golangci/golangci-lint/cmd/golangci-lint@v1.51.2 run ./...

unit: work
go test -tags=unit $(shell go list ./... | sed -e '/sanity/ { N; d; }' | sed -e '/tests/ {N; d;}') $(TESTARGS)
Expand Down
4 changes: 2 additions & 2 deletions charts/cinder-csi-plugin/Chart.yaml
@@ -1,8 +1,8 @@
apiVersion: v1
appVersion: latest
appVersion: v1.26.3
description: Cinder CSI Chart for OpenStack
name: openstack-cinder-csi
version: 2.3.0
version: 2.26.1
home: https://github.com/kubernetes/cloud-provider-openstack
icon: https://github.com/kubernetes/kubernetes/blob/master/logo/logo.png
maintainers:
Expand Down
2 changes: 1 addition & 1 deletion charts/cinder-csi-plugin/values.yaml
Expand Up @@ -48,7 +48,7 @@ csi:
resources: {}
plugin:
image:
repository: docker.io/k8scloudprovider/cinder-csi-plugin
repository: registry.k8s.io/provider-os/cinder-csi-plugin
pullPolicy: IfNotPresent
tag: # defaults to .Chart.AppVersion
volumes:
Expand Down
4 changes: 2 additions & 2 deletions charts/manila-csi-plugin/Chart.yaml
@@ -1,8 +1,8 @@
apiVersion: v1
appVersion: latest
appVersion: v1.26.3
description: Manila CSI Chart for OpenStack
name: openstack-manila-csi
version: 1.6.0
version: 2.26.1
home: http://github.com/kubernetes/cloud-provider-openstack
icon: https://github.com/kubernetes/kubernetes/blob/master/logo/logo.png
maintainers:
Expand Down
Expand Up @@ -7,5 +7,5 @@ metadata:
aggregationRule:
clusterRoleSelectors:
- matchLabels:
rbac.manila.csi.openstack.org/aggregate-to-{{ include "openstack-manila-csi.controllerplugin.fullname" . }}: "true"
rbac.manila.csi.openstack.org/aggregate-to-controller-{{ include "openstack-manila-csi.name" . }}: "true"
rules: []
Expand Up @@ -4,7 +4,7 @@ metadata:
name: {{ include "openstack-manila-csi.controllerplugin.fullname" . }}-rules
labels:
{{- include "openstack-manila-csi.controllerplugin.labels" . | nindent 4 }}
rbac.manila.csi.openstack.org/aggregate-to-{{ include "openstack-manila-csi.controllerplugin.fullname" . }}: "true"
rbac.manila.csi.openstack.org/aggregate-to-controller-{{ include "openstack-manila-csi.name" . }}: "true"
rules:
- apiGroups: [""]
resources: ["nodes"]
Expand Down
Expand Up @@ -7,5 +7,5 @@ metadata:
aggregationRule:
clusterRoleSelectors:
- matchLabels:
rbac.manila.csi.openstack.org/aggregate-to-{{ include "openstack-manila-csi.nodeplugin.fullname" . }}: "true"
rbac.manila.csi.openstack.org/aggregate-to-nodeplugin-{{ include "openstack-manila-csi.name" . }}: "true"
rules: []
Expand Up @@ -4,7 +4,7 @@ metadata:
name: {{ include "openstack-manila-csi.nodeplugin.fullname" . }}-rules
labels:
{{- include "openstack-manila-csi.nodeplugin.labels" . | nindent 4 }}
rbac.manila.csi.openstack.org/aggregate-to-{{ include "openstack-manila-csi.nodeplugin.fullname" . }}: "true"
rbac.manila.csi.openstack.org/aggregate-to-nodeplugin-{{ include "openstack-manila-csi.name" . }}: "true"
rules:
- apiGroups: [""]
resources: ["configmaps"]
Expand Down
2 changes: 1 addition & 1 deletion charts/manila-csi-plugin/values.yaml
Expand Up @@ -41,7 +41,7 @@ csimanila:

# Image spec
image:
repository: k8scloudprovider/manila-csi-plugin
repository: registry.k8s.io/provider-os/manila-csi-plugin
pullPolicy: IfNotPresent
tag: # defaults to .Chart.AppVersion

Expand Down
7 changes: 2 additions & 5 deletions charts/openstack-cloud-controller-manager/Chart.yaml
@@ -1,14 +1,11 @@
apiVersion: v1
appVersion: latest
appVersion: v1.26.3
description: Openstack Cloud Controller Manager Helm Chart
icon: https://object-storage-ca-ymq-1.vexxhost.net/swift/v1/6e4619c416ff4bd19e1c087f27a43eea/www-images-prod/openstack-logo/OpenStack-Logo-Vertical.png
home: https://github.com/kubernetes/cloud-provider-openstack
name: openstack-cloud-controller-manager
version: 1.4.0
version: 2.26.1
maintainers:
- name: morremeyer
email: kubernetes@maurice-meyer.de
url: https://maurice-meyer.de
- name: eumel8
email: f.kloeker@telekom.de
url: https://www.telekom.com
2 changes: 1 addition & 1 deletion charts/openstack-cloud-controller-manager/values.yaml
Expand Up @@ -4,7 +4,7 @@
#
# Image repository name and tag
image:
repository: docker.io/k8scloudprovider/openstack-cloud-controller-manager
repository: registry.k8s.io/provider-os/openstack-cloud-controller-manager
tag: ""

# Additional containers which are run before the app containers are started.
Expand Down
2 changes: 1 addition & 1 deletion cluster/images/cinder-csi-plugin/Dockerfile
Expand Up @@ -13,7 +13,7 @@
ARG DEBIAN_ARCH=amd64
# We not using scratch because we need to keep the basic image information
# from parent image
FROM k8s.gcr.io/build-image/debian-base-${DEBIAN_ARCH}:bullseye-v1.4.2
FROM registry.k8s.io/build-image/debian-base-${DEBIAN_ARCH}:bullseye-v1.4.3

ARG ARCH=amd64

Expand Down
2 changes: 1 addition & 1 deletion cluster/images/cinder-csi-plugin/Dockerfile.build
@@ -1,5 +1,5 @@
ARG DEBIAN_ARCH=amd64
FROM k8s.gcr.io/build-image/debian-base-${DEBIAN_ARCH}:bullseye-v1.4.2
FROM registry.k8s.io/build-image/debian-base-${DEBIAN_ARCH}:bullseye-v1.4.3

ARG ARCH=amd64

Expand Down
2 changes: 1 addition & 1 deletion docs/barbican-kms-plugin/using-barbican-kms-plugin.md
Expand Up @@ -83,7 +83,7 @@ $ docker run -d --volume=/var/lib/kms:/var/lib/kms \
--volume=/etc/kubernetes:/etc/kubernetes \
-e socketpath=/var/lib/kms/kms.sock \
-e cloudconfig=/etc/kubernetes/cloud-config \
docker.io/k8scloudprovider/barbican-kms-plugin-amd64:latest
registry.k8s.io/provider-os/barbican-kms-plugin:v1.26.3
```
6. Create /etc/kubernetes/encryption-config.yaml
```
Expand Down
2 changes: 1 addition & 1 deletion docs/cinder-csi-plugin/using-cinder-csi-plugin.md
Expand Up @@ -36,7 +36,7 @@ This plugin is compatible with CSI versions v1.3.0, v1.2.0 , v1.1.0, and v1.0.0

## Downloads

Stable released version images of the plugin can be found at [Docker Hub](https://hub.docker.com/r/k8scloudprovider/cinder-csi-plugin)
Stable released version images of the plugin can be pulled from `registry.k8s.io/provider-os/cinder-csi-plugin:[release tag]`

## Kubernetes Compatibility

Expand Down
Expand Up @@ -251,8 +251,8 @@ Now we are ready to create the k8s-keystone-auth deployment and expose
it as a service. There are several things we need to notice in the
deployment manifest:

- We are using the official nightly-built image
`k8scloudprovider/k8s-keystone-auth:latest`
- We are using the image
`registry.k8s.io/provider-os/k8s-keystone-auth:v1.26.3`
- We use `k8s-auth-policy` configmap created above.
- The pod uses service account `keystone-auth` created above.
- We use `keystone-auth-certs` secret created above to inject the
Expand Down
2 changes: 1 addition & 1 deletion docs/magnum-auto-healer/using-magnum-auto-healer.md
Expand Up @@ -73,7 +73,7 @@ user_id=ceb61464a3d341ebabdf97d1d4b97099
user_project_id=b23a5e41d1af4c20974bf58b4dff8e5a
password=password
region=RegionOne
image=k8scloudprovider/magnum-auto-healer:latest
image=registry.k8s.io/provider-os/magnum-auto-healer:v1.26.3

cat <<EOF | kubectl apply -f -
---
Expand Down
30 changes: 30 additions & 0 deletions docs/manila-csi-plugin/using-manila-csi-plugin.md
Expand Up @@ -50,6 +50,7 @@ Parameter | Required | Description
`type` | _yes_ | Manila [share type](https://wiki.openstack.org/wiki/Manila/Concepts#share_type)
`shareNetworkID` | _no_ | Manila [share network ID](https://wiki.openstack.org/wiki/Manila/Concepts#share_network)
`availability` | _no_ | Manila availability zone of the provisioned share. If none is provided, the default Manila zone will be used. Note that this parameter is opaque to the CO and does not influence placement of workloads that will consume this share, meaning they may be scheduled onto any node of the cluster. If the specified Manila AZ is not equally accessible from all compute nodes of the cluster, use [Topology-aware dynamic provisioning](#topology-aware-dynamic-provisioning).
`autoTopology` | _no_ | When set to "true" and the `availability` parameter is empty, the Manila CSI controller will map the Manila availability zone to the target compute node availability zone.
`appendShareMetadata` | _no_ | Append user-defined metadata to the provisioned share. If not empty, this field must be a string with a valid JSON object. The object must consist of key-value pairs of type string. Example: `"{..., \"key\": \"value\"}"`.
`cephfs-mounter` | _no_ | Relevant for CephFS Manila shares. Specifies which mounting method to use with the CSI CephFS driver. Available options are `kernel` and `fuse`, defaults to `fuse`. See [CSI CephFS docs](https://github.com/ceph/ceph-csi/blob/csi-v1.0/docs/deploy-cephfs.md#configuration) for further information.
`cephfs-kernelMountOptions` | _no_ | Relevant for CephFS Manila shares. Specifies mount options for CephFS kernel client. See [CSI CephFS docs](https://github.com/ceph/ceph-csi/blob/csi-v1.0/docs/deploy-cephfs.md#configuration) for further information.
Expand Down Expand Up @@ -130,6 +131,35 @@ Storage AZ does not influence
Shares in zone-a are accessible only from nodes in nova-1 and nova-2.
```

In cases when the Manila availability zone must correspond to the Nova
availability zone, you can set the `autoTopology: "true"` along with the
`volumeBindingMode: WaitForFirstConsumer` and omit the `availability`
parameter. By doing so, the share will be provisioned in the target compute
node availability zone.

```
Auto topology-aware storage class example:
Both Compute and Storage AZs influence the placement of workloads.
+-----------+ +---------------+
| Manila AZ | | Compute AZs |
| zone-1 | apiVersion: storage.k8s.io/v1 | zone-1 |
| zone-2 | kind: StorageClass | zone-2 |
+-----------+ metadata: +---------------+
| name: nfs-gold |
| provisioner: nfs.manila.csi.openstack.org |
| parameters: |
+---------+ autoTopology: "true" +--------------------+
...
volumeBindingMode: WaitForFirstConsumer
...
Shares for workloads in zone-1 will be created in zone-1 and accessible only from nodes in zone-1.
Shares for workloads in zone-2 will be created in zone-2 and accessible only from nodes in zone-2.
```

[Enabling topology awareness in Kubernetes](#enabling-topology-awareness)

### Runtime configuration file
Expand Down
Expand Up @@ -148,7 +148,7 @@ Here are several other config options that are not included in the example configuration
### Deploy octavia-ingress-controller

```shell
image="docker.io/k8scloudprovider/octavia-ingress-controller:latest"
image="registry.k8s.io/provider-os/octavia-ingress-controller:v1.26.3"

cat <<EOF > /etc/kubernetes/octavia-ingress-controller/deployment.yaml
---
Expand Down
Expand Up @@ -227,6 +227,11 @@ Request Body:

This annotation explicitly sets a hostname in the status of the load balancer service.

- `loadbalancer.openstack.org/load-balancer-address`

This annotation is automatically added and contains the floating IP address of the load balancer service.
When using the `loadbalancer.openstack.org/hostname` annotation, this is the only place to see the real address of the load balancer.

### Switching between Floating Subnets by using preconfigured Classes

If you have multiple `FloatingIPPools` and/or `FloatingIPSubnets` it might be desirable to offer the user logical meanings for `LoadBalancers` like `internetFacing` or `DMZ` instead of requiring the user to select a dedicated network or subnet ID at the service object level as an annotation.
Expand Down
Expand Up @@ -174,6 +174,11 @@ The options in `Global` section are used for openstack-cloud-controller-manager
For example, this option can be useful when having multiple or dual-stack interfaces attached to a node and needing a user-controlled, deterministic way of sorting the addresses.
Default: ""

### Router

* `router-id`
  Specifies the ID of the Neutron router used to manage Kubernetes cluster routes, e.g. for load balancers or compute instances that are not part of the Kubernetes cluster.

### Load Balancer

Although the openstack-cloud-controller-manager was initially implemented with Neutron-LBaaS support, Octavia is recommended now because Neutron-LBaaS has been deprecated since Queens OpenStack release cycle and no longer accepted new feature enhancements. As a result, lots of advanced features in openstack-cloud-controller-manager rely on Octavia, even the CI is running based on Octavia enabled OpenStack environment. Functionalities are not guaranteed if using Neutron-LBaaS.
Expand Down
19 changes: 19 additions & 0 deletions examples/manila-csi-plugin/nfs/auto-topology-aware/pod.yaml
@@ -0,0 +1,19 @@
apiVersion: v1
kind: Pod
metadata:
name: new-nfs-share-pod
spec:
containers:
- name: web-server
image: nginx
imagePullPolicy: IfNotPresent
volumeMounts:
- name: mypvc
mountPath: /var/lib/www
nodeSelector:
topology.kubernetes.io/zone: zone-1
volumes:
- name: mypvc
persistentVolumeClaim:
claimName: new-nfs-share-pvc
readOnly: false
11 changes: 11 additions & 0 deletions examples/manila-csi-plugin/nfs/auto-topology-aware/pvc.yaml
@@ -0,0 +1,11 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: new-nfs-share-pvc
spec:
accessModes:
- ReadWriteMany
resources:
requests:
storage: 1Gi
storageClassName: csi-manila-nfs
@@ -0,0 +1,29 @@
# Topology constraints example:
#
# Let's have two Manila AZs: zone-{1..2}
# Let's have six Nova AZs: zone-{1..6}
#
# Manila zone-1 is accessible from nodes in zone-1 only
# Manila zone-2 is accessible from nodes in zone-2 only
#
# We're provisioning into zone-1
# availability parameter and allowedTopologies are empty, therefore the dynamic
# share provisioning with automatic availability zone selection takes place.
# The "volumeBindingMode" must be set to "WaitForFirstConsumer".

apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: csi-manila-nfs
provisioner: nfs.manila.csi.openstack.org
volumeBindingMode: WaitForFirstConsumer
allowVolumeExpansion: true
parameters:
type: default
autoTopology: "true"
csi.storage.k8s.io/provisioner-secret-name: csi-manila-secrets
csi.storage.k8s.io/provisioner-secret-namespace: default
csi.storage.k8s.io/node-stage-secret-name: csi-manila-secrets
csi.storage.k8s.io/node-stage-secret-namespace: default
csi.storage.k8s.io/node-publish-secret-name: csi-manila-secrets
csi.storage.k8s.io/node-publish-secret-namespace: default
Expand Up @@ -9,4 +9,3 @@ spec:
requests:
storage: 1Gi
storageClassName: csi-manila-nfs

Expand Up @@ -24,6 +24,7 @@ parameters:
csi.storage.k8s.io/node-stage-secret-namespace: default
csi.storage.k8s.io/node-publish-secret-name: csi-manila-secrets
csi.storage.k8s.io/node-publish-secret-namespace: default
allowVolumeExpansion: true
allowedTopologies:
- matchLabelExpressions:
- key: topology.manila.csi.openstack.org/zone
Expand Down
2 changes: 1 addition & 1 deletion examples/webhook/keystone-deployment.yaml
Expand Up @@ -18,7 +18,7 @@ spec:
serviceAccountName: k8s-keystone
containers:
- name: k8s-keystone-auth
image: k8scloudprovider/k8s-keystone-auth:latest
image: registry.k8s.io/provider-os/k8s-keystone-auth:v1.26.3
args:
- ./bin/k8s-keystone-auth
- --tls-cert-file
Expand Down

0 comments on commit c59255a

Please sign in to comment.