Merge https://github.com/kubernetes/cloud-provider-openstack:release-1.23 into release-4.10 #167

Closed
2 changes: 1 addition & 1 deletion Makefile
@@ -175,7 +175,7 @@ fmt:
lint:
ifndef HAS_LINT
echo "installing lint"
go get -u golang.org/x/lint/golint
go install golang.org/x/lint/golint@latest
endif
hack/verify-golint.sh

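For context on the `go install` switch above: since Go 1.16, `go install pkg@version` is the supported way to build a tool binary, and newer Go releases deprecate `go get` for installing binaries in module mode. A minimal sketch of what the lint target now effectively does, assuming `hack/verify-golint.sh` picks golint up from `GOPATH/bin`:

```shell
# Build golint in module-aware mode; the binary lands in $(go env GOPATH)/bin
# and the current module's go.mod is left untouched.
go install golang.org/x/lint/golint@latest

# Then lint the repository the way hack/verify-golint.sh would (assumed):
"$(go env GOPATH)/bin/golint" ./...
```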
2 changes: 1 addition & 1 deletion charts/cinder-csi-plugin/Chart.yaml
@@ -1,5 +1,5 @@
apiVersion: v1
appVersion: latest
appVersion: v1.23.0
description: Cinder CSI Chart for OpenStack
name: openstack-cinder-csi
version: 2.1.0
2 changes: 1 addition & 1 deletion charts/manila-csi-plugin/Chart.yaml
@@ -1,5 +1,5 @@
apiVersion: v1
appVersion: latest
appVersion: v1.23.0
description: Manila CSI Chart for OpenStack
name: openstack-manila-csi
version: 1.4.0
2 changes: 1 addition & 1 deletion charts/openstack-cloud-controller-manager/Chart.yaml
@@ -1,5 +1,5 @@
apiVersion: v1
appVersion: "latest"
appVersion: v1.23.0
description: Openstack Cloud Controller Manager Helm Chart
icon: https://object-storage-ca-ymq-1.vexxhost.net/swift/v1/6e4619c416ff4bd19e1c087f27a43eea/www-images-prod/openstack-logo/OpenStack-Logo-Vertical.png
home: https://github.com/kubernetes/cloud-provider-openstack
2 changes: 1 addition & 1 deletion cluster/images/barbican-kms-plugin/Dockerfile
@@ -13,7 +13,7 @@
ARG ALPINE_ARCH=amd64
# We are not using scratch because we need to keep the basic image information
# from the parent image
FROM ${ALPINE_ARCH}/alpine:3.15
FROM ${ALPINE_ARCH}/alpine:3.15.4

ARG ARCH=amd64

2 changes: 1 addition & 1 deletion cluster/images/barbican-kms-plugin/Dockerfile.build
@@ -1,5 +1,5 @@
ARG ALPINE_ARCH=amd64
FROM ${ALPINE_ARCH}/alpine:3.15
FROM ${ALPINE_ARCH}/alpine:3.15.4
LABEL maintainers="Kubernetes Authors"
LABEL description="Barbican KMS Plugin"

2 changes: 1 addition & 1 deletion cluster/images/k8s-keystone-auth/Dockerfile
@@ -13,7 +13,7 @@
ARG ALPINE_ARCH=amd64
# We are not using scratch because we need to keep the basic image information
# from the parent image
FROM ${ALPINE_ARCH}/alpine:3.15
FROM ${ALPINE_ARCH}/alpine:3.15.4

ARG ARCH=amd64

2 changes: 1 addition & 1 deletion cluster/images/k8s-keystone-auth/Dockerfile.build
@@ -11,7 +11,7 @@
# limitations under the License.

ARG ALPINE_ARCH=amd64
FROM ${ALPINE_ARCH}/alpine:3.15
FROM ${ALPINE_ARCH}/alpine:3.15.4

ARG ARCH=amd64

2 changes: 1 addition & 1 deletion cluster/images/magnum-auto-healer/Dockerfile
@@ -13,7 +13,7 @@
ARG ALPINE_ARCH=amd64
# We are not using scratch because we need to keep the basic image information
# from the parent image
FROM ${ALPINE_ARCH}/alpine:3.15
FROM ${ALPINE_ARCH}/alpine:3.15.4

ARG ARCH=amd64

2 changes: 1 addition & 1 deletion cluster/images/magnum-auto-healer/Dockerfile.build
@@ -11,7 +11,7 @@
# limitations under the License.

ARG ALPINE_ARCH=amd64
FROM ${ALPINE_ARCH}/alpine:3.15
FROM ${ALPINE_ARCH}/alpine:3.15.4

ARG ARCH=amd64

2 changes: 1 addition & 1 deletion cluster/images/manila-csi-plugin/Dockerfile
@@ -13,7 +13,7 @@
ARG ALPINE_ARCH=amd64
# We are not using scratch because we need to keep the basic image information
# from the parent image
FROM ${ALPINE_ARCH}/alpine:3.15
FROM ${ALPINE_ARCH}/alpine:3.15.4

ARG ARCH=amd64

2 changes: 1 addition & 1 deletion cluster/images/manila-csi-plugin/Dockerfile.build
@@ -1,5 +1,5 @@
ARG ALPINE_ARCH=amd64
FROM ${ALPINE_ARCH}/alpine:3.15
FROM ${ALPINE_ARCH}/alpine:3.15.4

ARG ARCH=amd64

2 changes: 1 addition & 1 deletion cluster/images/octavia-ingress-controller/Dockerfile
@@ -13,7 +13,7 @@
ARG ALPINE_ARCH=amd64
# We are not using scratch because we need to keep the basic image information
# from the parent image
FROM ${ALPINE_ARCH}/alpine:3.15
FROM ${ALPINE_ARCH}/alpine:3.15.4

ARG ARCH=amd64

2 changes: 1 addition & 1 deletion cluster/images/octavia-ingress-controller/Dockerfile.build
@@ -10,7 +10,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
ARG ALPINE_ARCH=amd64
FROM ${ALPINE_ARCH}/alpine:3.15
FROM ${ALPINE_ARCH}/alpine:3.15.4

ARG ARCH=amd64

@@ -13,7 +13,7 @@
ARG ALPINE_ARCH=amd64
# We are not using scratch because we need to keep the basic image information
# from the parent image
FROM ${ALPINE_ARCH}/alpine:3.15
FROM ${ALPINE_ARCH}/alpine:3.15.4

ARG ARCH=amd64

@@ -11,7 +11,7 @@
# limitations under the License.

ARG ALPINE_ARCH=amd64
FROM ${ALPINE_ARCH}/alpine:3.15
FROM ${ALPINE_ARCH}/alpine:3.15.4

ARG ARCH=amd64
RUN apk add --no-cache ca-certificates
2 changes: 1 addition & 1 deletion docs/barbican-kms-plugin/using-barbican-kms-plugin.md
@@ -83,7 +83,7 @@ $ docker run -d --volume=/var/lib/kms:/var/lib/kms \
--volume=/etc/kubernetes:/etc/kubernetes \
-e socketpath=/var/lib/kms/kms.sock \
-e cloudconfig=/etc/kubernetes/cloud-config \
docker.io/k8scloudprovider/barbican-kms-plugin-amd64:latest
docker.io/k8scloudprovider/barbican-kms-plugin-amd64:v1.23.0
```
6. Create /etc/kubernetes/encryption-config.yaml
```
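The contents of step 6 are truncated in this diff view; as a rough, hypothetical sketch only — assuming the KMS v1 provider and the `/var/lib/kms/kms.sock` socket path from the `docker run` example above — an encryption configuration typically looks like:

```shell
cat <<EOF > /etc/kubernetes/encryption-config.yaml
# Hypothetical KMS v1 encryption configuration; adjust the resource list,
# provider name and socket path to match your deployment.
apiVersion: apiserver.config.k8s.io/v1
kind: EncryptionConfiguration
resources:
  - resources:
      - secrets
    providers:
      - kms:
          name: barbican
          endpoint: unix:///var/lib/kms/kms.sock
          cachesize: 100
      - identity: {}
EOF
```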
2 changes: 1 addition & 1 deletion docs/magnum-auto-healer/using-magnum-auto-healer.md
@@ -73,7 +73,7 @@ user_id=ceb61464a3d341ebabdf97d1d4b97099
user_project_id=b23a5e41d1af4c20974bf58b4dff8e5a
password=password
region=RegionOne
image=k8scloudprovider/magnum-auto-healer:latest
image=k8scloudprovider/magnum-auto-healer:v1.23.0

cat <<EOF | kubectl apply -f -
---
@@ -148,7 +148,7 @@ Here are several other config options that are not included in the example configuration
### Deploy octavia-ingress-controller

```shell
image="docker.io/k8scloudprovider/octavia-ingress-controller:latest"
image="docker.io/k8scloudprovider/octavia-ingress-controller:v1.23.0"

cat <<EOF > /etc/kubernetes/octavia-ingress-controller/deployment.yaml
---
@@ -167,6 +167,13 @@ The options in `Global` section are used for openstack-cloud-controller-manager
The name of Neutron external network. openstack-cloud-controller-manager uses this option when getting the external IP of the Kubernetes node. Can be specified multiple times. Specified network names will be ORed. Default: ""
* `internal-network-name`
The name of Neutron internal network. openstack-cloud-controller-manager uses this option when getting the internal IP of the Kubernetes node; this is useful if the node has multiple interfaces. Can be specified multiple times. Specified network names will be ORed. Default: ""
* `address-sort-order`
This configuration key influences the way the provider reports the node addresses to the Kubernetes node resource. By default the order depends on the hard-coded order in which the provider queries the addresses and on what the cloud returns, so no particular order is guaranteed.

To override this behavior, specify a comma-separated list of CIDRs. Addresses are then sorted and grouped by the CIDR they match, with the first CIDR in the list having the highest priority and the last the lowest. Addresses that match no CIDR keep their original relative order.

This option is useful, for example, when a node has multiple or dual-stack interfaces attached and a user-controlled, deterministic ordering of the addresses is needed.
Default: ""
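A hypothetical cloud-config fragment showing the option (the section placement and the CIDR values here are assumptions, not taken from this PR):

```shell
cat <<EOF >> /etc/kubernetes/cloud-config
[Networking]
# Report addresses in 192.168.0.0/16 first, then fd00::/8; all other
# addresses keep their original relative order.
address-sort-order=192.168.0.0/16,fd00::/8
EOF
```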

### Load Balancer

2 changes: 1 addition & 1 deletion examples/webhook/keystone-deployment.yaml
@@ -18,7 +18,7 @@ spec:
serviceAccountName: k8s-keystone
containers:
- name: k8s-keystone-auth
image: k8scloudprovider/k8s-keystone-auth:latest
image: k8scloudprovider/k8s-keystone-auth:v1.23.0
args:
- ./bin/k8s-keystone-auth
- --tls-cert-file
2 changes: 1 addition & 1 deletion manifests/barbican-kms/pod.yaml
@@ -5,7 +5,7 @@ metadata:
spec:
containers:
- name: barbican-kms
image: docker.io/k8scloudprovider/barbican-kms-plugin:latest
image: docker.io/k8scloudprovider/barbican-kms-plugin:v1.23.0
args:
- "--socketpath=/kms/kms.sock"
- "--cloud-config=/etc/kubernetes/cloud-config"
@@ -92,7 +92,7 @@ spec:
- mountPath: /var/lib/csi/sockets/pluginproxy/
name: socket-dir
- name: cinder-csi-plugin
image: docker.io/k8scloudprovider/cinder-csi-plugin:latest
image: docker.io/k8scloudprovider/cinder-csi-plugin:v1.23.0
args:
- /bin/cinder-csi-plugin
- "--endpoint=$(CSI_ENDPOINT)"
2 changes: 1 addition & 1 deletion manifests/cinder-csi-plugin/cinder-csi-nodeplugin.yaml
@@ -53,7 +53,7 @@ spec:
capabilities:
add: ["SYS_ADMIN"]
allowPrivilegeEscalation: true
image: docker.io/k8scloudprovider/cinder-csi-plugin:latest
image: docker.io/k8scloudprovider/cinder-csi-plugin:v1.23.0
args:
- /bin/cinder-csi-plugin
- "--endpoint=$(CSI_ENDPOINT)"
@@ -36,7 +36,7 @@ spec:
serviceAccountName: cloud-controller-manager
containers:
- name: openstack-cloud-controller-manager
image: docker.io/k8scloudprovider/openstack-cloud-controller-manager:latest
image: docker.io/k8scloudprovider/openstack-cloud-controller-manager:v1.23.0
args:
- /bin/openstack-cloud-controller-manager
- --v=1
@@ -11,7 +11,7 @@ metadata:
spec:
containers:
- name: openstack-cloud-controller-manager
image: docker.io/k8scloudprovider/openstack-cloud-controller-manager:latest
image: docker.io/k8scloudprovider/openstack-cloud-controller-manager:v1.23.0
args:
- /bin/openstack-cloud-controller-manager
- --v=1
2 changes: 1 addition & 1 deletion manifests/magnum-auto-healer/magnum-auto-healer.yaml
@@ -88,7 +88,7 @@ spec:
node-role.kubernetes.io/master: ""
containers:
- name: magnum-auto-healer
image: docker.io/k8scloudprovider/magnum-auto-healer:latest
image: docker.io/k8scloudprovider/magnum-auto-healer:v1.23.0
imagePullPolicy: Always
args:
- /bin/magnum-auto-healer
2 changes: 1 addition & 1 deletion manifests/manila-csi-plugin/csi-controllerplugin.yaml
@@ -77,7 +77,7 @@ spec:
capabilities:
add: ["SYS_ADMIN"]
allowPrivilegeEscalation: true
image: "k8scloudprovider/manila-csi-plugin:latest"
image: "k8scloudprovider/manila-csi-plugin:v1.23.0"
command: ["/bin/sh", "-c",
'/bin/manila-csi-plugin
--nodeid=$(NODE_ID)
2 changes: 1 addition & 1 deletion manifests/manila-csi-plugin/csi-nodeplugin.yaml
@@ -50,7 +50,7 @@ spec:
capabilities:
add: ["SYS_ADMIN"]
allowPrivilegeEscalation: true
image: "k8scloudprovider/manila-csi-plugin:latest"
image: "k8scloudprovider/manila-csi-plugin:v1.23.0"
command: ["/bin/sh", "-c",
'/bin/manila-csi-plugin
--nodeid=$(NODE_ID)
23 changes: 13 additions & 10 deletions pkg/autohealing/cloudprovider/openstack/provider.go
@@ -251,10 +251,11 @@ func (provider OpenStackCloudProvider) waitForServerDetachVolumes(serverID strin
}

// FirstTimeRepair Handle the first time repair for a node
// 1) If the node is the first time in error, reboot and uncordon it
// 2) If the node is not the first time in error, check if the last reboot time is in provider.Config.RebuildDelayAfterReboot
// That said, if the node has been found in broken status before but has been long time since then, the processed variable
// will be kept as False, which means the node need to be rebuilt to fix it, otherwise it means the has been processed.
// 1. If the node is the first time in error, reboot and uncordon it
// 2. If the node is not the first time in error, check if the last reboot time is in provider.Config.RebuildDelayAfterReboot
// That said, if the node has been found in broken status before but it has been a long time since then, the processed variable
// will be kept as False, which means the node needs to be rebuilt to fix it; otherwise it means the node has been processed.
//
// The bool return value indicates whether the node has been processed from a first-time-repair PoV
func (provider OpenStackCloudProvider) firstTimeRepair(n healthcheck.NodeInfo, serverID string, firstTimeRebootNodes map[string]healthcheck.NodeInfo) (bool, error) {
var firstTimeUnhealthy = true
@@ -312,12 +313,14 @@ func (provider OpenStackCloudProvider) firstTimeRepair(n healthcheck.NodeInfo, s
}

// Repair For master nodes: detach etcd and docker volumes, find the root
// volume, then shutdown the VM, marks the both the VM and the root
// volume (heat resource) as "unhealthy" then trigger Heat stack update
// in order to rebuild the node. The information this function needs:
// - Nova VM ID
// - Root volume ID
// - Heat stack ID and resource ID.
//
// volume, then shut down the VM, mark both the VM and the root
// volume (heat resource) as "unhealthy", then trigger a Heat stack update
// in order to rebuild the node. The information this function needs:
// - Nova VM ID
// - Root volume ID
// - Heat stack ID and resource ID.
//
// For worker nodes: Call Magnum resize API directly.
func (provider OpenStackCloudProvider) Repair(nodes []healthcheck.NodeInfo) error {
if len(nodes) == 0 {
2 changes: 1 addition & 1 deletion pkg/autohealing/cloudprovider/register/register.go
@@ -65,7 +65,7 @@ func registerOpenStack(cfg config.Config, kubeClient kubernetes.Interface) (clou

// get cinder service client
var cinderClient *gophercloud.ServiceClient
cinderClient, err = gopenstack.NewBlockStorageV2(client, eoOpts)
cinderClient, err = gopenstack.NewBlockStorageV3(client, eoOpts)
if err != nil {
return nil, fmt.Errorf("failed to find Cinder service endpoint in the region %s: %v", cfg.OpenStack.Region, err)
}
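Since the autohealer now builds its Cinder client against the Block Storage v3 API, it is worth confirming the target cloud exposes a `volumev3` endpoint in the configured region. A quick check with python-openstackclient (the region name is illustrative) might look like:

```shell
# Requires python-openstackclient and sourced credentials; lists the
# Block Storage v3 endpoints the autohealer will now look up.
openstack endpoint list --service volumev3 --region RegionOne
```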
5 changes: 4 additions & 1 deletion pkg/csi/cinder/controllerserver.go
@@ -365,7 +365,10 @@ func (cs *controllerServer) CreateSnapshot(ctx context.Context, req *csi.CreateS
properties := map[string]string{cinderCSIClusterIDKey: cs.Driver.cluster}

// see https://github.com/kubernetes-csi/external-snapshotter/pull/375/
for _, mKey := range []string{"csi.storage.k8s.io/volumesnapshot/name", "csi.storage.k8s.io/volumesnapshot/namespace", "csi.storage.k8s.io/volumesnapshotcontent/name"} {
// Also, we don't want to tag every param, but we still want to pass the
// 'force-create' flag down to the OpenStack layer so that the
// force-create behavior is honored
for _, mKey := range []string{"csi.storage.k8s.io/volumesnapshot/name", "csi.storage.k8s.io/volumesnapshot/namespace", "csi.storage.k8s.io/volumesnapshotcontent/name", openstack.SnapshotForceCreate} {
if v, ok := req.Parameters[mKey]; ok {
properties[mKey] = v
}
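With the `force-create` key now forwarded to the OpenStack layer, the flag can be set per snapshot class. A hedged sketch — the class name is made up, `force-create` matches the exported constant below, and `cinder.csi.openstack.org` is the usual Cinder CSI driver name:

```shell
cat <<EOF | kubectl apply -f -
apiVersion: snapshot.storage.k8s.io/v1
kind: VolumeSnapshotClass
metadata:
  name: csi-cinder-snapclass-in-use   # illustrative name
driver: cinder.csi.openstack.org
deletionPolicy: Delete
parameters:
  force-create: "true"   # snapshot volumes even while they are attached/in-use
EOF
```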
2 changes: 2 additions & 0 deletions pkg/csi/cinder/controllerserver_test.go
@@ -494,6 +494,7 @@ func TestCreateSnapshotWithExtraMetadata(t *testing.T) {
"csi.storage.k8s.io/volumesnapshot/name": FakeSnapshotName,
"csi.storage.k8s.io/volumesnapshotcontent/name": FakeSnapshotContentName,
"csi.storage.k8s.io/volumesnapshot/namespace": FakeSnapshotNamespace,
openstack.SnapshotForceCreate: "true",
}

osmock.On("CreateSnapshot", FakeSnapshotName, FakeVolID, &properties).Return(&FakeSnapshotRes, nil)
@@ -511,6 +512,7 @@ func TestCreateSnapshotWithExtraMetadata(t *testing.T) {
"csi.storage.k8s.io/volumesnapshot/name": FakeSnapshotName,
"csi.storage.k8s.io/volumesnapshotcontent/name": FakeSnapshotContentName,
"csi.storage.k8s.io/volumesnapshot/namespace": FakeSnapshotNamespace,
openstack.SnapshotForceCreate: "true",
},
}

8 changes: 4 additions & 4 deletions pkg/csi/cinder/openstack/openstack_snapshots.go
@@ -37,7 +37,7 @@ const (
snapReadySteps = 10

snapshotDescription = "Created by OpenStack Cinder CSI driver"
snapshotForceCreate = "force-create"
SnapshotForceCreate = "force-create"
)

// CreateSnapshot issues a request to take a Snapshot of the specified Volume with the corresponding ID and
@@ -47,14 +47,14 @@ func (os *OpenStack) CreateSnapshot(name, volID string, tags *map[string]string)
force := false
// if no flag given, then force will be false by default
// if flag it given , check it
if item, ok := (*tags)[snapshotForceCreate]; ok {
if item, ok := (*tags)[SnapshotForceCreate]; ok {
var err error
force, err = strconv.ParseBool(item)
if err != nil {
klog.V(5).Infof("Make force create flag to false due to: %v", err)
}

delete(*tags, snapshotForceCreate)
delete(*tags, SnapshotForceCreate)
}
// Force the creation of snapshot even the Volume is in in-use state
opts := &snapshots.CreateOpts{
@@ -142,7 +142,7 @@ func (os *OpenStack) DeleteSnapshot(snapID string) error {
return err
}

//GetSnapshotByID returns snapshot details by id
// GetSnapshotByID returns snapshot details by id
func (os *OpenStack) GetSnapshotByID(snapshotID string) (*snapshots.Snapshot, error) {
s, err := snapshots.Get(os.blockstorage, snapshotID).Extract()
if err != nil {
4 changes: 2 additions & 2 deletions pkg/csi/cinder/openstack/openstack_volumes.go
@@ -232,7 +232,7 @@ func (os *OpenStack) WaitDiskAttached(instanceID string, volumeID string) error
return err
}

//WaitVolumeTargetStatus waits for volume to be in target state
// WaitVolumeTargetStatus waits for volume to be in target state
func (os *OpenStack) WaitVolumeTargetStatus(volumeID string, tStatus []string) error {
backoff := wait.Backoff{
Duration: operationFinishInitDelay,
@@ -367,7 +367,7 @@ func (os *OpenStack) ExpandVolume(volumeID string, status string, newSize int) e
return fmt.Errorf("volume cannot be resized, when status is %s", status)
}

//GetMaxVolLimit returns max vol limit
// GetMaxVolLimit returns max vol limit
func (os *OpenStack) GetMaxVolLimit() int64 {
if os.bsOpts.NodeVolumeAttachLimit > 0 && os.bsOpts.NodeVolumeAttachLimit <= 256 {
return os.bsOpts.NodeVolumeAttachLimit
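For reference, `NodeVolumeAttachLimit` above is normally fed from the Cinder CSI cloud config. A hypothetical fragment — the section and key names are assumed from the option struct, not shown in this diff:

```shell
cat <<EOF >> /etc/kubernetes/cloud-config
[BlockStorage]
# Cap the number of volumes reported as attachable per node at 110; values
# above 256 fall back to the default limit per GetMaxVolLimit.
node-volume-attach-limit=110
EOF
```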