Commit

Merge pull request #1663 from kubernetes-sigs/remove-attacher
cleanup: remove csi-attacher config
andyzhangx committed Jan 12, 2024
2 parents d24ad18 + 95df84b commit 4e96bdd
Showing 7 changed files with 8 additions and 395 deletions.
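Reviewer context: the external csi-attacher sidecar is only needed when a driver actually requires the attach/detach flow, i.e. when its CSIDriver object sets attachRequired: true and the driver implements ControllerPublish/UnpublishVolume. With those RPCs stubbed out below, the sidecar has nothing left to do. A quick, illustrative check on a cluster running the driver (not part of this diff):

# print the attachRequired flag of the azurefile CSIDriver object;
# with attach removed, this is expected to report false
kubectl get csidriver file.csi.azure.com -o jsonpath='{.spec.attachRequired}'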
Binary file modified charts/latest/azurefile-csi-driver-v0.0.0.tgz
@@ -86,28 +86,6 @@ spec:
- mountPath: /csi
name: socket-dir
resources: {{- toYaml .Values.controller.resources.csiProvisioner | nindent 12 }}
- name: csi-attacher
{{- if hasPrefix "/" .Values.image.csiAttacher.repository }}
image: "{{ .Values.image.baseRepo }}{{ .Values.image.csiAttacher.repository }}:{{ .Values.image.csiAttacher.tag }}"
{{- else }}
image: "{{ .Values.image.csiAttacher.repository }}:{{ .Values.image.csiAttacher.tag }}"
{{- end }}
args:
- "-v=2"
- "-csi-address=$(ADDRESS)"
- "-timeout=120s"
- "-leader-election"
- "--leader-election-namespace={{ .Release.Namespace }}"
- "--kube-api-qps=50"
- "--kube-api-burst=100"
env:
- name: ADDRESS
value: /csi/csi.sock
imagePullPolicy: {{ .Values.image.csiAttacher.pullPolicy }}
volumeMounts:
- mountPath: /csi
name: socket-dir
resources: {{- toYaml .Values.controller.resources.csiAttacher | nindent 12 }}
- name: csi-snapshotter
{{- if hasPrefix "/" .Values.snapshot.image.csiSnapshotter.repository }}
image: "{{ .Values.image.baseRepo }}{{ .Values.snapshot.image.csiSnapshotter.repository }}:{{ .Values.snapshot.image.csiSnapshotter.tag }}"
11 changes: 0 additions & 11 deletions charts/latest/azurefile-csi-driver/values.yaml
@@ -8,10 +8,6 @@ image:
repository: /oss/kubernetes-csi/csi-provisioner
tag: v3.6.2
pullPolicy: IfNotPresent
csiAttacher:
repository: /oss/kubernetes-csi/csi-attacher
tag: v4.4.2
pullPolicy: IfNotPresent
csiResizer:
repository: /oss/kubernetes-csi/csi-resizer
tag: v1.9.2
@@ -70,13 +66,6 @@ controller:
requests:
cpu: 10m
memory: 20Mi
csiAttacher:
limits:
cpu: 1
memory: 500Mi
requests:
cpu: 10m
memory: 20Mi
csiResizer:
limits:
cpu: 1
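For chart users, the removed image.csiAttacher and controller.resources.csiAttacher keys are simply ignored by Helm from now on, so custom values files can drop them. A sketch of how to confirm the rendered chart no longer carries the sidecar container (the chart path matches this repo's layout; the release name is arbitrary):

# render the chart and count csi-attacher containers;
# expected to print 0 (grep exits non-zero when nothing matches)
helm template azurefile-csi-driver charts/latest/azurefile-csi-driver --namespace kube-system | grep -c 'name: csi-attacher'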
22 changes: 0 additions & 22 deletions deploy/csi-azurefile-controller.yaml
@@ -54,28 +54,6 @@ spec:
requests:
cpu: 10m
memory: 20Mi
- name: csi-attacher
image: mcr.microsoft.com/oss/kubernetes-csi/csi-attacher:v4.4.2
args:
- "-v=2"
- "-csi-address=$(ADDRESS)"
- "-timeout=120s"
- "--leader-election"
- "--leader-election-namespace=kube-system"
- "--kube-api-qps=50"
- "--kube-api-burst=100"
env:
- name: ADDRESS
value: /csi/csi.sock
volumeMounts:
- mountPath: /csi
name: socket-dir
resources:
limits:
memory: 500Mi
requests:
cpu: 10m
memory: 20Mi
- name: csi-snapshotter
image: mcr.microsoft.com/oss/kubernetes-csi/csi-snapshotter:v6.3.2
args:
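The same container was removed from the static manifest. On a cluster running the updated deployment, an illustrative way to confirm csi-attacher is gone from the pod spec (assumes the default kube-system install):

# list the controller's container names; csi-attacher should no longer appear
kubectl -n kube-system get deployment csi-azurefile-controller -o jsonpath='{.spec.template.spec.containers[*].name}'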
12 changes: 4 additions & 8 deletions hack/verify-helm-chart.sh
@@ -62,18 +62,14 @@ pip install --break-system-packages --ignore-installed --require-hashes -r ${PKG

# Extract images from csi-azurefile-controller.yaml
expected_csi_provisioner_image="$(cat ${PKG_ROOT}/deploy/csi-azurefile-controller.yaml | yq -r .spec.template.spec.containers[0].image | head -n 1)"
expected_csi_attacher_image="$(cat ${PKG_ROOT}/deploy/csi-azurefile-controller.yaml | yq -r .spec.template.spec.containers[1].image | head -n 1)"
expected_csi_snapshotter_image="$(cat ${PKG_ROOT}/deploy/csi-azurefile-controller.yaml | yq -r .spec.template.spec.containers[2].image | head -n 1)"
expected_csi_resizer_image="$(cat ${PKG_ROOT}/deploy/csi-azurefile-controller.yaml | yq -r .spec.template.spec.containers[3].image | head -n 1)"
expected_liveness_probe_image="$(cat ${PKG_ROOT}/deploy/csi-azurefile-controller.yaml | yq -r .spec.template.spec.containers[4].image | head -n 1)"
expected_azurefile_image="$(cat ${PKG_ROOT}/deploy/csi-azurefile-controller.yaml | yq -r .spec.template.spec.containers[5].image | head -n 1)"
expected_csi_snapshotter_image="$(cat ${PKG_ROOT}/deploy/csi-azurefile-controller.yaml | yq -r .spec.template.spec.containers[1].image | head -n 1)"
expected_csi_resizer_image="$(cat ${PKG_ROOT}/deploy/csi-azurefile-controller.yaml | yq -r .spec.template.spec.containers[2].image | head -n 1)"
expected_liveness_probe_image="$(cat ${PKG_ROOT}/deploy/csi-azurefile-controller.yaml | yq -r .spec.template.spec.containers[3].image | head -n 1)"
expected_azurefile_image="$(cat ${PKG_ROOT}/deploy/csi-azurefile-controller.yaml | yq -r .spec.template.spec.containers[4].image | head -n 1)"

csi_provisioner_image="$(get_image_from_helm_chart ".image.csiProvisioner")"
validate_image "${expected_csi_provisioner_image}" "${csi_provisioner_image}"

csi_attacher_image="$(get_image_from_helm_chart ".image.csiAttacher")"
validate_image "${expected_csi_attacher_image}" "${csi_attacher_image}"

csi_snapshotter_image="$(get_image_from_helm_chart ".snapshot.image.csiSnapshotter")"
validate_image "${expected_csi_snapshotter_image}" "${csi_snapshotter_image}"

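With the attacher container gone, every positional index after containers[0] shifts down by one, which is why the four expressions above had to be rewritten. A name-based lookup would avoid that churn the next time containers are reordered; a sketch, assuming the yq in use keeps accepting jq-style filters as the existing expressions do:

# hypothetical alternative: select the container by name instead of position
expected_csi_snapshotter_image="$(cat ${PKG_ROOT}/deploy/csi-azurefile-controller.yaml | yq -r '.spec.template.spec.containers[] | select(.name == "csi-snapshotter") | .image' | head -n 1)"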
115 changes: 4 additions & 111 deletions pkg/azurefile/controllerserver.go
@@ -810,120 +810,13 @@ func (d *Driver) ListVolumes(_ context.Context, _ *csi.ListVolumesRequest) (*csi
}

// ControllerPublishVolume make a volume available on some required node
func (d *Driver) ControllerPublishVolume(ctx context.Context, req *csi.ControllerPublishVolumeRequest) (*csi.ControllerPublishVolumeResponse, error) {
volumeID := req.GetVolumeId()
if len(volumeID) == 0 {
return nil, status.Error(codes.InvalidArgument, "Volume ID not provided")
}

volCap := req.GetVolumeCapability()
if volCap == nil {
return nil, status.Error(codes.InvalidArgument, "Volume capability not provided")
}

nodeID := req.GetNodeId()
if len(nodeID) == 0 {
return nil, status.Error(codes.InvalidArgument, "Node ID not provided")
}

volContext := req.GetVolumeContext()
_, accountName, accountKey, fileShareName, diskName, _, err := d.GetAccountInfo(ctx, volumeID, req.GetSecrets(), volContext)
// always check diskName first since if it's not vhd disk attach, ControllerPublishVolume is not necessary
if !strings.HasSuffix(diskName, vhdSuffix) {
klog.V(2).Infof("skip ControllerPublishVolume(%s) since it's not vhd disk attach", volumeID)
if useDataPlaneAPI(volContext) {
d.dataPlaneAPIVolMap.Store(volumeID, "")
}
return &csi.ControllerPublishVolumeResponse{}, nil
}
if err != nil {
return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("GetAccountInfo(%s) failed with error: %v", volumeID, err))
}

accessMode := volCap.GetAccessMode()
if accessMode == nil || accessMode.Mode == csi.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER {
return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("unsupported AccessMode(%v) for volume(%s)", volCap.GetAccessMode(), volumeID))
}

if accessMode.Mode == csi.VolumeCapability_AccessMode_SINGLE_NODE_READER_ONLY ||
accessMode.Mode == csi.VolumeCapability_AccessMode_MULTI_NODE_READER_ONLY {
// don't lock vhd disk here since it's readonly, while it's user's responsibility to make sure
// volume is used as ReadOnly, otherwise there would be data corruption for MULTI_NODE_MULTI_WRITER
klog.V(2).Infof("skip ControllerPublishVolume(%s) since volume is readonly mode", volumeID)
return &csi.ControllerPublishVolumeResponse{}, nil
}

if acquired := d.volumeLocks.TryAcquire(volumeID); !acquired {
return nil, status.Errorf(codes.Aborted, volumeOperationAlreadyExistsFmt, volumeID)
}
defer d.volumeLocks.Release(volumeID)

storageEndpointSuffix := d.cloud.Environment.StorageEndpointSuffix
fileURL, err := getFileURL(accountName, accountKey, storageEndpointSuffix, fileShareName, diskName)
if err != nil {
return nil, status.Error(codes.Internal, fmt.Sprintf("getFileURL(%s,%s,%s,%s) returned with error: %v", accountName, storageEndpointSuffix, fileShareName, diskName, err))
}
if fileURL == nil {
return nil, status.Error(codes.Internal, fmt.Sprintf("getFileURL(%s,%s,%s,%s) returned empty fileURL", accountName, storageEndpointSuffix, fileShareName, diskName))
}

properties, err := fileURL.GetProperties(ctx)
if err != nil {
return nil, status.Error(codes.Internal, fmt.Sprintf("GetProperties for volume(%s) on node(%s) returned with error: %v", volumeID, nodeID, err))
}

attachedNodeID, ok := properties.NewMetadata()[metaDataNode]
if ok && attachedNodeID != "" && !strings.EqualFold(attachedNodeID, nodeID) {
return nil, status.Error(codes.Internal, fmt.Sprintf("volume(%s) cannot be attached to node(%s) since it's already attached to node(%s)", volumeID, nodeID, attachedNodeID))
}
if _, err = fileURL.SetMetadata(ctx, azfile.Metadata{metaDataNode: nodeID}); err != nil {
return nil, status.Error(codes.Internal, fmt.Sprintf("SetMetadata for volume(%s) on node(%s) returned with error: %v", volumeID, nodeID, err))
}
klog.V(2).Infof("ControllerPublishVolume: volume(%s) attached to node(%s) successfully", volumeID, nodeID)
return &csi.ControllerPublishVolumeResponse{}, nil
func (d *Driver) ControllerPublishVolume(_ context.Context, _ *csi.ControllerPublishVolumeRequest) (*csi.ControllerPublishVolumeResponse, error) {
return nil, status.Error(codes.Unimplemented, "")
}

// ControllerUnpublishVolume detach the volume on a specified node
func (d *Driver) ControllerUnpublishVolume(ctx context.Context, req *csi.ControllerUnpublishVolumeRequest) (*csi.ControllerUnpublishVolumeResponse, error) {
volumeID := req.GetVolumeId()
if len(volumeID) == 0 {
return nil, status.Error(codes.InvalidArgument, "Volume ID not provided")
}

nodeID := req.GetNodeId()
if len(nodeID) == 0 {
return nil, status.Error(codes.InvalidArgument, "Node ID not provided")
}

_, accountName, accountKey, fileShareName, diskName, _, err := d.GetAccountInfo(ctx, volumeID, req.GetSecrets(), map[string]string{})
// always check diskName first since if it's not vhd disk detach, ControllerUnpublishVolume is not necessary
if !strings.HasSuffix(diskName, vhdSuffix) {
klog.V(2).Infof("skip ControllerUnpublishVolume(%s) since it's not vhd disk detach", volumeID)
return &csi.ControllerUnpublishVolumeResponse{}, nil
}
if err != nil {
return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("GetAccountInfo(%s) failed with error: %v", volumeID, err))
}

if acquired := d.volumeLocks.TryAcquire(volumeID); !acquired {
return nil, status.Errorf(codes.Aborted, volumeOperationAlreadyExistsFmt, volumeID)
}
defer d.volumeLocks.Release(volumeID)

storageEndpointSuffix := d.cloud.Environment.StorageEndpointSuffix
fileURL, err := getFileURL(accountName, accountKey, storageEndpointSuffix, fileShareName, diskName)
if err != nil {
return nil, status.Error(codes.Internal, fmt.Sprintf("getFileURL(%s,%s,%s,%s) returned with error: %v", accountName, storageEndpointSuffix, fileShareName, diskName, err))
}
if fileURL == nil {
return nil, status.Error(codes.Internal, fmt.Sprintf("getFileURL(%s,%s,%s,%s) returned empty fileURL", accountName, storageEndpointSuffix, fileShareName, diskName))
}

if _, err = fileURL.SetMetadata(ctx, azfile.Metadata{metaDataNode: ""}); err != nil {
return nil, status.Error(codes.Internal, fmt.Sprintf("SetMetadata for volume(%s) on node(%s) returned with error: %v", volumeID, nodeID, err))
}
klog.V(2).Infof("ControllerUnpublishVolume: volume(%s) detached from node(%s) successfully", volumeID, nodeID)
return &csi.ControllerUnpublishVolumeResponse{}, nil
func (d *Driver) ControllerUnpublishVolume(_ context.Context, _ *csi.ControllerUnpublishVolumeRequest) (*csi.ControllerUnpublishVolumeResponse, error) {
return nil, status.Error(codes.Unimplemented, "")
}

// CreateSnapshot create a snapshot
