-
Notifications
You must be signed in to change notification settings - Fork 0
/
nodeserver.go
349 lines (303 loc) · 12.6 KB
/
nodeserver.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
package hostpath
import (
"os"
"fmt"
"strings"
"github.com/golang/glog"
"golang.org/x/net/context"
"github.com/container-storage-interface/spec/lib/go/csi"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/volume/util/volumepathhandler"
)
// TopologyKeyNode is the topology segment key used to pin volumes to the
// node they were created on (hostpath volumes are node-local).
const TopologyKeyNode = "topology.hostpath.csi/node"

// nodeServer implements the CSI Node service for the hostpath plugin.
type nodeServer struct {
	nodeID           string // identifier reported via NodeGetInfo and used as the topology segment value
	ephemeral        bool   // when true, volumes are treated as inline ephemeral volumes by default
	maxVolumePerNode int64  // value reported as MaxVolumesPerNode in NodeGetInfo
}

// NewNodeServer returns a nodeServer configured with the given node identity,
// ephemeral mode, and per-node volume limit.
func NewNodeServer(nodeId string, ephemeral bool, maxVolumesPerNode int64) *nodeServer {
	return &nodeServer{
		nodeID:           nodeId,
		ephemeral:        ephemeral,
		maxVolumePerNode: maxVolumesPerNode,
	}
}
// NodePublishVolume is called to mount the volume from staging to target path.
// Usually what you do here is a bind mount. A bind mount allows you to mount a
// path to a different path (instead of mounting a device to a path). In
// Kubernetes, this allows us for example to use the mounted volume from the
// staging path (i.e. global directory) to the target path (pod directory).
// Here, formatting is not needed because we already did it in NodeStageVolume.
func (ns *nodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublishVolumeRequest) (*csi.NodePublishVolumeResponse, error) {
	// Check arguments.
	if req.GetVolumeCapability() == nil {
		return nil, status.Error(codes.InvalidArgument, "Volume capability missing in request")
	}
	if len(req.GetVolumeId()) == 0 {
		return nil, status.Error(codes.InvalidArgument, "Volume ID missing in request")
	}
	if len(req.GetTargetPath()) == 0 {
		return nil, status.Error(codes.InvalidArgument, "Target path missing in request")
	}

	targetPath := req.GetTargetPath()
	// An inline ephemeral volume is requested either explicitly through the
	// volume context or implicitly when the plugin runs in ephemeral mode.
	// Kubernetes 1.15 doesn't set csi.storage.k8s.io/ephemeral.
	ephemeralVolume := req.GetVolumeContext()["csi.storage.k8s.io/ephemeral"] == "true" ||
		req.GetVolumeContext()["csi.storage.k8s.io/ephemeral"] == "" && ns.ephemeral

	if req.GetVolumeCapability().GetBlock() != nil &&
		req.GetVolumeCapability().GetMount() != nil {
		return nil, status.Error(codes.InvalidArgument, "cannot have both block and mount access type")
	}

	// If ephemeral is specified, create the volume here to avoid errors
	// (ephemeral volumes never go through CreateVolume). An os.IsExist error
	// is tolerated so repeated publish calls stay idempotent.
	if ephemeralVolume {
		volID := req.GetVolumeId()
		volName := fmt.Sprintf("ephemeral=%s", volID)
		vol, err := createHostpathVolume(req.GetVolumeId(), volName, maxStorageCapacity, mountAccess, ephemeralVolume)
		if err != nil && !os.IsExist(err) {
			glog.Error("ephemeral mode failed to create volume: ", err)
			return nil, status.Error(codes.Internal, err.Error())
		}
		// Only log on success: when err is an os.IsExist error, vol may be
		// nil and dereferencing vol.VolPath would panic.
		if err == nil {
			glog.V(4).Infof("ephemeral mode: created volume: %s", vol.VolPath)
		}
	}

	vol, err := getVolumeByID(req.GetVolumeId())
	if err != nil {
		return nil, status.Error(codes.NotFound, err.Error())
	}

	if req.GetVolumeCapability().GetBlock() != nil {
		if vol.VolAccessType != blockAccess {
			return nil, status.Error(codes.InvalidArgument, "cannot publish a non-block volume as block volume")
		}

		volPathHandler := volumepathhandler.VolumePathHandler{}

		// Get the loop device backing the volume file.
		loopDevice, err := volPathHandler.GetLoopDevice(vol.VolPath)
		if err != nil {
			return nil, status.Error(codes.Internal, fmt.Sprintf("failed to get the loop device: %v", err))
		}

		mounter := mount.New("")

		// Check if the target path exists. Create the device file if not present.
		_, err = os.Lstat(targetPath)
		if os.IsNotExist(err) {
			if err = makeFile(targetPath); err != nil {
				return nil, status.Error(codes.Internal, fmt.Sprintf("failed to create target path: %s: %v", targetPath, err))
			}
		}
		if err != nil {
			return nil, status.Errorf(codes.Internal, "failed to check if the target block file exists: %v", err)
		}

		// Check if the target path is already mounted. Prevent remounting.
		notMount, err := mount.IsNotMountPoint(mounter, targetPath)
		if err != nil {
			if !os.IsNotExist(err) {
				return nil, status.Errorf(codes.Internal, "error checking path %s for mount: %s", targetPath, err)
			}
			notMount = true
		}
		if !notMount {
			// It's already mounted.
			glog.V(5).Infof("Skipping bind-mounting subpath %s: already mounted", targetPath)
			return &csi.NodePublishVolumeResponse{}, nil
		}

		options := []string{"bind"}
		// Reuse the mounter created above instead of constructing a second one.
		if err := mounter.Mount(loopDevice, targetPath, "", options); err != nil {
			return nil, status.Error(codes.Internal, fmt.Sprintf("failed to mount block device: %s at %s: %v", loopDevice, targetPath, err))
		}
	} else if req.GetVolumeCapability().GetMount() != nil {
		if vol.VolAccessType != mountAccess {
			return nil, status.Error(codes.InvalidArgument, "cannot publish a non-mount volume as mount volume")
		}

		mounter := mount.New("")
		notMnt, err := mount.IsNotMountPoint(mounter, targetPath)
		if err != nil {
			if os.IsNotExist(err) {
				// Target directory does not exist yet; create it.
				if err = os.MkdirAll(targetPath, 0750); err != nil {
					return nil, status.Error(codes.Internal, err.Error())
				}
				notMnt = true
			} else {
				return nil, status.Error(codes.Internal, err.Error())
			}
		}
		if !notMnt {
			// Already mounted; nothing to do (idempotent).
			return &csi.NodePublishVolumeResponse{}, nil
		}

		fsType := req.GetVolumeCapability().GetMount().GetFsType()

		deviceId := ""
		if req.GetPublishContext() != nil {
			deviceId = req.GetPublishContext()[deviceID]
		}

		readOnly := req.GetReadonly()
		volumeId := req.GetVolumeId()
		attrib := req.GetVolumeContext()
		mountFlags := req.GetVolumeCapability().GetMount().GetMountFlags()

		glog.V(4).Infof("target %v", targetPath)
		glog.V(4).Infof("fstype %v", fsType)
		glog.V(4).Infof("device %v", deviceId)
		glog.V(4).Infof("readonly %v", readOnly)
		glog.V(4).Infof("volumeId %v", volumeId)
		glog.V(4).Infof("attributes %v", attrib)
		glog.V(4).Infof("mountflags %v", mountFlags)

		options := []string{"bind"}
		if readOnly {
			options = append(options, "ro")
		}
		path := getVolumePath(volumeId)

		if err := mounter.Mount(path, targetPath, "", options); err != nil {
			var errList strings.Builder
			errList.WriteString(err.Error())
			// Best-effort cleanup of an ephemeral volume's directory when its
			// publish fails; append any cleanup error to the reported message.
			if vol.Ephemeral {
				if rmErr := os.RemoveAll(path); rmErr != nil && !os.IsNotExist(rmErr) {
					errList.WriteString(fmt.Sprintf(" :%s", rmErr.Error()))
				}
			}
			return nil, status.Error(codes.Internal, fmt.Sprintf("failed to mount block device: %s at %s: %s", path, targetPath, errList.String()))
		}
	}

	return &csi.NodePublishVolumeResponse{}, nil
}
// NodeUnpublishVolume is the reverse of NodePublishVolume: it unmounts the
// volume from the target path, removes the target path, and — for ephemeral
// volumes — deletes the backing hostpath volume as well.
func (ns *nodeServer) NodeUnpublishVolume(ctx context.Context, req *csi.NodeUnpublishVolumeRequest) (*csi.NodeUnpublishVolumeResponse, error) {
	// Validate required request fields.
	if len(req.GetVolumeId()) == 0 {
		return nil, status.Error(codes.InvalidArgument, "Volume ID missing in request")
	}
	if len(req.GetTargetPath()) == 0 {
		return nil, status.Error(codes.InvalidArgument, "Target path missing in request")
	}

	targetPath := req.GetTargetPath()
	volumeID := req.GetVolumeId()

	vol, err := getVolumeByID(volumeID)
	if err != nil {
		return nil, status.Error(codes.NotFound, err.Error())
	}

	// Unmount only if the target path is really a mount point; a missing
	// path is fine (nothing to unmount).
	mounter := mount.New("")
	notMnt, mntErr := mount.IsNotMountPoint(mounter, targetPath)
	switch {
	case mntErr != nil && !os.IsNotExist(mntErr):
		return nil, status.Error(codes.Internal, mntErr.Error())
	case mntErr == nil && !notMnt:
		// Unmounting the image or filesystem.
		if umountErr := mounter.Unmount(targetPath); umountErr != nil {
			return nil, status.Error(codes.Internal, umountErr.Error())
		}
	}

	// Delete the mount point. RemoveAll does not fail on a non-existent
	// path, so repeated calls stay idempotent.
	if err = os.RemoveAll(targetPath); err != nil {
		return nil, status.Error(codes.Internal, err.Error())
	}
	glog.V(4).Infof("hostpath: volume %s has been unpublished.", targetPath)

	// Ephemeral volumes live only as long as the pod; remove the backing
	// volume now that it is unpublished.
	if vol.Ephemeral {
		glog.V(4).Infof("deleting volume %s", volumeID)
		if err := deleteHostpathVolume(volumeID); err != nil && !os.IsNotExist(err) {
			return nil, status.Error(codes.Internal, fmt.Sprintf("failed to delete volume: %s", err))
		}
	}

	return &csi.NodeUnpublishVolumeResponse{}, nil
}
// NodeStageVolume is called by the Container Orchestrator to temporarily mount
// the volume to a staging path. Usually this staging path is a global directory
// on the node. In Kubernetes, after it's mounted to the global directory, you
// mount it into the pod directory (via NodePublishVolume). Staging is a two
// step operation because Kubernetes allows a single volume to be used by
// multiple pods, when the storage system supports it (say NFS) or when all
// pods run on the same node. Note that a real driver would also format the
// volume here if it's not formatted already; the hostpath plugin has nothing
// to stage, so this only validates the request.
func (ns *nodeServer) NodeStageVolume(ctx context.Context, req *csi.NodeStageVolumeRequest) (*csi.NodeStageVolumeResponse, error) {
	// Validate required request fields.
	switch {
	case len(req.GetVolumeId()) == 0:
		return nil, status.Error(codes.InvalidArgument, "Volume ID missing in request")
	case len(req.GetStagingTargetPath()) == 0:
		return nil, status.Error(codes.InvalidArgument, "Target path missing in request")
	case req.GetVolumeCapability() == nil:
		return nil, status.Error(codes.InvalidArgument, "Volume Capability missing in request")
	}
	return &csi.NodeStageVolumeResponse{}, nil
}
// NodeUnstageVolume is the reverse of NodeStageVolume. Called by the Container
// Orchestrator to unmount the volume from the staging path. The hostpath
// plugin stages nothing, so this only validates the request.
func (ns *nodeServer) NodeUnstageVolume(ctx context.Context, req *csi.NodeUnstageVolumeRequest) (*csi.NodeUnstageVolumeResponse, error) {
	// Check arguments.
	if len(req.GetVolumeId()) == 0 {
		return nil, status.Error(codes.InvalidArgument, "Volume ID missing in request")
	}
	if len(req.GetStagingTargetPath()) == 0 {
		// Fixed typo in the error message ("missin gin" -> "missing in").
		return nil, status.Error(codes.InvalidArgument, "Target path missing in request")
	}
	return &csi.NodeUnstageVolumeResponse{}, nil
}
// NodeGetInfo returns this node's identity and constraints. The result is
// used by the Controller Orchestrator to decide where to place workloads and
// is later passed to ControllerPublishVolume; the topology segment pins
// hostpath volumes to the node they were created on.
func (ns *nodeServer) NodeGetInfo(ctx context.Context, req *csi.NodeGetInfoRequest) (*csi.NodeGetInfoResponse, error) {
	resp := &csi.NodeGetInfoResponse{
		NodeId:            ns.nodeID,
		MaxVolumesPerNode: ns.maxVolumePerNode,
	}
	resp.AccessibleTopology = &csi.Topology{
		Segments: map[string]string{TopologyKeyNode: ns.nodeID},
	}
	return resp, nil
}
// NodeGetCapabilities returns the capabilities of the Node plugin. For
// example, if you don't advertise RPC_STAGE_UNSTAGE_VOLUME, the Controller
// Orchestrator will never call NodeStageVolume / NodeUnstageVolume.
func (ns *nodeServer) NodeGetCapabilities(ctx context.Context, req *csi.NodeGetCapabilitiesRequest) (*csi.NodeGetCapabilitiesResponse, error) {
	// Advertised RPC capabilities; each is wrapped in the generated
	// NodeServiceCapability envelope below.
	rpcTypes := []csi.NodeServiceCapability_RPC_Type{
		csi.NodeServiceCapability_RPC_STAGE_UNSTAGE_VOLUME,
		csi.NodeServiceCapability_RPC_EXPAND_VOLUME,
	}

	caps := make([]*csi.NodeServiceCapability, 0, len(rpcTypes))
	for _, t := range rpcTypes {
		caps = append(caps, &csi.NodeServiceCapability{
			Type: &csi.NodeServiceCapability_Rpc{
				Rpc: &csi.NodeServiceCapability_RPC{
					Type: t,
				},
			},
		})
	}

	return &csi.NodeGetCapabilitiesResponse{Capabilities: caps}, nil
}
// NodeGetVolumeStats is not implemented by this plugin; it always returns
// codes.Unimplemented. (It is also not advertised in NodeGetCapabilities.)
func (ns *nodeServer) NodeGetVolumeStats(ctx context.Context, in *csi.NodeGetVolumeStatsRequest) (*csi.NodeGetVolumeStatsResponse, error) {
	return nil, status.Error(codes.Unimplemented, "")
}
// NodeExpandVolume validates an expansion request against the volume present
// on this node. Hostpath volumes need no node-side resize work, so the call
// only checks that the volume exists and that the supplied path matches the
// volume's access type (directory for mount volumes, device for block).
func (ns *nodeServer) NodeExpandVolume(ctx context.Context, req *csi.NodeExpandVolumeRequest) (*csi.NodeExpandVolumeResponse, error) {
	volID := req.GetVolumeId()
	if len(volID) == 0 {
		return nil, status.Error(codes.InvalidArgument, "Volume ID not provided")
	}

	vol, err := getVolumeByID(volID)
	if err != nil {
		// Assume not found error. (Also fixed typo "Cound" -> "Could".)
		return nil, status.Errorf(codes.NotFound, "Could not get volume %s: %v", volID, err)
	}

	volPath := req.GetVolumePath()
	if len(volPath) == 0 {
		return nil, status.Error(codes.InvalidArgument, "Volume path not provided")
	}

	info, err := os.Stat(volPath)
	if err != nil {
		return nil, status.Errorf(codes.InvalidArgument, "Could not get file information from %s: %v", volPath, err)
	}

	// The on-disk object must match the volume's declared access type.
	switch m := info.Mode(); {
	case m.IsDir():
		if vol.VolAccessType != mountAccess {
			return nil, status.Errorf(codes.InvalidArgument, "Volume %s is not a directory", volID)
		}
	case m&os.ModeDevice != 0:
		if vol.VolAccessType != blockAccess {
			return nil, status.Errorf(codes.InvalidArgument, "Volume %s is invalid", volID)
		}
	default:
		return nil, status.Errorf(codes.InvalidArgument, "Volume %s is invalid", volID)
	}

	return &csi.NodeExpandVolumeResponse{}, nil
}