Support UNSPECIFIED zone and project in volume ID. Static driver name. Prep for Migration
davidz627 committed Oct 9, 2018
1 parent 5291315 commit bbc6725
Showing 8 changed files with 132 additions and 21 deletions.
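
In short: a volume ID may now carry the sentinel value UNSPECIFIED in place of a concrete project or zone, for example projects/UNSPECIFIED/zones/UNSPECIFIED/disks/my-disk (my-disk is a hypothetical disk name), and the cloud provider repairs such underspecified keys by probing the zones of its own region before acting on them. The driver name also becomes the fixed constant com.google.csi.gcepd instead of being configurable through the drivername flag.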
7 changes: 5 additions & 2 deletions cmd/main.go
@@ -34,10 +34,13 @@ func init() {

var (
endpoint = flag.String("endpoint", "unix:/tmp/csi.sock", "CSI endpoint")
driverName = flag.String("drivername", "com.google.csi.gcepd", "name of the driver")
vendorVersion string
)

const (
driverName = "com.google.csi.gcepd"
)

func main() {
flag.Parse()
rand.Seed(time.Now().UnixNano())
@@ -67,7 +70,7 @@ func handle() {
glog.Fatalf("Failed to set up metadata service: %v", err)
}

err = gceDriver.SetupGCEDriver(cloudProvider, mounter, deviceUtils, ms, *driverName, vendorVersion)
err = gceDriver.SetupGCEDriver(cloudProvider, mounter, deviceUtils, ms, driverName, vendorVersion)
if err != nil {
glog.Fatalf("Failed to initialize GCE CSI Driver: %v", err)
}
2 changes: 2 additions & 0 deletions pkg/common/constants.go
@@ -26,4 +26,6 @@ const (

// VolumeAttributes for Partition
VolumeAttributePartition = "partition"

UnspecifiedValue = "UNSPECIFIED"
)
9 changes: 9 additions & 0 deletions pkg/common/utils.go
@@ -27,7 +27,9 @@ import (
const (
// Volume ID Expected Format
// "projects/{projectName}/zones/{zoneName}/disks/{diskName}"
volIDZonalFmt = "projects/%s/zones/%s/disks/%s"
// "projects/{projectName}/regions/{regionName}/disks/{diskName}"
volIDRegionalFmt = "projects/%s/regions/%s/disks/%s"
volIDToplogyKey = 2
volIDToplogyValue = 3
volIDDiskNameValue = 5
@@ -71,6 +73,13 @@ func VolumeIDToKey(id string) (*meta.Key, error) {
}
}

func GenerateUnderspecifiedVolumeID(diskName string, isZonal bool) string {
if isZonal {
return fmt.Sprintf(volIDZonalFmt, UnspecifiedValue, UnspecifiedValue, diskName)
}
return fmt.Sprintf(volIDRegionalFmt, UnspecifiedValue, UnspecifiedValue, diskName)
}

func SnapshotIDToKey(id string) (string, error) {
splitId := strings.Split(id, "/")
if len(splitId) != snapshotTotalElements {
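
To illustrate the new common.GenerateUnderspecifiedVolumeID helper, here is a minimal standalone sketch; the constants are copied locally so it runs outside the driver's package tree, and the lowercase names are otherwise hypothetical:

package main

import "fmt"

const (
	unspecifiedValue = "UNSPECIFIED"
	volIDZonalFmt    = "projects/%s/zones/%s/disks/%s"
	volIDRegionalFmt = "projects/%s/regions/%s/disks/%s"
)

// generateUnderspecifiedVolumeID mirrors common.GenerateUnderspecifiedVolumeID:
// it fills the project and zone/region slots with the UNSPECIFIED sentinel.
func generateUnderspecifiedVolumeID(diskName string, isZonal bool) string {
	if isZonal {
		return fmt.Sprintf(volIDZonalFmt, unspecifiedValue, unspecifiedValue, diskName)
	}
	return fmt.Sprintf(volIDRegionalFmt, unspecifiedValue, unspecifiedValue, diskName)
}

func main() {
	// Prints: projects/UNSPECIFIED/zones/UNSPECIFIED/disks/my-disk
	fmt.Println(generateUnderspecifiedVolumeID("my-disk", true /* isZonal */))
}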
14 changes: 9 additions & 5 deletions pkg/gce-cloud-provider/compute/fake-gce.go
@@ -60,6 +60,10 @@ func FakeCreateCloudProvider(project, zone string) (*FakeCloudProvider, error) {

}

func (cloud *FakeCloudProvider) RepairUnderspecifiedVolumeKey(ctx context.Context, volumeKey *meta.Key) (*meta.Key, error) {
return volumeKey, nil
}

func (cloud *FakeCloudProvider) ListZones(ctx context.Context, region string) ([]string, error) {
return []string{cloud.zone, "country-region-fakesecondzone"}, nil
}
@@ -199,12 +203,12 @@ func (cloud *FakeCloudProvider) DeleteDisk(ctx context.Context, volKey *meta.Key
return nil
}

func (cloud *FakeCloudProvider) AttachDisk(ctx context.Context, disk *CloudDisk, volKey *meta.Key, readWrite, diskType, instanceZone, instanceName string) error {
func (cloud *FakeCloudProvider) AttachDisk(ctx context.Context, volKey *meta.Key, readWrite, diskType, instanceZone, instanceName string) error {
source := cloud.GetDiskSourceURI(volKey)

attachedDiskV1 := &compute.AttachedDisk{
DeviceName: disk.GetName(),
Kind: disk.GetKind(),
DeviceName: volKey.Name,
Kind: diskKind,
Mode: readWrite,
Source: source,
Type: diskType,
@@ -217,14 +221,14 @@ func (cloud *FakeCloudProvider) AttachDisk(ctx context.Context, disk *CloudDisk,
return nil
}

func (cloud *FakeCloudProvider) DetachDisk(ctx context.Context, volKey *meta.Key, instanceZone, instanceName string) error {
func (cloud *FakeCloudProvider) DetachDisk(ctx context.Context, deviceName, instanceZone, instanceName string) error {
instance, ok := cloud.instances[instanceName]
if !ok {
return fmt.Errorf("Failed to get instance %v", instanceName)
}
found := -1
for i, disk := range instance.Disks {
if disk.DeviceName == volKey.Name {
if disk.DeviceName == deviceName {
found = i
break
}
57 changes: 47 additions & 10 deletions pkg/gce-cloud-provider/compute/gce-compute.go
@@ -34,16 +34,18 @@ import (
const (
operationStatusDone = "DONE"
waitForSnapshotCreationTimeOut = 2 * time.Minute
diskKind = "compute#disk"
)

type GCECompute interface {
// Disk Methods
GetDisk(ctx context.Context, volumeKey *meta.Key) (*CloudDisk, error)
RepairUnderspecifiedVolumeKey(ctx context.Context, volumeKey *meta.Key) (*meta.Key, error)
ValidateExistingDisk(ctx context.Context, disk *CloudDisk, diskType string, reqBytes, limBytes int64) error
InsertDisk(ctx context.Context, volKey *meta.Key, diskType string, capBytes int64, capacityRange *csi.CapacityRange, replicaZones []string) error
DeleteDisk(ctx context.Context, volumeKey *meta.Key) error
AttachDisk(ctx context.Context, disk *CloudDisk, volKey *meta.Key, readWrite, diskType, instanceZone, instanceName string) error
DetachDisk(ctx context.Context, volKey *meta.Key, instanceZone, instanceName string) error
AttachDisk(ctx context.Context, volKey *meta.Key, readWrite, diskType, instanceZone, instanceName string) error
DetachDisk(ctx context.Context, deviceName string, instanceZone, instanceName string) error
GetDiskSourceURI(volKey *meta.Key) string
GetDiskTypeURI(volKey *meta.Key, diskType string) string
WaitForAttach(ctx context.Context, volKey *meta.Key, instanceZone, instanceName string) error
@@ -59,7 +61,46 @@ type GCECompute interface {
DeleteSnapshot(ctx context.Context, snapshotName string) error
}

// RepairUnderspecifiedVolumeKey will query the cloud provider and check each zone for the disk specified
// by the volume key and return a volume key with a correct zone
func (cloud *CloudProvider) RepairUnderspecifiedVolumeKey(ctx context.Context, volumeKey *meta.Key) (*meta.Key, error) {
region, err := common.GetRegionFromZones([]string{cloud.zone})
if err != nil {
return nil, fmt.Errorf("failed to get region from zones: %v", err)
}
switch volumeKey.Type() {
case meta.Zonal:
if volumeKey.Zone == common.UnspecifiedValue {
// list all zones, try to get disk in each zone
zones, err := cloud.ListZones(ctx, region)
if err != nil {
return nil, err
}
for _, zone := range zones {
_, err := cloud.getZonalDiskOrError(ctx, zone, volumeKey.Name)
if err == nil {
// If there is no error we have found a disk
volumeKey.Zone = zone
return volumeKey, nil
}
}
return nil, fmt.Errorf("volume zone unspecified and unable to find in any of these zones %v", zones)
}
return volumeKey, nil
case meta.Regional:
if volumeKey.Region == common.UnspecifiedValue {
volumeKey.Region = region
}
return volumeKey, nil
default:
return nil, fmt.Errorf("key was neither zonal nor regional, got: %v", volumeKey.String())
}
}

func (cloud *CloudProvider) ListZones(ctx context.Context, region string) ([]string, error) {
if len(cloud.zonesCache[region]) > 0 {
return cloud.zonesCache[region], nil
}
zones := []string{}
zoneList, err := cloud.service.Zones.List(cloud.project).Filter(fmt.Sprintf("region eq .*%s$", region)).Do()
if err != nil {
@@ -68,6 +109,7 @@ func (cloud *CloudProvider) ListZones(ctx context.Context, region string) ([]str
for _, zone := range zoneList.Items {
zones = append(zones, zone.Name)
}
cloud.zonesCache[region] = zones
return zones, nil

}
@@ -315,7 +357,7 @@ func (cloud *CloudProvider) deleteRegionalDisk(ctx context.Context, region, name
return nil
}

func (cloud *CloudProvider) AttachDisk(ctx context.Context, disk *CloudDisk, volKey *meta.Key, readWrite, diskType, instanceZone, instanceName string) error {
func (cloud *CloudProvider) AttachDisk(ctx context.Context, volKey *meta.Key, readWrite, diskType, instanceZone, instanceName string) error {
source := cloud.GetDiskSourceURI(volKey)

deviceName, err := common.GetDeviceName(volKey)
@@ -324,7 +366,7 @@ func (cloud *CloudProvider) AttachDisk(ctx context.Context, disk *CloudDisk, vol
}
attachedDiskV1 := &compute.AttachedDisk{
DeviceName: deviceName,
Kind: disk.GetKind(),
Kind: diskKind,
Mode: readWrite,
Source: source,
Type: diskType,
@@ -341,12 +383,7 @@ func (cloud *CloudProvider) AttachDisk(ctx context.Context, disk *CloudDisk, vol
return nil
}

func (cloud *CloudProvider) DetachDisk(ctx context.Context, volKey *meta.Key, instanceZone, instanceName string) error {
deviceName, err := common.GetDeviceName(volKey)
if err != nil {
return err
}

func (cloud *CloudProvider) DetachDisk(ctx context.Context, deviceName, instanceZone, instanceName string) error {
op, err := cloud.service.Instances.DetachDisk(cloud.project, instanceZone, instanceName, deviceName).Context(ctx).Do()
if err != nil {
return err
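
Two things worth noting in this file: only zonal keys need zone probing (for regional keys the region is derived directly from the driver's own zone via GetRegionFromZones), and ListZones now memoizes its result in zonesCache so repeated repairs in the same region do not re-query the Zones API. A self-contained sketch of the zonal repair strategy follows (hypothetical names and a stubbed disk lookup; the real method operates on *meta.Key and calls getZonalDiskOrError):

package main

import "fmt"

const unspecified = "UNSPECIFIED"

// diskInZone stands in for cloud.getZonalDiskOrError: report whether the
// named disk exists in the given zone.
func diskInZone(zone, disk string) bool {
	return zone == "us-central1-b" && disk == "my-disk" // pretend location
}

// repairZonalKey fills in the zone of an underspecified zonal key by probing
// each zone of the region until the disk is found.
func repairZonalKey(zone, disk string, zonesInRegion []string) (string, error) {
	if zone != unspecified {
		return zone, nil
	}
	for _, z := range zonesInRegion {
		if diskInZone(z, disk) {
			return z, nil
		}
	}
	return "", fmt.Errorf("volume zone unspecified and disk %s not found in any of %v", disk, zonesInRegion)
}

func main() {
	zone, err := repairZonalKey(unspecified, "my-disk", []string{"us-central1-a", "us-central1-b", "us-central1-c"})
	fmt.Println(zone, err) // us-central1-b <nil>
}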
3 changes: 3 additions & 0 deletions pkg/gce-cloud-provider/compute/gce.go
@@ -51,6 +51,8 @@ type CloudProvider struct {
betaService *beta.Service
project string
zone string

zonesCache map[string]([]string)
}

var _ GCECompute = &CloudProvider{}
@@ -76,6 +78,7 @@ func CreateCloudProvider(vendorVersion string) (*CloudProvider, error) {
betaService: betasvc,
project: project,
zone: zone,
zonesCache: make(map[string]([]string)),
}, nil

}
17 changes: 13 additions & 4 deletions pkg/gce-pd-csi-driver/controller.go
@@ -191,6 +191,11 @@ func (gceCS *GCEControllerServer) DeleteVolume(ctx context.Context, req *csi.Del
return &csi.DeleteVolumeResponse{}, nil
}

volKey, err = gceCS.CloudProvider.RepairUnderspecifiedVolumeKey(ctx, volKey)
if err != nil {
return nil, status.Error(codes.NotFound, fmt.Sprintf("Could not find volume with ID %v: %v", volumeID, err))
}

err = gceCS.CloudProvider.DeleteDisk(ctx, volKey)
if err != nil {
return nil, status.Error(codes.Internal, fmt.Sprintf("unknown Delete disk error: %v", err))
@@ -222,13 +227,17 @@ func (gceCS *GCEControllerServer) ControllerPublishVolume(ctx context.Context, r
return nil, status.Error(codes.NotFound, fmt.Sprintf("Could not find volume with ID %v: %v", volumeID, err))
}

volKey, err = gceCS.CloudProvider.RepairUnderspecifiedVolumeKey(ctx, volKey)
if err != nil {
return nil, status.Error(codes.NotFound, fmt.Sprintf("Could not find volume with ID %v: %v", volumeID, err))
}
// TODO(#94): Check volume capability matches

pubVolResp := &csi.ControllerPublishVolumeResponse{
PublishInfo: nil,
}

disk, err := gceCS.CloudProvider.GetDisk(ctx, volKey)
_, err = gceCS.CloudProvider.GetDisk(ctx, volKey)
if err != nil {
if gce.IsGCEError(err, "notFound") {
return nil, status.Error(codes.NotFound, fmt.Sprintf("Could not find disk %v: %v", volKey.String(), err))
@@ -270,7 +279,7 @@ func (gceCS *GCEControllerServer) ControllerPublishVolume(ctx context.Context, r
if err != nil {
return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("could not split nodeID: %v", err))
}
err = gceCS.CloudProvider.AttachDisk(ctx, disk, volKey, readWrite, attachableDiskTypePersistent, instanceZone, instanceName)
err = gceCS.CloudProvider.AttachDisk(ctx, volKey, readWrite, attachableDiskTypePersistent, instanceZone, instanceName)
if err != nil {
return nil, status.Error(codes.Internal, fmt.Sprintf("unknown Attach error: %v", err))
}
@@ -282,7 +291,7 @@ func (gceCS *GCEControllerServer) ControllerPublishVolume(ctx context.Context, r
return nil, status.Error(codes.Internal, fmt.Sprintf("unknown WaitForAttach error: %v", err))
}

glog.V(4).Infof("Disk %v attached to instance %v successfully", disk.GetName(), nodeID)
glog.V(4).Infof("Disk %v attached to instance %v successfully", volKey.Name, nodeID)
return pubVolResp, nil
}

@@ -326,7 +335,7 @@ func (gceCS *GCEControllerServer) ControllerUnpublishVolume(ctx context.Context,
return &csi.ControllerUnpublishVolumeResponse{}, nil
}

err = gceCS.CloudProvider.DetachDisk(ctx, volKey, instanceZone, instanceName)
err = gceCS.CloudProvider.DetachDisk(ctx, deviceName, instanceZone, instanceName)
if err != nil {
return nil, status.Error(codes.Internal, fmt.Sprintf("unknown detach error: %v", err))
}
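
Controller-side, the changes above mean that DeleteVolume and ControllerPublishVolume first pass the parsed key through RepairUnderspecifiedVolumeKey, surfacing NotFound when no zone in the region holds the disk. AttachDisk no longer needs the CloudDisk object, since the attached disk's Kind is the fixed diskKind constant and its DeviceName comes from the volume key, and ControllerUnpublishVolume now detaches by device name rather than by volume key.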
44 changes: 44 additions & 0 deletions test/e2e/tests/single_zone_e2e_test.go
@@ -112,6 +112,50 @@ var _ = Describe("GCE PD CSI Driver", func() {

})

It("Should complete entire disk lifecycle with underspecified volume ID", func() {
testContext := getRandomTestContext()

p, z, _ := testContext.Instance.GetIdentity()
client := testContext.Client
instance := testContext.Instance

// Create Disk
volName := testNamePrefix + string(uuid.NewUUID())
_, err := client.CreateVolume(volName, nil, defaultSizeGb,
&csi.TopologyRequirement{
Requisite: []*csi.Topology{
{
Segments: map[string]string{common.TopologyKeyZone: z},
},
},
})
Expect(err).To(BeNil(), "CreateVolume failed with error: %v", err)

// Validate Disk Created
cloudDisk, err := computeService.Disks.Get(p, z, volName).Do()
Expect(err).To(BeNil(), "Could not get disk from cloud directly")
Expect(cloudDisk.Type).To(ContainSubstring(standardDiskType))
Expect(cloudDisk.Status).To(Equal(readyState))
Expect(cloudDisk.SizeGb).To(Equal(defaultSizeGb))
Expect(cloudDisk.Name).To(Equal(volName))

underSpecifiedID := common.GenerateUnderspecifiedVolumeID(volName, true /* isZonal */)

defer func() {
// Delete Disk
err = client.DeleteVolume(underSpecifiedID)
Expect(err).To(BeNil(), "DeleteVolume failed")

// Validate Disk Deleted
_, err = computeService.Disks.Get(p, z, volName).Do()
Expect(gce.IsGCEError(err, "notFound")).To(BeTrue(), "Expected disk to not be found")
}()

// Attach Disk
testAttachWriteReadDetach(underSpecifiedID, volName, instance, client, false /* readOnly */)

})

It("Should successfully create RePD in two zones in the drivers region when none are specified", func() {
Expect(testContexts).ToNot(BeEmpty())
testContext := getRandomTestContext()
