From 859e2339902c6db4f34c56540799dc0a69225273 Mon Sep 17 00:00:00 2001 From: "Hantao (Will) Wang" Date: Sun, 7 Jul 2019 20:22:05 -0700 Subject: [PATCH 1/4] add ability for gce to bulk verify attached disks --- pkg/volume/gcepd/attacher.go | 35 +++++++++++++ pkg/volume/gcepd/attacher_test.go | 4 ++ pkg/volume/gcepd/gce_pd.go | 2 +- .../legacy-cloud-providers/gce/gce_disks.go | 52 +++++++++++++++++++ 4 files changed, 92 insertions(+), 1 deletion(-) diff --git a/pkg/volume/gcepd/attacher.go b/pkg/volume/gcepd/attacher.go index c3cecbbd06e4..a685b89a599d 100644 --- a/pkg/volume/gcepd/attacher.go +++ b/pkg/volume/gcepd/attacher.go @@ -141,6 +141,41 @@ func (attacher *gcePersistentDiskAttacher) VolumesAreAttached(specs []*volume.Sp return volumesAttachedCheck, nil } +func (attacher *gcePersistentDiskAttacher) BulkVerifyVolumes(volumesByNode map[types.NodeName][]*volume.Spec) (map[types.NodeName]map[*volume.Spec]bool, error) { + volumesAttachedCheck := make(map[types.NodeName]map[*volume.Spec]bool) + diskNamesByNode := make(map[types.NodeName][]string) + volumeSpecToDiskName := make(map[*volume.Spec]string) + + for nodeName, volumeSpecs := range volumesByNode { + diskNames := []string{} + for _, spec := range volumeSpecs { + volumeSource, _, err := getVolumeSource(spec) + if err != nil { + klog.Errorf("Error getting volume (%q) source : %v", spec.Name(), err) + continue + } + diskNames = append(diskNames, volumeSource.PDName) + volumeSpecToDiskName[spec] = volumeSource.PDName + } + diskNamesByNode[nodeName] = diskNames + } + + attachedDisksByNode, err := attacher.gceDisks.BulkDisksAreAttached(diskNamesByNode) + if err != nil { + return nil, err + } + + for nodeName, volumeSpecs := range volumesByNode { + volumesAreAttachedToNode := make(map[*volume.Spec]bool) + for _, spec := range volumeSpecs { + diskName := volumeSpecToDiskName[spec] + volumesAreAttachedToNode[spec] = attachedDisksByNode[nodeName][diskName] + } + volumesAttachedCheck[nodeName] = volumesAreAttachedToNode 
+ } + return volumesAttachedCheck, nil +} + // search Windows disk number by LUN func getDiskID(pdName string, exec mount.Exec) (string, error) { // TODO: replace Get-GcePdName with native windows support of Get-Disk, see issue #74674 diff --git a/pkg/volume/gcepd/attacher_test.go b/pkg/volume/gcepd/attacher_test.go index c067f07c27ac..59eb4a574f97 100644 --- a/pkg/volume/gcepd/attacher_test.go +++ b/pkg/volume/gcepd/attacher_test.go @@ -405,6 +405,10 @@ func (testcase *testcase) DisksAreAttached(diskNames []string, nodeName types.No return nil, errors.New("Not implemented") } +func (testcase *testcase) BulkDisksAreAttached(diskByNodes map[types.NodeName][]string) (map[types.NodeName]map[string]bool, error) { + return nil, errors.New("Not implemented") +} + func (testcase *testcase) CreateDisk(name string, diskType string, zone string, sizeGb int64, tags map[string]string) error { return errors.New("Not implemented") } diff --git a/pkg/volume/gcepd/gce_pd.go b/pkg/volume/gcepd/gce_pd.go index 837266e85332..6668817a1b66 100644 --- a/pkg/volume/gcepd/gce_pd.go +++ b/pkg/volume/gcepd/gce_pd.go @@ -109,7 +109,7 @@ func (plugin *gcePersistentDiskPlugin) SupportsMountOption() bool { } func (plugin *gcePersistentDiskPlugin) SupportsBulkVolumeVerification() bool { - return false + return true } func (plugin *gcePersistentDiskPlugin) GetAccessModes() []v1.PersistentVolumeAccessMode { diff --git a/staging/src/k8s.io/legacy-cloud-providers/gce/gce_disks.go b/staging/src/k8s.io/legacy-cloud-providers/gce/gce_disks.go index 9cc2918464ba..c64e66cd1a06 100644 --- a/staging/src/k8s.io/legacy-cloud-providers/gce/gce_disks.go +++ b/staging/src/k8s.io/legacy-cloud-providers/gce/gce_disks.go @@ -424,6 +424,10 @@ type Disks interface { // to the node with the specified NodeName. 
DisksAreAttached(diskNames []string, nodeName types.NodeName) (map[string]bool, error) + // BulkDisksAreAttached is a batch function to check if all corresponding disks are attached to the + // nodes specified with nodeName. + BulkDisksAreAttached(diskByNodes map[types.NodeName][]string) (map[types.NodeName]map[string]bool, error) + // CreateDisk creates a new PD with given properties. Tags are serialized // as JSON into Description field. CreateDisk(name string, diskType string, zone string, sizeGb int64, tags map[string]string) error @@ -620,6 +624,37 @@ func (g *Cloud) DisksAreAttached(diskNames []string, nodeName types.NodeName) (m return attached, nil } +// BulkDisksAreAttached is a batch function to check if all corresponding disks are attached to the +// nodes specified with nodeName. +func (g *Cloud) BulkDisksAreAttached(diskByNodes map[types.NodeName][]string) (map[types.NodeName]map[string]bool, error) { + instanceNames := []string{} + for nodeName := range diskByNodes { + instanceNames = append(instanceNames, mapNodeNameToInstanceName(nodeName)) + } + + // List all instances with the given instance names + // Then for each instance listed, add the disks attached to that instance to a map + listedInstances, err := g.getFoundInstanceByNames(instanceNames) + if err != nil { + return nil, fmt.Errorf("error listing instances: %v", err) + } + listedInstanceNamesToDisks := make(map[string][]*compute.AttachedDisk) + for _, instance := range listedInstances { + listedInstanceNamesToDisks[instance.Name] = instance.Disks + } + + verifyDisksAttached := make(map[types.NodeName]map[string]bool) + + // For each node and its desired attached disks that needs to be verified + for nodeName, disksToVerify := range diskByNodes { + instanceName := canonicalizeInstanceName(mapNodeNameToInstanceName(nodeName)) + disksActuallyAttached := listedInstanceNamesToDisks[instanceName] + verifyDisksAttached[nodeName] = verifyDisksAttachedToNode(disksToVerify, disksActuallyAttached) + } 
+ + return verifyDisksAttached, nil +} + // CreateDisk creates a new Persistent Disk, with the specified name & // size, in the specified zone. It stores specified tags encoded in // JSON in Description field. @@ -990,3 +1025,20 @@ func isGCEError(err error, reason string) bool { } return false } + +// verifyDisksAttachedToNode takes in a slice of disks that should be attached to an instance, and the +// slice of disks actually attached to it. It returns a map verifying if the disks are actually attached. +func verifyDisksAttachedToNode(disksToVerify []string, disksActuallyAttached []*compute.AttachedDisk) map[string]bool { + verifiedDisks := make(map[string]bool) + diskNamesActuallyAttached := sets.NewString() + for _, disk := range disksActuallyAttached { + diskNamesActuallyAttached.Insert(disk.DeviceName) + } + + // For every disk that's supposed to be attached, verify that it is + for _, diskName := range disksToVerify { + verifiedDisks[diskName] = diskNamesActuallyAttached.Has(diskName) + } + + return verifiedDisks +} From cc5b177d5765136d0faa39564f87cba7c43939af Mon Sep 17 00:00:00 2001 From: "Hantao (Will) Wang" Date: Mon, 15 Jul 2019 16:14:51 -0700 Subject: [PATCH 2/4] move getInstancesByName logic to helper function --- .../gce/gce_instances.go | 22 +++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/staging/src/k8s.io/legacy-cloud-providers/gce/gce_instances.go b/staging/src/k8s.io/legacy-cloud-providers/gce/gce_instances.go index ef705fa808b7..f1855ba8e4f4 100644 --- a/staging/src/k8s.io/legacy-cloud-providers/gce/gce_instances.go +++ b/staging/src/k8s.io/legacy-cloud-providers/gce/gce_instances.go @@ -426,6 +426,28 @@ func (g *Cloud) AddAliasToInstance(nodeName types.NodeName, alias *net.IPNet) er // Gets the named instances, returning cloudprovider.InstanceNotFound if any // instance is not found func (g *Cloud) getInstancesByNames(names []string) ([]*gceInstance, error) { + instanceOrErrors, err := g.getInstanceOrErrorsByNames(names) + 
if err != nil { + return nil, err + } + var allInstances []*gceInstance + for _, entry := range instanceOrErrors { + if entry.err != nil { + return nil, entry.err + } + allInstances = append(allInstances, entry.instance) + } + return allInstances, nil +} + +type instanceOrError struct { + instance *gceInstance + err error +} + +// Gets the named instances, returning a map of each name to either the found instances or +// cloudprovider.InstanceNotFound if the instance is not found +func (g *Cloud) getInstanceOrErrorsByNames(allNames []string) (map[string]*instanceOrError, error) { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() From c1ad5671f8afc487f8c060257f1518717071ad5d Mon Sep 17 00:00:00 2001 From: "Hantao (Will) Wang" Date: Tue, 16 Jul 2019 10:03:36 -0700 Subject: [PATCH 3/4] implement functionality to return all found instances --- .../gce/gce_instances.go | 44 +++++++------------ 1 file changed, 16 insertions(+), 28 deletions(-) diff --git a/staging/src/k8s.io/legacy-cloud-providers/gce/gce_instances.go b/staging/src/k8s.io/legacy-cloud-providers/gce/gce_instances.go index f1855ba8e4f4..5cd2679c66bc 100644 --- a/staging/src/k8s.io/legacy-cloud-providers/gce/gce_instances.go +++ b/staging/src/k8s.io/legacy-cloud-providers/gce/gce_instances.go @@ -426,28 +426,19 @@ func (g *Cloud) AddAliasToInstance(nodeName types.NodeName, alias *net.IPNet) er // Gets the named instances, returning cloudprovider.InstanceNotFound if any // instance is not found func (g *Cloud) getInstancesByNames(names []string) ([]*gceInstance, error) { - instanceOrErrors, err := g.getInstanceOrErrorsByNames(names) + foundInstances, err := g.getFoundInstanceByNames(names) if err != nil { return nil, err } - var allInstances []*gceInstance - for _, entry := range instanceOrErrors { - if entry.err != nil { - return nil, entry.err - } - allInstances = append(allInstances, entry.instance) + if len(foundInstances) != len(names) { + return nil, cloudprovider.InstanceNotFound } - return 
allInstances, nil -} - -type instanceOrError struct { - instance *gceInstance - err error + return foundInstances, nil } -// Gets the named instances, returning a map of each name to either the found instances or -// cloudprovider.InstanceNotFound if the instance is not found -func (g *Cloud) getInstanceOrErrorsByNames(allNames []string) (map[string]*instanceOrError, error) { +// Gets the named instances, returning a list of gceInstances it was able to find from the provided +// list of names. +func (g *Cloud) getFoundInstanceByNames(names []string) ([]*gceInstance, error) { ctx, cancel := cloud.ContextWithCallTimeout() defer cancel() @@ -494,20 +485,17 @@ func (g *Cloud) getInstanceOrErrorsByNames(allNames []string) (map[string]*insta } } - if remaining > 0 { - var failed []string - for k := range found { - if found[k] == nil { - failed = append(failed, k) - } + var ret []*gceInstance + var failed []string + for name, instance := range found { + if instance != nil { + ret = append(ret, instance) + } else { + failed = append(failed, name) } - klog.Errorf("Failed to retrieve instances: %v", failed) - return nil, cloudprovider.InstanceNotFound } - - var ret []*gceInstance - for _, instance := range found { - ret = append(ret, instance) + if len(failed) > 0 { + klog.Errorf("Failed to retrieve instances: %v", failed) } return ret, nil From cb6786fe32dcf8d3b32d68b0a2562736d5133b95 Mon Sep 17 00:00:00 2001 From: "Hantao (Will) Wang" Date: Mon, 22 Jul 2019 16:20:10 -0700 Subject: [PATCH 4/4] add unit tests for attacher DisksAreAttached and BulkDisksAreAttached --- pkg/volume/gcepd/BUILD | 1 + pkg/volume/gcepd/attacher_test.go | 296 ++++++++++++++++++++++++------ 2 files changed, 241 insertions(+), 56 deletions(-) diff --git a/pkg/volume/gcepd/BUILD b/pkg/volume/gcepd/BUILD index 4b873daa32c8..76ab8ca354fc 100644 --- a/pkg/volume/gcepd/BUILD +++ b/pkg/volume/gcepd/BUILD @@ -59,6 +59,7 @@ go_test( "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", 
"//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library", "//staging/src/k8s.io/client-go/util/testing:go_default_library", + "//staging/src/k8s.io/cloud-provider:go_default_library", "//staging/src/k8s.io/cloud-provider/volume:go_default_library", "//staging/src/k8s.io/cloud-provider/volume/helpers:go_default_library", "//vendor/k8s.io/klog:go_default_library", diff --git a/pkg/volume/gcepd/attacher_test.go b/pkg/volume/gcepd/attacher_test.go index 59eb4a574f97..729be7ab3b75 100644 --- a/pkg/volume/gcepd/attacher_test.go +++ b/pkg/volume/gcepd/attacher_test.go @@ -24,6 +24,7 @@ import ( "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/util/sets" + cloudprovider "k8s.io/cloud-provider" cloudvolume "k8s.io/cloud-provider/volume" "k8s.io/kubernetes/pkg/volume" volumetest "k8s.io/kubernetes/pkg/volume/testing" @@ -85,7 +86,7 @@ func TestAttachDetachRegional(t *testing.T) { // Successful Attach call testcase := testcase{ name: "Attach_Regional_Positive", - diskIsAttached: diskIsAttachedCall{diskName, nodeName, false, nil}, + diskIsAttached: diskIsAttachedCall{disksAttachedMap{nodeName: {diskName}}, nil}, attach: attachCall{diskName, nodeName, readOnly, regional, nil}, test: func(testcase *testcase) error { attacher := newAttacher(testcase) @@ -99,9 +100,8 @@ func TestAttachDetachRegional(t *testing.T) { err := testcase.test(&testcase) if err != testcase.expectedReturn { - t.Errorf("%s failed: expected err=%q, got %q", testcase.name, testcase.expectedReturn.Error(), err.Error()) + t.Errorf("%s failed: expected err=%v, got %v", testcase.name, testcase.expectedReturn, err) } - t.Logf("Test %q succeeded", testcase.name) } func TestAttachDetach(t *testing.T) { @@ -117,7 +117,7 @@ func TestAttachDetach(t *testing.T) { // Successful Attach call { name: "Attach_Positive", - diskIsAttached: diskIsAttachedCall{diskName, nodeName, false, nil}, + diskIsAttached: diskIsAttachedCall{disksAttachedMap{nodeName: {}}, nil}, attach: 
attachCall{diskName, nodeName, readOnly, regional, nil}, test: func(testcase *testcase) error { attacher := newAttacher(testcase) @@ -132,7 +132,7 @@ func TestAttachDetach(t *testing.T) { // Disk is already attached { name: "Attach_Positive_AlreadyAttached", - diskIsAttached: diskIsAttachedCall{diskName, nodeName, true, nil}, + diskIsAttached: diskIsAttachedCall{disksAttachedMap{nodeName: {diskName}}, nil}, test: func(testcase *testcase) error { attacher := newAttacher(testcase) devicePath, err := attacher.Attach(spec, nodeName) @@ -146,7 +146,7 @@ func TestAttachDetach(t *testing.T) { // DiskIsAttached fails and Attach succeeds { name: "Attach_Positive_CheckFails", - diskIsAttached: diskIsAttachedCall{diskName, nodeName, false, diskCheckError}, + diskIsAttached: diskIsAttachedCall{disksAttachedMap{nodeName: {}}, diskCheckError}, attach: attachCall{diskName, nodeName, readOnly, regional, nil}, test: func(testcase *testcase) error { attacher := newAttacher(testcase) @@ -161,7 +161,7 @@ func TestAttachDetach(t *testing.T) { // Attach call fails { name: "Attach_Negative", - diskIsAttached: diskIsAttachedCall{diskName, nodeName, false, diskCheckError}, + diskIsAttached: diskIsAttachedCall{disksAttachedMap{nodeName: {}}, diskCheckError}, attach: attachCall{diskName, nodeName, readOnly, regional, attachError}, test: func(testcase *testcase) error { attacher := newAttacher(testcase) @@ -177,7 +177,7 @@ func TestAttachDetach(t *testing.T) { // Detach succeeds { name: "Detach_Positive", - diskIsAttached: diskIsAttachedCall{diskName, nodeName, true, nil}, + diskIsAttached: diskIsAttachedCall{disksAttachedMap{nodeName: {diskName}}, nil}, detach: detachCall{diskName, nodeName, nil}, test: func(testcase *testcase) error { detacher := newDetacher(testcase) @@ -188,7 +188,7 @@ func TestAttachDetach(t *testing.T) { // Disk is already detached { name: "Detach_Positive_AlreadyDetached", - diskIsAttached: diskIsAttachedCall{diskName, nodeName, false, nil}, + diskIsAttached: 
diskIsAttachedCall{disksAttachedMap{nodeName: {}}, nil}, test: func(testcase *testcase) error { detacher := newDetacher(testcase) return detacher.Detach(diskName, nodeName) @@ -198,7 +198,7 @@ func TestAttachDetach(t *testing.T) { // Detach succeeds when DiskIsAttached fails { name: "Detach_Positive_CheckFails", - diskIsAttached: diskIsAttachedCall{diskName, nodeName, false, diskCheckError}, + diskIsAttached: diskIsAttachedCall{disksAttachedMap{nodeName: {}}, diskCheckError}, detach: detachCall{diskName, nodeName, nil}, test: func(testcase *testcase) error { detacher := newDetacher(testcase) @@ -209,7 +209,7 @@ func TestAttachDetach(t *testing.T) { // Detach fails { name: "Detach_Negative", - diskIsAttached: diskIsAttachedCall{diskName, nodeName, false, diskCheckError}, + diskIsAttached: diskIsAttachedCall{disksAttachedMap{nodeName: {}}, diskCheckError}, detach: detachCall{diskName, nodeName, detachError}, test: func(testcase *testcase) error { detacher := newDetacher(testcase) @@ -223,9 +223,177 @@ func TestAttachDetach(t *testing.T) { testcase.t = t err := testcase.test(&testcase) if err != testcase.expectedReturn { - t.Errorf("%s failed: expected err=%q, got %q", testcase.name, testcase.expectedReturn.Error(), err.Error()) + t.Errorf("%s failed: expected err=%v, got %v", testcase.name, testcase.expectedReturn, err) + } + } +} + +func TestVerifyVolumesAttached(t *testing.T) { + readOnly := false + nodeName1 := types.NodeName("instance1") + nodeName2 := types.NodeName("instance2") + + diskAName := "diskA" + diskBName := "diskB" + diskCName := "diskC" + diskASpec := createVolSpec(diskAName, readOnly) + diskBSpec := createVolSpec(diskBName, readOnly) + diskCSpec := createVolSpec(diskCName, readOnly) + + verifyDiskAttachedInResult := func(results map[*volume.Spec]bool, spec *volume.Spec, expected bool) error { + found, ok := results[spec] + if !ok { + return fmt.Errorf("expected to find volume %s in verification result, but didn't", spec.Name()) + } + if found != 
expected { + return fmt.Errorf("expected to find volume %s to have attached value %v but got %v", spec.Name(), expected, found) + } + return nil + } + + tests := []testcase{ + // Successful VolumesAreAttached + { + name: "VolumesAreAttached_Positive", + diskIsAttached: diskIsAttachedCall{disksAttachedMap{nodeName1: {diskAName, diskBName}}, nil}, + test: func(testcase *testcase) error { + attacher := newAttacher(testcase) + results, err := attacher.VolumesAreAttached([]*volume.Spec{diskASpec, diskBSpec}, nodeName1) + if err != nil { + return err + } + err = verifyDiskAttachedInResult(results, diskASpec, true) + if err != nil { + return err + } + return verifyDiskAttachedInResult(results, diskBSpec, true) + }, + }, + + // Successful VolumesAreAttached for detached disk + { + name: "VolumesAreAttached_Negative", + diskIsAttached: diskIsAttachedCall{disksAttachedMap{nodeName1: {diskAName}}, nil}, + test: func(testcase *testcase) error { + attacher := newAttacher(testcase) + results, err := attacher.VolumesAreAttached([]*volume.Spec{diskASpec, diskBSpec}, nodeName1) + if err != nil { + return err + } + err = verifyDiskAttachedInResult(results, diskASpec, true) + if err != nil { + return err + } + return verifyDiskAttachedInResult(results, diskBSpec, false) + }, + }, + + // VolumesAreAttached with InstanceNotFound + { + name: "VolumesAreAttached_InstanceNotFound", + diskIsAttached: diskIsAttachedCall{disksAttachedMap{}, nil}, + expectedReturn: cloudprovider.InstanceNotFound, + test: func(testcase *testcase) error { + attacher := newAttacher(testcase) + _, err := attacher.VolumesAreAttached([]*volume.Spec{diskASpec}, nodeName1) + if err != cloudprovider.InstanceNotFound { + return fmt.Errorf("expected InstanceNotFound error, but got %v", err) + } + return err + }, + }, + + // Successful BulkDisksAreAttached + { + name: "BulkDisksAreAttached_Positive", + diskIsAttached: diskIsAttachedCall{disksAttachedMap{nodeName1: {diskAName}, nodeName2: {diskBName, diskCName}}, nil}, 
+ test: func(testcase *testcase) error { + attacher := newAttacher(testcase) + results, err := attacher.BulkVerifyVolumes(map[types.NodeName][]*volume.Spec{nodeName1: {diskASpec}, nodeName2: {diskBSpec, diskCSpec}}) + if err != nil { + return err + } + disksAttachedNode1, nodeFound := results[nodeName1] + if !nodeFound { + return fmt.Errorf("expected to find node %s but didn't", nodeName1) + } + if err := verifyDiskAttachedInResult(disksAttachedNode1, diskASpec, true); err != nil { + return err + } + disksAttachedNode2, nodeFound := results[nodeName2] + if !nodeFound { + return fmt.Errorf("expected to find node %s but didn't", nodeName2) + } + if err := verifyDiskAttachedInResult(disksAttachedNode2, diskBSpec, true); err != nil { + return err + } + return verifyDiskAttachedInResult(disksAttachedNode2, diskCSpec, true) + }, + }, + + // Successful BulkDisksAreAttached for detached disk + { + name: "BulkDisksAreAttached_Negative", + diskIsAttached: diskIsAttachedCall{disksAttachedMap{nodeName1: {}, nodeName2: {diskBName}}, nil}, + test: func(testcase *testcase) error { + attacher := newAttacher(testcase) + results, err := attacher.BulkVerifyVolumes(map[types.NodeName][]*volume.Spec{nodeName1: {diskASpec}, nodeName2: {diskBSpec, diskCSpec}}) + if err != nil { + return err + } + disksAttachedNode1, nodeFound := results[nodeName1] + if !nodeFound { + return fmt.Errorf("expected to find node %s but didn't", nodeName1) + } + if err := verifyDiskAttachedInResult(disksAttachedNode1, diskASpec, false); err != nil { + return err + } + disksAttachedNode2, nodeFound := results[nodeName2] + if !nodeFound { + return fmt.Errorf("expected to find node %s but didn't", nodeName2) + } + if err := verifyDiskAttachedInResult(disksAttachedNode2, diskBSpec, true); err != nil { + return err + } + return verifyDiskAttachedInResult(disksAttachedNode2, diskCSpec, false) + }, + }, + + // Successful BulkDisksAreAttached with InstanceNotFound + { + name: "BulkDisksAreAttached_InstanceNotFound", + 
diskIsAttached: diskIsAttachedCall{disksAttachedMap{nodeName1: {diskAName}}, nil}, + test: func(testcase *testcase) error { + attacher := newAttacher(testcase) + results, err := attacher.BulkVerifyVolumes(map[types.NodeName][]*volume.Spec{nodeName1: {diskASpec}, nodeName2: {diskBSpec, diskCSpec}}) + if err != nil { + return err + } + disksAttachedNode1, nodeFound := results[nodeName1] + if !nodeFound { + return fmt.Errorf("expected to find node %s but didn't", nodeName1) + } + if err := verifyDiskAttachedInResult(disksAttachedNode1, diskASpec, true); err != nil { + return err + } + disksAttachedNode2, nodeFound := results[nodeName2] + if !nodeFound { + return fmt.Errorf("expected to find node %s but didn't", nodeName2) + } + if err := verifyDiskAttachedInResult(disksAttachedNode2, diskBSpec, false); err != nil { + return err + } + return verifyDiskAttachedInResult(disksAttachedNode2, diskCSpec, false) + }, + }, + } + + for _, testcase := range tests { + testcase.t = t + err := testcase.test(&testcase) + if err != testcase.expectedReturn { + t.Errorf("%s failed: expected err=%v, got %v", testcase.name, testcase.expectedReturn, err) } - t.Logf("Test %q succeeded", testcase.name) } } @@ -300,113 +468,129 @@ type attachCall struct { nodeName types.NodeName readOnly bool regional bool - ret error + retErr error } type detachCall struct { devicePath string nodeName types.NodeName - ret error + retErr error } type diskIsAttachedCall struct { - diskName string - nodeName types.NodeName - isAttached bool - ret error + attachedDisks disksAttachedMap + retErr error } +// disksAttachedMap specifies what disks in the test scenario are actually attached to each node +type disksAttachedMap map[types.NodeName][]string + func (testcase *testcase) AttachDisk(diskName string, nodeName types.NodeName, readOnly bool, regional bool) error { expected := &testcase.attach if expected.diskName == "" && expected.nodeName == "" { - // testcase.attach looks uninitialized, test did not expect 
to call - // AttachDisk - testcase.t.Errorf("Unexpected AttachDisk call!") + // testcase.attach looks uninitialized, test did not expect to call AttachDisk return errors.New("unexpected AttachDisk call") } if expected.diskName != diskName { - testcase.t.Errorf("Unexpected AttachDisk call: expected diskName %s, got %s", expected.diskName, diskName) - return errors.New("Unexpected AttachDisk call: wrong diskName") + return fmt.Errorf("Unexpected AttachDisk call: expected diskName %s, got %s", expected.diskName, diskName) } if expected.nodeName != nodeName { - testcase.t.Errorf("Unexpected AttachDisk call: expected nodeName %s, got %s", expected.nodeName, nodeName) - return errors.New("Unexpected AttachDisk call: wrong nodeName") + return fmt.Errorf("Unexpected AttachDisk call: expected nodeName %s, got %s", expected.nodeName, nodeName) } if expected.readOnly != readOnly { - testcase.t.Errorf("Unexpected AttachDisk call: expected readOnly %v, got %v", expected.readOnly, readOnly) - return errors.New("Unexpected AttachDisk call: wrong readOnly") + return fmt.Errorf("Unexpected AttachDisk call: expected readOnly %v, got %v", expected.readOnly, readOnly) } if expected.regional != regional { - testcase.t.Errorf("Unexpected AttachDisk call: expected regional %v, got %v", expected.regional, regional) - return errors.New("Unexpected AttachDisk call: wrong regional") + return fmt.Errorf("Unexpected AttachDisk call: expected regional %v, got %v", expected.regional, regional) } - klog.V(4).Infof("AttachDisk call: %s, %s, %v, returning %v", diskName, nodeName, readOnly, expected.ret) + klog.V(4).Infof("AttachDisk call: %s, %s, %v, returning %v", diskName, nodeName, readOnly, expected.retErr) - return expected.ret + return expected.retErr } func (testcase *testcase) DetachDisk(devicePath string, nodeName types.NodeName) error { expected := &testcase.detach if expected.devicePath == "" && expected.nodeName == "" { - // testcase.detach looks uninitialized, test did not expect to 
call - // DetachDisk - testcase.t.Errorf("Unexpected DetachDisk call!") + // testcase.detach looks uninitialized, test did not expect to call DetachDisk return errors.New("unexpected DetachDisk call") } if expected.devicePath != devicePath { - testcase.t.Errorf("Unexpected DetachDisk call: expected devicePath %s, got %s", expected.devicePath, devicePath) - return errors.New("Unexpected DetachDisk call: wrong diskName") + return fmt.Errorf("Unexpected DetachDisk call: expected devicePath %s, got %s", expected.devicePath, devicePath) } if expected.nodeName != nodeName { - testcase.t.Errorf("Unexpected DetachDisk call: expected nodeName %s, got %s", expected.nodeName, nodeName) - return errors.New("Unexpected DetachDisk call: wrong nodeName") + return fmt.Errorf("Unexpected DetachDisk call: expected nodeName %s, got %s", expected.nodeName, nodeName) } - klog.V(4).Infof("DetachDisk call: %s, %s, returning %v", devicePath, nodeName, expected.ret) + klog.V(4).Infof("DetachDisk call: %s, %s, returning %v", devicePath, nodeName, expected.retErr) - return expected.ret + return expected.retErr } func (testcase *testcase) DiskIsAttached(diskName string, nodeName types.NodeName) (bool, error) { expected := &testcase.diskIsAttached - if expected.diskName == "" && expected.nodeName == "" { - // testcase.diskIsAttached looks uninitialized, test did not expect to - // call DiskIsAttached - testcase.t.Errorf("Unexpected DiskIsAttached call!") + if expected.attachedDisks == nil { + // testcase.attachedDisks looks uninitialized, test did not expect to call DiskIsAttached return false, errors.New("unexpected DiskIsAttached call") } - if expected.diskName != diskName { - testcase.t.Errorf("Unexpected DiskIsAttached call: expected diskName %s, got %s", expected.diskName, diskName) - return false, errors.New("Unexpected DiskIsAttached call: wrong diskName") + if expected.retErr != nil { + return false, expected.retErr } - if expected.nodeName != nodeName { - testcase.t.Errorf("Unexpected 
DiskIsAttached call: expected nodeName %s, got %s", expected.nodeName, nodeName) - return false, errors.New("Unexpected DiskIsAttached call: wrong nodeName") + disksForNode, nodeExists := expected.attachedDisks[nodeName] + if !nodeExists { + return false, cloudprovider.InstanceNotFound } - klog.V(4).Infof("DiskIsAttached call: %s, %s, returning %v, %v", diskName, nodeName, expected.isAttached, expected.ret) - - return expected.isAttached, expected.ret + found := false + for _, diskAttachedName := range disksForNode { + if diskAttachedName == diskName { + found = true + } + } + klog.V(4).Infof("DiskIsAttached call: %s, %s, returning %v", diskName, nodeName, found) + return found, nil } func (testcase *testcase) DisksAreAttached(diskNames []string, nodeName types.NodeName) (map[string]bool, error) { - return nil, errors.New("Not implemented") + verifiedDisks := make(map[string]bool) + for _, name := range diskNames { + found, err := testcase.DiskIsAttached(name, nodeName) + if err != nil { + return nil, err + } + verifiedDisks[name] = found + } + return verifiedDisks, nil } func (testcase *testcase) BulkDisksAreAttached(diskByNodes map[types.NodeName][]string) (map[types.NodeName]map[string]bool, error) { - return nil, errors.New("Not implemented") + verifiedDisksByNodes := make(map[types.NodeName]map[string]bool) + for nodeName, disksForNode := range diskByNodes { + verifiedDisks, err := testcase.DisksAreAttached(disksForNode, nodeName) + if err != nil { + if err != cloudprovider.InstanceNotFound { + return nil, err + } + verifiedDisks = make(map[string]bool) + for _, diskName := range disksForNode { + verifiedDisks[diskName] = false + } + } + verifiedDisksByNodes[nodeName] = verifiedDisks + } + + return verifiedDisksByNodes, nil } func (testcase *testcase) CreateDisk(name string, diskType string, zone string, sizeGb int64, tags map[string]string) error {