
fixed unit tests and added level of indirection for GetAllCurrentZones through manager. Will squash after
davidz627 committed Sep 15, 2017
1 parent d5462ec commit a24264e
Showing 4 changed files with 98 additions and 62 deletions.
61 changes: 48 additions & 13 deletions pkg/cloudprovider/providers/gce/gce.go
@@ -19,6 +19,7 @@ package gce
import (
"fmt"
"io"
"log"
"net/http"
"regexp"
"strconv"
@@ -31,6 +32,7 @@ import (
"cloud.google.com/go/compute/metadata"

"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apiserver/pkg/server/options/encryptionconfig"
@@ -42,6 +44,7 @@ import (
"k8s.io/client-go/util/flowcontrol"
"k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/controller"
kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"

"github.com/golang/glog"
"golang.org/x/oauth2"
@@ -100,19 +103,22 @@ type GCECloud struct {
// for the cloudprovider to start watching the configmap.
ClusterID ClusterID

service *compute.Service
serviceBeta *computebeta.Service
serviceAlpha *computealpha.Service
containerService *container.Service
cloudkmsService *cloudkms.Service
client clientset.Interface
clientBuilder controller.ControllerClientBuilder
eventBroadcaster record.EventBroadcaster
eventRecorder record.EventRecorder
projectID string
region string
localZone string // The zone in which we are running
managedZones []string // List of zones we are spanning (for multi-AZ clusters, primarily when running on master)
service *compute.Service
serviceBeta *computebeta.Service
serviceAlpha *computealpha.Service
containerService *container.Service
cloudkmsService *cloudkms.Service
client clientset.Interface
clientBuilder controller.ControllerClientBuilder
eventBroadcaster record.EventBroadcaster
eventRecorder record.EventRecorder
projectID string
region string
localZone string // The zone in which we are running
// managedZones will be set to the single local zone if running a single-zone cluster;
// it will be set to ALL zones in the region for any multi-zone cluster.
// Use GetAllCurrentZones to get only the zones that currently contain nodes
// (a short sketch of the difference follows this hunk).
managedZones []string
networkURL string
subnetworkURL string
secondaryRangeName string
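A minimal, runnable sketch of the distinction the managedZones comment draws; the zone names are hypothetical and not taken from this commit:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

func main() {
	// Hypothetical multi-zone cluster: managedZones spans every zone in the
	// region, while only two of those zones actually contain nodes.
	managedZones := []string{"us-central1-a", "us-central1-b", "us-central1-c"}
	zonesWithNodes := sets.NewString("us-central1-a", "us-central1-b")

	for _, zone := range managedZones {
		fmt.Printf("%s: managed=true, hasNodes=%v\n", zone, zonesWithNodes.Has(zone))
	}
}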
@@ -185,6 +191,9 @@ type ServiceManager interface {

// Waits until GCE reports the given operation in the given region is done.
WaitForRegionalOp(op gceObject, mc *metricContext) error

// GetAllCurrentZones returns all the zones in which nodes are currently running
GetAllCurrentZones(client clientset.Interface) (sets.String, error)
}

type GCEServiceManager struct {
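The heart of the change is the hunk above: GetAllCurrentZones joins the ServiceManager interface, so GCECloud can call it through its manager field and tests can substitute a fake. A minimal sketch of that seam, with hypothetical names (zoneLister, fakeZoneLister, pickZones) standing in for the real types:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

// zoneLister is a hypothetical stand-in for the ServiceManager interface:
// callers depend on this seam rather than on a concrete GCE-backed type.
type zoneLister interface {
	GetAllCurrentZones() (sets.String, error)
}

// fakeZoneLister mirrors what FakeServiceManager does in the tests later in
// this commit: it returns canned zones instead of listing real nodes.
type fakeZoneLister struct{ zones []string }

func (f *fakeZoneLister) GetAllCurrentZones() (sets.String, error) {
	return sets.NewString(f.zones...), nil
}

// pickZones is a hypothetical caller that only ever sees the interface.
func pickZones(l zoneLister) ([]string, error) {
	zones, err := l.GetAllCurrentZones()
	if err != nil {
		return nil, err
	}
	return zones.List(), nil // List() returns the zones as a sorted slice
}

func main() {
	zones, _ := pickZones(&fakeZoneLister{zones: []string{"zone2", "zone1"}})
	fmt.Println(zones) // [zone1 zone2]
}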
@@ -1003,6 +1012,32 @@ func (manager *GCEServiceManager) DeleteRegionalDisk(
return nil, fmt.Errorf("DeleteRegionalDisk is a regional PD feature and is only available via the GCE Alpha API. Enable \"GCEDiskAlphaAPI\" in the list of \"alpha-features\" in \"gce.conf\" to use the feature.")
}

// GetAllCurrentZones returns all the zones in which nodes are currently running.
// TODO: Add caching. This is currently only called when we are creating a volume,
// which is a relatively infrequent operation, and it costs only one API call.
// Ideally we would make this watch-based.
func (manager *GCEServiceManager) GetAllCurrentZones(client clientset.Interface) (sets.String, error) {
zones := sets.NewString()
if client == nil {
return nil, fmt.Errorf("Client not set on GCECloud object")
}
nodeList, err := client.Core().Nodes().List(metav1.ListOptions{})
if err != nil {
return nil, err
}

for _, node := range nodeList.Items {
labels := node.ObjectMeta.Labels
zone, ok := labels[kubeletapis.LabelZoneFailureDomain]
if ok {
zones.Insert(zone)
} else {
log.Printf("Could not find zone for node %v", node.ObjectMeta.Name)
}
}
return zones, nil
}

func (manager *GCEServiceManager) WaitForZoneOp(
op gceObject, zone string, mc *metricContext) error {
return manager.gce.waitForZoneOp(op, zone, mc)
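Per the TODO above, GetAllCurrentZones is invoked during volume creation. A hedged sketch of how a provisioning caller might consume the returned set; pickZoneForVolume is hypothetical and not part of this commit:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

// pickZoneForVolume is hypothetical: a provisioner could restrict candidate
// zones for a new PD to those that actually contain nodes, so the disk is
// never created in a zone where no node can attach it.
func pickZoneForVolume(zonesWithNodes sets.String) (string, error) {
	if zonesWithNodes.Len() == 0 {
		return "", fmt.Errorf("no zones contain running nodes")
	}
	// Deterministic for the sketch: first zone in sorted order.
	return zonesWithNodes.List()[0], nil
}

func main() {
	zone, err := pickZoneForVolume(sets.NewString("us-central1-b", "us-central1-a"))
	fmt.Println(zone, err) // us-central1-a <nil>
}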
74 changes: 48 additions & 26 deletions pkg/cloudprovider/providers/gce/gce_disks_test.go
@@ -26,6 +26,7 @@ import (
compute "google.golang.org/api/compute/v1"
"google.golang.org/api/googleapi"
"k8s.io/apimachinery/pkg/util/sets"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/pkg/cloudprovider"
kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
)
@@ -37,7 +38,8 @@ func TestCreateDisk_Basic(t *testing.T) {
/* Arrange */
gceProjectId := "test-project"
gceRegion := "fake-region"
fakeManager := newFakeManager(gceProjectId, gceRegion)
zonesWithNodes := []string{"zone1"}
fakeManager := newFakeManager(gceProjectId, gceRegion, zonesWithNodes)
alphaFeatureGate, featureGateErr := NewAlphaFeatureGate([]string{})
if featureGateErr != nil {
t.Error(featureGateErr)
@@ -95,7 +97,8 @@ func TestCreateRegionalDisk_Basic(t *testing.T) {
/* Arrange */
gceProjectId := "test-project"
gceRegion := "fake-region"
fakeManager := newFakeManager(gceProjectId, gceRegion)
zonesWithNodes := []string{"zone1", "zone3", "zone2"}
fakeManager := newFakeManager(gceProjectId, gceRegion, zonesWithNodes)
alphaFeatureGate, featureGateErr := NewAlphaFeatureGate([]string{GCEDiskAlphaFeatureGate})
if featureGateErr != nil {
t.Error(featureGateErr)
@@ -153,7 +156,8 @@ func TestCreateDisk_DiskAlreadyExists(t *testing.T) {
/* Arrange */
gceProjectId := "test-project"
gceRegion := "fake-region"
fakeManager := newFakeManager(gceProjectId, gceRegion)
zonesWithNodes := []string{"zone1"}
fakeManager := newFakeManager(gceProjectId, gceRegion, zonesWithNodes)
alphaFeatureGate, featureGateErr := NewAlphaFeatureGate([]string{})
if featureGateErr != nil {
t.Error(featureGateErr)
@@ -184,7 +188,8 @@ func TestCreateDisk_WrongZone(t *testing.T) {
/* Arrange */
gceProjectId := "test-project"
gceRegion := "fake-region"
fakeManager := newFakeManager(gceProjectId, gceRegion)
zonesWithNodes := []string{"zone1"}
fakeManager := newFakeManager(gceProjectId, gceRegion, zonesWithNodes)
gce := GCECloud{manager: fakeManager, managedZones: []string{"zone1"}}

diskName := "disk"
@@ -204,7 +209,8 @@ func TestCreateDisk_NoManagedZone(t *testing.T) {
/* Arrange */
gceProjectId := "test-project"
gceRegion := "fake-region"
fakeManager := newFakeManager(gceProjectId, gceRegion)
zonesWithNodes := []string{}
fakeManager := newFakeManager(gceProjectId, gceRegion, zonesWithNodes)
gce := GCECloud{manager: fakeManager, managedZones: []string{}}

diskName := "disk"
@@ -224,7 +230,8 @@ func TestCreateDisk_BadDiskType(t *testing.T) {
/* Arrange */
gceProjectId := "test-project"
gceRegion := "fake-region"
fakeManager := newFakeManager(gceProjectId, gceRegion)
zonesWithNodes := []string{"zone1"}
fakeManager := newFakeManager(gceProjectId, gceRegion, zonesWithNodes)
gce := GCECloud{manager: fakeManager, managedZones: []string{"zone1"}}

diskName := "disk"
@@ -245,7 +252,8 @@ func TestCreateDisk_MultiZone(t *testing.T) {
/* Arrange */
gceProjectId := "test-project"
gceRegion := "fake-region"
fakeManager := newFakeManager(gceProjectId, gceRegion)
zonesWithNodes := []string{"zone1", "zone2", "zone3"}
fakeManager := newFakeManager(gceProjectId, gceRegion, zonesWithNodes)
alphaFeatureGate, featureGateErr := NewAlphaFeatureGate([]string{})
if featureGateErr != nil {
t.Error(featureGateErr)
@@ -274,7 +282,8 @@ func TestDeleteDisk_Basic(t *testing.T) {
/* Arrange */
gceProjectId := "test-project"
gceRegion := "fake-region"
fakeManager := newFakeManager(gceProjectId, gceRegion)
zonesWithNodes := []string{"zone1"}
fakeManager := newFakeManager(gceProjectId, gceRegion, zonesWithNodes)
alphaFeatureGate, featureGateErr := NewAlphaFeatureGate([]string{})
if featureGateErr != nil {
t.Error(featureGateErr)
@@ -311,7 +320,8 @@ func TestDeleteDisk_NotFound(t *testing.T) {
/* Arrange */
gceProjectId := "test-project"
gceRegion := "fake-region"
fakeManager := newFakeManager(gceProjectId, gceRegion)
zonesWithNodes := []string{"zone1"}
fakeManager := newFakeManager(gceProjectId, gceRegion, zonesWithNodes)
alphaFeatureGate, featureGateErr := NewAlphaFeatureGate([]string{})
if featureGateErr != nil {
t.Error(featureGateErr)
@@ -336,7 +346,8 @@ func TestDeleteDisk_ResourceBeingUsed(t *testing.T) {
/* Arrange */
gceProjectId := "test-project"
gceRegion := "fake-region"
fakeManager := newFakeManager(gceProjectId, gceRegion)
zonesWithNodes := []string{"zone1"}
fakeManager := newFakeManager(gceProjectId, gceRegion, zonesWithNodes)
alphaFeatureGate, featureGateErr := NewAlphaFeatureGate([]string{})
if featureGateErr != nil {
t.Error(featureGateErr)
@@ -367,7 +378,8 @@ func TestDeleteDisk_SameDiskMultiZone(t *testing.T) {
/* Arrange */
gceProjectId := "test-project"
gceRegion := "fake-region"
fakeManager := newFakeManager(gceProjectId, gceRegion)
zonesWithNodes := []string{"zone1", "zone2", "zone3"}
fakeManager := newFakeManager(gceProjectId, gceRegion, zonesWithNodes)
alphaFeatureGate, featureGateErr := NewAlphaFeatureGate([]string{})
if featureGateErr != nil {
t.Error(featureGateErr)
@@ -401,7 +413,8 @@ func TestDeleteDisk_DiffDiskMultiZone(t *testing.T) {
/* Arrange */
gceProjectId := "test-project"
gceRegion := "fake-region"
fakeManager := newFakeManager(gceProjectId, gceRegion)
zonesWithNodes := []string{"zone1"}
fakeManager := newFakeManager(gceProjectId, gceRegion, zonesWithNodes)
alphaFeatureGate, featureGateErr := NewAlphaFeatureGate([]string{})
if featureGateErr != nil {
t.Error(featureGateErr)
@@ -435,10 +448,11 @@ func TestGetAutoLabelsForPD_Basic(t *testing.T) {
/* Arrange */
gceProjectId := "test-project"
gceRegion := "us-central1"
fakeManager := newFakeManager(gceProjectId, gceRegion)
zone := "us-central1-c"
zonesWithNodes := []string{zone}
fakeManager := newFakeManager(gceProjectId, gceRegion, zonesWithNodes)
diskName := "disk"
diskType := DiskTypeSSD
zone := "us-central1-c"
const sizeGb int64 = 128
alphaFeatureGate, featureGateErr := NewAlphaFeatureGate([]string{})
if featureGateErr != nil {
@@ -472,10 +486,11 @@ func TestGetAutoLabelsForPD_NoZone(t *testing.T) {
/* Arrange */
gceProjectId := "test-project"
gceRegion := "europe-west1"
fakeManager := newFakeManager(gceProjectId, gceRegion)
zone := "europe-west1-d"
zonesWithNodes := []string{zone}
fakeManager := newFakeManager(gceProjectId, gceRegion, zonesWithNodes)
diskName := "disk"
diskType := DiskTypeStandard
zone := "europe-west1-d"
const sizeGb int64 = 128
alphaFeatureGate, featureGateErr := NewAlphaFeatureGate([]string{})
if featureGateErr != nil {
@@ -508,9 +523,10 @@ func TestGetAutoLabelsForPD_DiskNotFound(t *testing.T) {
/* Arrange */
gceProjectId := "test-project"
gceRegion := "fake-region"
fakeManager := newFakeManager(gceProjectId, gceRegion)
diskName := "disk"
zone := "asia-northeast1-a"
zonesWithNodes := []string{zone}
fakeManager := newFakeManager(gceProjectId, gceRegion, zonesWithNodes)
diskName := "disk"
gce := GCECloud{manager: fakeManager, managedZones: []string{zone}}

/* Act */
@@ -526,7 +542,7 @@ func TestGetAutoLabelsForPD_DiskNotFoundAndNoZone(t *testing.T) {
/* Arrange */
gceProjectId := "test-project"
gceRegion := "fake-region"
fakeManager := newFakeManager(gceProjectId, gceRegion)
fakeManager := newFakeManager(gceProjectId, gceRegion, []string{})
diskName := "disk"
alphaFeatureGate, featureGateErr := NewAlphaFeatureGate([]string{})
if featureGateErr != nil {
@@ -551,7 +567,7 @@ func TestGetAutoLabelsForPD_DupDisk(t *testing.T) {
/* Arrange */
gceProjectId := "test-project"
gceRegion := "us-west1"
fakeManager := newFakeManager(gceProjectId, gceRegion)
fakeManager := newFakeManager(gceProjectId, gceRegion, []string{"us-west1-b", "asia-southeast1-a"})
diskName := "disk"
diskType := DiskTypeStandard
zone := "us-west1-b"
@@ -590,7 +606,7 @@ func TestGetAutoLabelsForPD_DupDiskNoZone(t *testing.T) {
/* Arrange */
gceProjectId := "test-project"
gceRegion := "fake-region"
fakeManager := newFakeManager(gceProjectId, gceRegion)
fakeManager := newFakeManager(gceProjectId, gceRegion, []string{"us-west1-b", "asia-southeast1-a"})
diskName := "disk"
diskType := DiskTypeStandard
const sizeGb int64 = 128
@@ -637,6 +653,7 @@ type FakeServiceManager struct {
zonalDisks map[string]string // zone: diskName
regionalDisks map[string]sets.String // diskName: zones
waitForOpError error // Error to be returned by WaitForZoneOp or WaitForRegionalOp
zonesWithNodes []string // The zones that nodes are currently in

// Fields for TestCreateDisk
createDiskCalled bool
@@ -649,12 +666,13 @@ type FakeServiceManager struct {
resourceInUse bool // Marks the disk as in-use
}

func newFakeManager(gceProjectID string, gceRegion string) *FakeServiceManager {
func newFakeManager(gceProjectID string, gceRegion string, zonesWithNodes []string) *FakeServiceManager {
return &FakeServiceManager{
zonalDisks: make(map[string]string),
regionalDisks: make(map[string]sets.String),
gceProjectID: gceProjectID,
gceRegion: gceRegion,
zonalDisks: make(map[string]string),
regionalDisks: make(map[string]sets.String),
gceProjectID: gceProjectID,
gceRegion: gceRegion,
zonesWithNodes: zonesWithNodes,
}
}

@@ -925,3 +943,7 @@ func (manager *FakeServiceManager) WaitForRegionalOp(
}
return manager.waitForOpError
}

func (manager *FakeServiceManager) GetAllCurrentZones(client clientset.Interface) (sets.String, error) {
return sets.NewString(manager.zonesWithNodes...), nil
}
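Because the fake returns canned zones, the new seam can be exercised without an API server or any GCE calls. A hypothetical test (not in this commit) sketching that usage:

package gce

import (
	"testing"

	"k8s.io/apimachinery/pkg/util/sets"
)

// TestGetAllCurrentZones_Fake is hypothetical: it shows the seam the fake
// provides. The zones come from the injected slice, and the clientset
// argument can be nil because the fake never touches it.
func TestGetAllCurrentZones_Fake(t *testing.T) {
	fakeManager := newFakeManager("test-project", "fake-region", []string{"zone1", "zone2"})

	zones, err := fakeManager.GetAllCurrentZones(nil)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if !zones.Equal(sets.NewString("zone1", "zone2")) {
		t.Errorf("got zones %v, want zone1 and zone2", zones.List())
	}
}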
23 changes: 1 addition & 22 deletions pkg/cloudprovider/providers/gce/gce_instances.go
@@ -19,7 +19,6 @@ package gce
import (
"errors"
"fmt"
"log"
"net"
"net/http"
"strconv"
@@ -33,7 +32,6 @@ import (
compute "google.golang.org/api/compute/v1"

"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
@@ -258,26 +256,7 @@ func (gce *GCECloud) AddSSHKeyToAllInstances(user string, keyData []byte) error
// which is a relatively infrequent operation, and this is only 1 API call.
// Ideally we want to make this watch-based
func (gce *GCECloud) GetAllCurrentZones() (sets.String, error) {
zones := sets.NewString()
client := gce.client
if client == nil {
return nil, fmt.Errorf("Client not set on GCECloud object")
}
nodeList, err := client.Core().Nodes().List(metav1.ListOptions{})
if err != nil {
return nil, err
}

for _, node := range nodeList.Items {
labels := node.ObjectMeta.Labels
zone, ok := labels[kubeletapis.LabelZoneFailureDomain]
if ok {
zones.Insert(zone)
} else {
log.Printf("Could not find zone for node %v", node.ObjectMeta.Name)
}
}
return zones, nil
return gce.manager.GetAllCurrentZones(gce.client)
}

// SetClientSet sets the ClientSet on the GCECloud object for e2e tests
2 changes: 1 addition & 1 deletion test/e2e/storage/volume_provisioning.go
@@ -421,7 +421,7 @@ var _ = SIGDescribe("Dynamic Provisioning", func() {
gceCloud, err := framework.GetGCECloud()
Expect(err).NotTo(HaveOccurred())

// Get all k8s managed zones (in this case it is just the single zone)
// Get all k8s managed zones (for this test, the same as the zones that contain nodes)
managedZones, err = gceCloud.GetAllCurrentZones()
Expect(err).NotTo(HaveOccurred())

