[US4131]fix(cstor_operator): add all disk on node for pool provisioning (#884)

* fix(cstor-operator): pick all disks on nodes for manually provisioned pool.

Signed-off-by: Ashutosh Kumar <ashutosh.kumar@openebs.io>
sonasingh46 authored and vishnuitta committed Jan 25, 2019
1 parent a5b71c1 commit 4cc723b
Showing 2 changed files with 61 additions and 23 deletions.
48 changes: 27 additions & 21 deletions cmd/maya-apiserver/spc-watcher/select_disk.go
@@ -27,9 +27,16 @@ import (

 const (
     // DiskStateActive is the active state of the disks.
-    DiskStateActive = "Active"
+    DiskStateActive        = "Active"
+    ProvisioningTypeManual = "manual"
+    ProvisioningTypeAuto   = "auto"
 )
 
+var defaultDiskCount = map[string]int{
+    string(v1alpha1.PoolTypeMirroredCPV): int(v1alpha1.MirroredDiskCountCPV),
+    string(v1alpha1.PoolTypeStripedCPV):  int(v1alpha1.StripedDiskCountCPV),
+}
+
 // clientset struct holds the interface of internalclientset
 // i.e. openebs.
 // This struct will be binded to method ListDisk and is useful in mocking
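
The new defaultDiskCount map replaces the per-pool-type if blocks deleted from diskSelector further down: per the removed comments there, a striped pool needs 1 disk and a mirrored pool needs 2. A minimal standalone sketch of the lookup, with the constant values assumed from those comments rather than taken from the v1alpha1 package:

    package main

    import "fmt"

    // Assumed values, mirroring the removed comments in diskSelector:
    // a striped pool selects 1 disk, a mirrored pool selects 2.
    var defaultDiskCount = map[string]int{
        "mirrored": 2, // stands in for int(v1alpha1.MirroredDiskCountCPV)
        "striped":  1, // stands in for int(v1alpha1.StripedDiskCountCPV)
    }

    func main() {
        fmt.Println(defaultDiskCount["striped"])  // 1
        fmt.Println(defaultDiskCount["mirrored"]) // 2
    }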
@@ -59,13 +66,18 @@ func (k *clientSet) nodeDiskAlloter(cp *v1alpha1.StoragePoolClaim) (*nodeDisk, e
     if len(listDisk.Items) == 0 {
         return nil, errors.New("no disk object found")
     }
+
+    var provisioningType string
+    if len(cp.Spec.Disks.DiskList) == 0 {
+        provisioningType = ProvisioningTypeAuto
+    } else {
+        provisioningType = ProvisioningTypeManual
+    }
     // pendingAllotment holds the number of pools that will be pending to be provisioned.
     nodeDiskMap, err := k.nodeSelector(listDisk, cp.Spec.PoolSpec.PoolType, cp.Name)
     if err != nil {
         return nil, err
     }
-    selectedDisk := diskSelector(nodeDiskMap, cp.Spec.PoolSpec.PoolType)
+    selectedDisk := diskSelector(nodeDiskMap, cp.Spec.PoolSpec.PoolType, provisioningType)
     return selectedDisk, nil
 }
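
nodeDiskAlloter now infers the provisioning mode from the claim itself: an SPC whose Disks.DiskList is empty is auto-provisioned, while one that names disks explicitly is manually provisioned. A runnable sketch of that decision (provisioningTypeOf is a hypothetical helper, not part of this commit):

    package main

    import "fmt"

    const (
        ProvisioningTypeManual = "manual"
        ProvisioningTypeAuto   = "auto"
    )

    // provisioningTypeOf mirrors the branch added to nodeDiskAlloter:
    // a claim that names no disks is auto-provisioned.
    func provisioningTypeOf(diskList []string) string {
        if len(diskList) == 0 {
            return ProvisioningTypeAuto
        }
        return ProvisioningTypeManual
    }

    func main() {
        fmt.Println(provisioningTypeOf(nil))               // auto
        fmt.Println(provisioningTypeOf([]string{"disk1"})) // manual
    }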

@@ -126,8 +138,7 @@ func (k *clientSet) nodeSelector(listDisk *v1alpha1.DiskList, poolType string, s

 // diskSelector is the function that will select the required number of disks from qualified nodes
 // so as to provision storagepool.
-func diskSelector(nodeDiskMap map[string]*diskList, poolType string) *nodeDisk {
-
+func diskSelector(nodeDiskMap map[string]*diskList, poolType, provisioningType string) *nodeDisk {
     // selectedDisk will hold a list of disk that will be used to provision storage pool, after a
     // minimum number of node qualifies
     selectedDisk := &nodeDisk{
@@ -136,28 +147,23 @@ func diskSelector(nodeDiskMap map[string]*diskList, poolType string) *nodeDisk {
             items: []string{},
         },
     }
-
-    // requiredDiskCount will hold the required number of disk that should be selected from a qualified
+    // diskCount will hold the number of disk that will be selected from a qualified
     // node for specific pool type
-    var requiredDiskCount int
-    // If pool type is striped, 1 disk should be selected
-    if poolType == string(v1alpha1.PoolTypeStripedCPV) {
-        requiredDiskCount = int(v1alpha1.StripedDiskCountCPV)
-    }
-    // If pool type is mirrored, 2 disks should be selected
-    if poolType == string(v1alpha1.PoolTypeMirroredCPV) {
-        requiredDiskCount = int(v1alpha1.MirroredDiskCountCPV)
-    }
-    // Range over the nodeDiskMap map to get the list of disks
+    var diskCount int
+    // minRequiredDiskCount will hold the required number of disk that should be selected from a qualified
+    // node for specific pool type
+    minRequiredDiskCount := defaultDiskCount[poolType]
     for node, val := range nodeDiskMap {
-
         // If the current disk count on the node is less than the required disks
         // then this is a dirty node and it will not qualify.
-        if len(val.items) < requiredDiskCount {
+        if len(val.items) < minRequiredDiskCount {
             continue
         }
-        // Select the required disk from qualified nodes.
-        for i := 0; i < requiredDiskCount; i++ {
+        diskCount = minRequiredDiskCount
+        if provisioningType == ProvisioningTypeManual {
+            diskCount = (len(val.items) / minRequiredDiskCount) * minRequiredDiskCount
+        }
+        for i := 0; i < diskCount; i++ {
             selectedDisk.disks.items = append(selectedDisk.disks.items, val.items[i])
         }
         selectedDisk.nodeName = node
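
The core of the change is the diskCount computation above: in manual mode every disk listed on the node is consumed, rounded down to the nearest multiple of the pool type's minimum, while auto mode still takes just the minimum. A runnable sketch of the rule (disksToSelect is a hypothetical name for illustration):

    package main

    import "fmt"

    // disksToSelect mirrors the new diskCount logic in diskSelector:
    // manual provisioning uses all disks on a qualified node, rounded
    // down to a multiple of the pool type's minimum (pairs for mirrors).
    func disksToSelect(available, minRequired int, manual bool) int {
        if available < minRequired {
            return 0 // node does not qualify
        }
        if !manual {
            return minRequired
        }
        return (available / minRequired) * minRequired
    }

    func main() {
        fmt.Println(disksToSelect(4, 2, true))  // 4: two full mirror pairs
        fmt.Println(disksToSelect(3, 2, true))  // 2: the odd disk is left out
        fmt.Println(disksToSelect(3, 2, false)) // 2: auto mode takes the minimum
    }

The first two values match the expected counts of the manualSPC8 and manualSPC9 cases added to the test file below.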
36 changes: 34 additions & 2 deletions cmd/maya-apiserver/spc-watcher/select_disk_test.go
@@ -19,11 +19,12 @@ package spc
 import (
     "testing"
     //openebsFakeClientset "github.com/openebs/maya/pkg/client/clientset/versioned/fake"
+    "strconv"
+
     "github.com/golang/glog"
     "github.com/openebs/maya/pkg/apis/openebs.io/v1alpha1"
     openebsFakeClientset "github.com/openebs/maya/pkg/client/generated/clientset/internalclientset/fake"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-    "strconv"
 )
 
 func (focs *clientSet) FakeDiskCreator() {
@@ -146,7 +147,7 @@ func TestNodeDiskAlloter(t *testing.T) {
                 },
             },
         },
-            1,
+            3,
             false,
         },
         // Test Case #6
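
The expectation change from 1 to 3 in the hunk above is consistent with a manually provisioned striped claim listing three disks: with a striped minimum of 1, the new rounding rule selects (3/1)*1 = 3 disks, where the old code stopped at the one-disk minimum.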
@@ -179,7 +180,38 @@
             0,
             false,
         },
+        // Test Case #8
+        "manualSPC8": {&v1alpha1.StoragePoolClaim{
+            Spec: v1alpha1.StoragePoolClaimSpec{
+                Type: "disk",
+                PoolSpec: v1alpha1.CStorPoolAttr{
+                    PoolType: "mirrored",
+                },
+                Disks: v1alpha1.DiskAttr{
+                    DiskList: []string{"disk1", "disk2", "disk3", "disk4"},
+                },
+            },
+        },
+            4,
+            false,
+        },
+        // Test Case #9
+        "manualSPC9": {&v1alpha1.StoragePoolClaim{
+            Spec: v1alpha1.StoragePoolClaimSpec{
+                Type: "disk",
+                PoolSpec: v1alpha1.CStorPoolAttr{
+                    PoolType: "mirrored",
+                },
+                Disks: v1alpha1.DiskAttr{
+                    DiskList: []string{"disk1", "disk2", "disk3"},
+                },
+            },
+        },
+            2,
+            false,
+        },
     }
 
     for name, test := range tests {
         t.Run(name, func(t *testing.T) {
             diskList, err := focs.nodeDiskAlloter(test.fakeCasPool)
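
The two new mirrored-pool cases pin the rounding behaviour down from the test side: four listed disks are all selected, while three round down to a single pair. A minimal table-driven check of the same arithmetic, assuming the mirrored minimum is 2 (TestManualDiskCountRounding is a hypothetical standalone test, not part of the commit):

    package spc

    import "testing"

    func TestManualDiskCountRounding(t *testing.T) {
        const minMirrored = 2 // assumed value of v1alpha1.MirroredDiskCountCPV
        cases := map[string]struct{ avail, want int }{
            "manualSPC8": {4, 4}, // all four disks form two mirror pairs
            "manualSPC9": {3, 2}, // three disks round down to one pair
        }
        for name, c := range cases {
            if got := (c.avail / minMirrored) * minMirrored; got != c.want {
                t.Errorf("%s: want %d disks, got %d", name, c.want, got)
            }
        }
    }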
