Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

feat(ZFSPV): scheduler for ZFSPV #8

Merged
merged 4 commits into from Nov 6, 2019
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Jump to
Jump to file
Failed to load files.
Diff view
Diff view
2 changes: 1 addition & 1 deletion cmd/main.go
Expand Up @@ -51,7 +51,7 @@ func main() {
)

cmd.PersistentFlags().StringVar(
&config.DriverName, "name", "zfs-localpv", "Name of this driver",
&config.DriverName, "name", "zfs.csi.openebs.io", "Name of this driver",
kmova marked this conversation as resolved.
Show resolved Hide resolved
)

cmd.PersistentFlags().StringVar(
Expand Down
3 changes: 1 addition & 2 deletions deploy/sample/fio.yaml
Expand Up @@ -12,8 +12,7 @@ parameters:
#keyformat: "raw"
#keylocation: "file:///home/pawan/key"
poolname: "zfspv-pool"
provisioner: zfs-localpv
volumeBindingMode: WaitForFirstConsumer
provisioner: zfs.csi.openebs.io
allowedTopologies:
- matchLabelExpressions:
- key: kubernetes.io/hostname
Expand Down
9 changes: 1 addition & 8 deletions deploy/sample/percona.yaml
Expand Up @@ -9,14 +9,7 @@ parameters:
dedup: "on"
thinprovision: "yes"
poolname: "zfspv-pool"
provisioner: zfs-localpv
volumeBindingMode: WaitForFirstConsumer
allowedTopologies:
- matchLabelExpressions:
- key: kubernetes.io/hostname
values:
- gke-zfspv-pawan-default-pool-c8929518-cgd4
- gke-zfspv-pawan-default-pool-c8929518-dxzc
provisioner: zfs.csi.openebs.io
---
kind: PersistentVolumeClaim
apiVersion: v1
Expand Down
4 changes: 4 additions & 0 deletions deploy/zfs-operator.yaml
Expand Up @@ -23,6 +23,10 @@ spec:
- zvol
- zv
additionalPrinterColumns:
- JSONPath: .spec.poolName
name: ZPool
description: ZFS Pool where the volume is created
type: string
- JSONPath: .spec.ownerNodeID
name: Node
description: Node where the volume is created
Expand Down
17 changes: 11 additions & 6 deletions pkg/driver/controller.go
Expand Up @@ -61,7 +61,6 @@ func (cs *controller) CreateVolume(
req *csi.CreateVolumeRequest,
) (*csi.CreateVolumeResponse, error) {

logrus.Infof("received request to create volume {%s} vol{%v}", req.GetName(), req)
var err error

if err = cs.validateVolumeCreateReq(req); err != nil {
Expand All @@ -78,9 +77,15 @@ func (cs *controller) CreateVolume(
kl := req.GetParameters()["keylocation"]
pool := req.GetParameters()["poolname"]
tp := req.GetParameters()["thinprovision"]
schld := req.GetParameters()["scheduler"]

// setting first in preferred list as the ownernode of this volume
OwnerNode := req.AccessibilityRequirements.Preferred[0].Segments[zvol.ZFSTopologyKey]
selected := scheduler(req.AccessibilityRequirements, schld, pool)

if len(selected) == 0 {
return nil, status.Error(codes.Internal, "scheduler failed")
}

logrus.Infof("scheduled the volume %s/%s on node %s", pool, volName, selected)

volObj, err := builder.NewBuilder().
WithName(volName).
Expand All @@ -92,7 +97,7 @@ func (cs *controller) CreateVolume(
WithKeyFormat(kf).
WithKeyLocation(kl).
WithThinProv(tp).
WithOwnerNode(OwnerNode).
WithOwnerNode(selected).
WithCompression(compression).Build()

if err != nil {
Expand All @@ -101,10 +106,10 @@ func (cs *controller) CreateVolume(

err = zvol.ProvisionVolume(size, volObj)
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
return nil, status.Error(codes.Internal, "not able to provision the volume")
}

topology := map[string]string{zvol.ZFSTopologyKey: OwnerNode}
topology := map[string]string{zvol.ZFSTopologyKey: selected}

return csipayload.NewCreateVolumeResponseBuilder().
WithName(volName).
Expand Down
96 changes: 96 additions & 0 deletions pkg/driver/scheduler.go
@@ -0,0 +1,96 @@
/*
Copyright © 2019 The OpenEBS Authors

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package driver

import (
"github.com/Sirupsen/logrus"
"math"

"github.com/container-storage-interface/spec/lib/go/csi"
"github.com/openebs/zfs-localpv/pkg/builder"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

zvol "github.com/openebs/zfs-localpv/pkg/zfs"
)

// scheduling algorithm constants
const (
	// VolumeWeighted picks the node on which fewer volumes are
	// provisioned for the given pool.
	// This will be the default scheduler when none is provided
	// in the storage-class parameters.
	VolumeWeighted = "VolumeWeighted"
)

// volumeWeightedScheduler walks all the ZFSVolumes in the OpenEBS
// namespace, counts how many already live on the given pool per node,
// and returns the node from the topology's preferred list carrying the
// fewest of them. An empty string is returned when the volume list
// cannot be fetched.
func volumeWeightedScheduler(topo *csi.TopologyRequirement, pool string) string {
	vols, err := builder.NewKubeclient().
		WithNamespace(zvol.OpenEBSNamespace).
		List(metav1.ListOptions{})
	if err != nil {
		// listing failed; signal the caller that scheduling failed
		return ""
	}

	// per-node count of volumes already provisioned on this pool
	countByNode := make(map[string]int)
	for _, vol := range vols.Items {
		if vol.Spec.PoolName == pool {
			countByNode[vol.Spec.OwnerNodeID]++
		}
	}

	// choose the preferred node with the lightest volume load;
	// nodes absent from the map count as zero volumes
	selected := ""
	best := math.MaxInt32
	for _, preferred := range topo.Preferred {
		candidate := preferred.Segments[zvol.ZFSTopologyKey]
		if cnt := countByNode[candidate]; cnt < best {
			selected = candidate
			best = cnt
		}
	}
	return selected
}

// scheduler schedules the PV as per topology constraints for
// the given zfs pool.
//
// topo is the topology requirement forwarded by the CSI provisioner,
// schld is the scheduling-algorithm name taken from the storage-class
// parameters, and pool is the target ZFS pool. It returns the name of
// the selected node, or "" when scheduling is not possible.
func scheduler(topo *csi.TopologyRequirement, schld string, pool string) string {
	if len(topo.Preferred) == 0 {
		logrus.Errorf("topology information not provided")
		return ""
	}

	// if there is a single node, schedule it on that
	if len(topo.Preferred) == 1 {
		return topo.Preferred[0].Segments[zvol.ZFSTopologyKey]
	}

	// dispatch on the requested algorithm; VolumeWeighted doubles as
	// the default when the parameter is empty or unrecognized.
	// NOTE: the trailing `return ""` that followed this switch was
	// unreachable (every arm returns) and has been removed.
	switch schld {
	case VolumeWeighted:
		return volumeWeightedScheduler(topo, pool)
	default:
		return volumeWeightedScheduler(topo, pool)
	}
}
1 change: 0 additions & 1 deletion pkg/zfs/volume.go
Expand Up @@ -118,7 +118,6 @@ func UpdateZvolInfo(vol *apis.ZFSVolume) error {
}

newVol, err := builder.BuildFrom(vol).
WithNodename(NodeID).
WithFinalizer(finalizers).
WithLabels(labels).Build()

Expand Down