Merge pull request #1019 from crombus/release-4.6
Bug 1909793: Backport: [Release 4.6] adds tunefastdeviceclass property
openshift-merge-robot committed Feb 8, 2021
2 parents 24fd0d4 + db4359a commit d7d1377
Showing 5 changed files with 109 additions and 26 deletions.
4 changes: 4 additions & 0 deletions deploy/crds/ocs.openshift.io_storageclusters_crd.yaml
@@ -1223,6 +1223,10 @@ spec:
when the actual configurable options are defined in rook-ceph'
type: object
properties:
tuneFastDeviceClass:
description: TuneFastDeviceClass tunes the OSD when running
on a fast Device Class
type: boolean
tuneSlowDeviceClass:
description: TuneSlowDeviceClass tunes the OSD when running
on a slow Device Class
4 changes: 4 additions & 0 deletions pkg/apis/ocs/v1/storagecluster_types.go
@@ -120,6 +120,10 @@ type StorageDeviceSetConfig struct {
// TuneSlowDeviceClass tunes the OSD when running on a slow Device Class
// +optional
TuneSlowDeviceClass bool `json:"tuneSlowDeviceClass,omitempty"`

// TuneFastDeviceClass tunes the OSD when running on a fast Device Class
// +optional
TuneFastDeviceClass bool `json:"tuneFastDeviceClass,omitempty"`
}

// MultiCloudGatewaySpec defines specific multi-cloud gateway configuration options
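For orientation, here is a minimal, hypothetical sketch of how the new field sits next to the existing TuneSlowDeviceClass flag in a device set config. The import path and values are assumptions based on this repository's layout; in practice the operator fills both flags in itself during reconcile, as shown in cephcluster.go below.

package main

import (
	"fmt"

	// Import path assumed from this repository's layout; adjust if the module path differs.
	ocsv1 "github.com/openshift/ocs-operator/pkg/apis/ocs/v1"
)

func main() {
	// Hypothetical device set: the operator normally sets these flags
	// automatically based on the backing StorageClass.
	ds := ocsv1.StorageDeviceSet{
		Name: "example-deviceset", // hypothetical name
		Config: ocsv1.StorageDeviceSetConfig{
			TuneSlowDeviceClass: false,
			TuneFastDeviceClass: true, // field added by this backport
		},
	}
	fmt.Printf("tuneFastDeviceClass=%v\n", ds.Config.TuneFastDeviceClass)
}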
36 changes: 30 additions & 6 deletions pkg/controller/storagecluster/cephcluster.go
@@ -15,6 +15,7 @@ import (
cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1"
rook "github.com/rook/rook/pkg/apis/rook.io/v1"
corev1 "k8s.io/api/core/v1"
storagev1 "k8s.io/api/storage/v1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -23,6 +24,10 @@ import (
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
)

var throttleDiskTypes = []string{"gp2", "io1"}

var throttleFastDiskTypes = []string{"managed-premium"}

const (
// Hardcoding networkProvider to multus and this can be changed later to accomodate other providers
networkProvider = "multus"
@@ -40,15 +45,12 @@ func (r *ReconcileStorageCluster) ensureCephCluster(sc *ocsv1.StorageCluster, re
// this is for performance optimization of slow device class
//TODO: If for a StorageDeviceSet there is a separate metadata pvc template, check for StorageClass of data pvc template only
for i, ds := range sc.Spec.StorageDeviceSets {
throttle, err := r.throttleStorageDevices(*ds.DataPVCTemplate.Spec.StorageClassName)
throttleSlow, throttleFast, err := r.throttleStorageDevices(*ds.DataPVCTemplate.Spec.StorageClassName)
if err != nil {
return fmt.Errorf("Failed to verify StorageClass provisioner. %+v", err)
}
if throttle {
sc.Spec.StorageDeviceSets[i].Config.TuneSlowDeviceClass = true
} else {
sc.Spec.StorageDeviceSets[i].Config.TuneSlowDeviceClass = false
}
sc.Spec.StorageDeviceSets[i].Config.TuneSlowDeviceClass = throttleSlow
sc.Spec.StorageDeviceSets[i].Config.TuneFastDeviceClass = throttleFast
}

if isMultus(sc.Spec.Network) {
@@ -401,6 +403,7 @@ func newStorageClassDeviceSets(sc *ocsv1.StorageCluster) []rook.StorageClassDevi
VolumeClaimTemplates: []corev1.PersistentVolumeClaim{ds.DataPVCTemplate},
Portable: ds.Portable,
TuneSlowDeviceClass: ds.Config.TuneSlowDeviceClass,
TuneFastDeviceClass: ds.Config.TuneFastDeviceClass,
Encrypted: sc.Spec.Encryption.Enable,
}

@@ -430,3 +433,24 @@ func newCephDaemonResources(custom map[string]corev1.ResourceRequirements) map[s

return resources
}

func (r *ReconcileStorageCluster) throttleStorageDevices(storageClassName string) (bool, bool, error) {
storageClass := &storagev1.StorageClass{}
err := r.client.Get(context.TODO(), types.NamespacedName{Namespace: "", Name: storageClassName}, storageClass)
if err != nil {
return false, false, fmt.Errorf("failed to retrieve StorageClass %q. %+v", storageClassName, err)
}

switch storageClass.Provisioner {
case string(EBS):
if contains(throttleDiskTypes, storageClass.Parameters["type"]) {
return true, false, nil
}
case string(AzureDisk):
if contains(throttleFastDiskTypes, storageClass.Parameters["type"]) {
return false, true, nil
}
}

return false, false, nil
}
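The new function calls a contains helper that is not part of this diff; it is assumed to be a simple string-slice membership check defined elsewhere in package storagecluster. A minimal sketch of the assumed behavior:

// Sketch only: the real helper lives elsewhere in this package and may
// differ in detail. contains reports whether s is present in list.
func contains(list []string, s string) bool {
	for _, v := range list {
		if v == s {
			return true
		}
	}
	return false
}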
27 changes: 7 additions & 20 deletions pkg/controller/storagecluster/reconcile.go
@@ -16,7 +16,6 @@ import (
"github.com/operator-framework/operator-sdk/pkg/ready"
batchv1 "k8s.io/api/batch/v1"
corev1 "k8s.io/api/core/v1"
storagev1 "k8s.io/api/storage/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
@@ -48,8 +47,7 @@ mon_max_pg_per_osd = 300
osd_memory_target_cgroup_limit_ratio = 0.5
`
monCountOverrideEnvVar = "MON_COUNT_OVERRIDE"
// EBS represents AWS EBS provisioner for StorageClass
EBS StorageClassProvisionerType = "kubernetes.io/aws-ebs"

//Name of MetadataPVCTemplate
metadataPVCName = "metadata"

@@ -63,6 +61,12 @@ osd_memory_target_cgroup_limit_ratio = 0.5
ReconcileStrategyManage ReconcileStrategy = "manage"
// ReconcileStrategyStandalone also means never reconcile (NooBaa)
ReconcileStrategyStandalone ReconcileStrategy = "standalone"

// AzureDisk represents Azure Premium Managed Disks provisioner for StorageClass
AzureDisk StorageClassProvisionerType = "kubernetes.io/azure-disk"

// EBS represents AWS EBS provisioner for StorageClass
EBS StorageClassProvisionerType = "kubernetes.io/aws-ebs"
)

var storageClusterFinalizer = "storagecluster.ocs.openshift.io"
Expand All @@ -73,8 +77,6 @@ var validTopologyLabelKeys = []string{
"topology.rook.io",
}

var throttleDiskTypes = []string{"gp2", "io1"}

// Reconcile reads that state of the cluster for a StorageCluster object and makes changes based on the state read
// and what is in the StorageCluster.Spec
// Note:
@@ -459,21 +461,6 @@ func (r *ReconcileStorageCluster) ensureCephConfig(sc *ocsv1.StorageCluster, req
return nil
}

func (r *ReconcileStorageCluster) throttleStorageDevices(storageClassName string) (bool, error) {
storageClass := &storagev1.StorageClass{}
err := r.client.Get(context.TODO(), types.NamespacedName{Namespace: "", Name: storageClassName}, storageClass)
if err != nil {
return false, fmt.Errorf("failed to retrieve StorageClass %q. %+v", storageClassName, err)
}
switch storageClass.Provisioner {
case string(EBS):
if contains(throttleDiskTypes, storageClass.Parameters["type"]) {
return true, nil
}
}
return false, nil
}

func (r *ReconcileStorageCluster) isActiveStorageCluster(instance *ocsv1.StorageCluster) (bool, error) {
storageClusterList := ocsv1.StorageClusterList{}

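For reference, a StorageClass that the new AzureDisk case would classify as a fast device class might look like the hypothetical sketch below, built with the upstream k8s.io/api types; the test file later in this diff exercises the same shape.

package main

import (
	"fmt"

	storagev1 "k8s.io/api/storage/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Hypothetical StorageClass: the provisioner and "type" parameter match
	// the AzureDisk/managed-premium branch of throttleStorageDevices, so the
	// operator would set TuneFastDeviceClass on device sets backed by it.
	sc := storagev1.StorageClass{
		ObjectMeta:  metav1.ObjectMeta{Name: "managed-premium"},
		Provisioner: "kubernetes.io/azure-disk",
		Parameters:  map[string]string{"type": "managed-premium"},
	}
	fmt.Printf("provisioner=%s type=%s\n", sc.Provisioner, sc.Parameters["type"])
}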
64 changes: 64 additions & 0 deletions pkg/controller/storagecluster/storagecluster_controller_test.go
@@ -163,6 +163,70 @@ func TestNonWatchedResourceNameNotFound(t *testing.T) {
assert.Equal(t, reconcile.Result{}, result)
}

func TestThrottleStorageDevices(t *testing.T) {
testcases := []struct {
label string
storageClass *storagev1.StorageClass
storageCluster *api.StorageCluster
expectedSlow bool
expectedFast bool
}{
{
label: "Case 1", // storageclass is gp2 or io1
storageClass: &storagev1.StorageClass{
ObjectMeta: metav1.ObjectMeta{
Name: "gp2",
},
Provisioner: string(EBS),
Parameters: map[string]string{
"type": "gp2",
},
},
storageCluster: &api.StorageCluster{},
expectedSlow: true,
expectedFast: false,
},
{
label: "Case 2", // storageclass is neither gp2 nor io1
storageClass: &storagev1.StorageClass{
ObjectMeta: metav1.ObjectMeta{
Name: "st1",
},
Provisioner: string(EBS),
Parameters: map[string]string{
"type": "st1",
},
},
storageCluster: &api.StorageCluster{},
expectedSlow: false,
expectedFast: false,
},
{
label: "Case 3", // storageclass is managed-premium
storageClass: &storagev1.StorageClass{
ObjectMeta: metav1.ObjectMeta{
Name: "managed-premium",
},
Provisioner: string(AzureDisk),
Parameters: map[string]string{
"type": "managed-premium",
},
},
storageCluster: &api.StorageCluster{},
expectedSlow: false,
expectedFast: true,
},
}

for _, tc := range testcases {
reconciler := createFakeStorageClusterReconciler(t, tc.storageCluster, tc.storageClass)
actualSlow, actualFast, err := reconciler.throttleStorageDevices(tc.storageClass.Name)
assert.NoError(t, err)
assert.Equalf(t, tc.expectedSlow, actualSlow, "[%q]: failed to get expected output", tc.label)
assert.Equalf(t, tc.expectedFast, actualFast, "[%q]: failed to get expected output", tc.label)
}
}

func TestNonWatchedResourceNamespaceNotFound(t *testing.T) {
request := reconcile.Request{
NamespacedName: types.NamespacedName{
