subvolumegroup: add support for size and datapool
CephFS supports creating a subvolume group with a quota and a
custom data pool. This PR adds support for both options.

Signed-off-by: Madhu Rajanna <madhupr007@gmail.com>
Madhu-1 committed Apr 10, 2024
1 parent 8c8844e commit 4f1aa3a
Showing 11 changed files with 169 additions and 8 deletions.
@@ -32,6 +32,10 @@ spec:
    distributed: 1 # distributed=<0, 1> (disabled=0)
    # export: # export=<0-256> (disabled=-1)
    # random: # random=[0.0, 1.0](disabled=0.0)
  # Quota size of the subvolume group.
  #quota: 10G
  # data pool name for the subvolume group layout instead of the default data pool.
  #dataPoolName: myfs-replicated
```

## Settings
@@ -48,7 +52,11 @@ If any setting is unspecified, a suitable default will be used automatically.

* `filesystemName`: The metadata name of the CephFilesystem CR where the subvolume group will be created.

* `pinning`: To distribute load across MDS ranks in predictable and stable ways. Reference: https://docs.ceph.com/en/latest/cephfs/fs-volumes/#pinning-subvolumes-and-subvolume-groups.
* `quota`: Quota size of the Ceph Filesystem subvolume group; see the quantity-parsing sketch after this list.

* `dataPoolName`: The data pool name for the subvolume group layout instead of the default data pool.

* `pinning`: To distribute load across MDS ranks in predictable and stable ways. See the Ceph doc for [Pinning subvolume groups](https://docs.ceph.com/en/latest/cephfs/fs-volumes/#pinning-subvolumes-and-subvolume-groups).
    * `distributed`: Range: <0, 1>, for disabling it set to 0
    * `export`: Range: <0-256>, for disabling it set to -1
    * `random`: Range: [0.0, 1.0], for disabling it set to 0.0
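
As a quick illustration of how a `quota` value resolves to bytes, here is a minimal sketch using the `k8s.io/apimachinery` `resource.Quantity` type that backs the field. The conversions shown are standard Kubernetes quantity semantics, not Rook-specific behavior; note the difference between decimal `G` and binary `Gi` suffixes.

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	// Decimal suffix: 10G = 10 * 1000^3 bytes.
	decimal := resource.MustParse("10G")
	fmt.Println(decimal.Value()) // 10000000000

	// Binary suffix: 10Gi = 10 * 1024^3 bytes.
	binary := resource.MustParse("10Gi")
	fmt.Println(binary.Value()) // 10737418240
}
```

The operator passes this byte value to Ceph, so `quota: 10G` and `quota: 10Gi` request different sizes.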
48 changes: 48 additions & 0 deletions Documentation/CRDs/specification.md
@@ -1562,6 +1562,30 @@ reference <a href="https://docs.ceph.com/en/latest/cephfs/fs-volumes/#pinning-su
only one out of (export, distributed, random) can be set at a time</p>
</td>
</tr>
<tr>
<td>
<code>quota</code><br/>
<em>
k8s.io/apimachinery/pkg/api/resource.Quantity
</em>
</td>
<td>
<em>(Optional)</em>
<p>Quota size of the Ceph Filesystem subvolume group.</p>
</td>
</tr>
<tr>
<td>
<code>dataPoolName</code><br/>
<em>
string
</em>
</td>
<td>
<em>(Optional)</em>
<p>The data pool name for the Ceph Filesystem subvolume group layout, if the default CephFS pool is not desired.</p>
</td>
</tr>
</table>
</td>
</tr>
@@ -3702,6 +3726,30 @@ reference <a href="https://docs.ceph.com/en/latest/cephfs/fs-volumes/#pinning-su
only one out of (export, distributed, random) can be set at a time</p>
</td>
</tr>
<tr>
<td>
<code>quota</code><br/>
<em>
k8s.io/apimachinery/pkg/api/resource.Quantity
</em>
</td>
<td>
<em>(Optional)</em>
<p>Quota size of the Ceph Filesystem subvolume group.</p>
</td>
</tr>
<tr>
<td>
<code>dataPoolName</code><br/>
<em>
string
</em>
</td>
<td>
<em>(Optional)</em>
<p>The data pool name for the Ceph Filesystem subvolume group layout, if the default CephFS pool is not desired.</p>
</td>
</tr>
</tbody>
</table>
<h3 id="ceph.rook.io/v1.CephFilesystemSubVolumeGroupSpecPinning">CephFilesystemSubVolumeGroupSpecPinning
10 changes: 10 additions & 0 deletions deploy/charts/rook-ceph/templates/resources.yaml
@@ -7977,6 +7977,9 @@ spec:
spec:
  description: Spec represents the specification of a Ceph Filesystem SubVolumeGroup
  properties:
    dataPoolName:
      description: The data pool name for the Ceph Filesystem subvolume group layout, if the default CephFS pool is not desired.
      type: string
    filesystemName:
      description: |-
        FilesystemName is the name of Ceph Filesystem SubVolumeGroup volume name. Typically it's the name of
@@ -8018,6 +8021,13 @@ spec:
      x-kubernetes-validations:
      - message: only one pinning type should be set
        rule: (has(self.export) && !has(self.distributed) && !has(self.random)) || (!has(self.export) && has(self.distributed) && !has(self.random)) || (!has(self.export) && !has(self.distributed) && has(self.random)) || (!has(self.export) && !has(self.distributed) && !has(self.random))
    quota:
      anyOf:
      - type: integer
      - type: string
      description: Quota size of the Ceph Filesystem subvolume group.
      pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
      x-kubernetes-int-or-string: true
  required:
  - filesystemName
  type: object
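
For a sense of which spellings the `pattern` above accepts, here is a small standalone check. The regex is copied verbatim from the schema; the candidate values and expected results are illustrative.

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Quantity pattern copied from the CRD schema above.
	quantityRE := regexp.MustCompile(`^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$`)

	for _, v := range []string{"10G", "10Gi", "1.5Ti", "10000000000", "10GB", "10 G"} {
		fmt.Printf("%-12s %v\n", v, quantityRE.MatchString(v))
	}
	// The first four match; "10GB" and "10 G" are rejected.
}
```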
10 changes: 10 additions & 0 deletions deploy/examples/crds.yaml
@@ -7971,6 +7971,9 @@ spec:
spec:
  description: Spec represents the specification of a Ceph Filesystem SubVolumeGroup
  properties:
    dataPoolName:
      description: The data pool name for the Ceph Filesystem subvolume group layout, if the default CephFS pool is not desired.
      type: string
    filesystemName:
      description: |-
        FilesystemName is the name of Ceph Filesystem SubVolumeGroup volume name. Typically it's the name of
@@ -8012,6 +8015,13 @@ spec:
      x-kubernetes-validations:
      - message: only one pinning type should be set
        rule: (has(self.export) && !has(self.distributed) && !has(self.random)) || (!has(self.export) && has(self.distributed) && !has(self.random)) || (!has(self.export) && !has(self.distributed) && has(self.random)) || (!has(self.export) && !has(self.distributed) && !has(self.random))
    quota:
      anyOf:
      - type: integer
      - type: string
      description: Quota size of the Ceph Filesystem subvolume group.
      pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
      x-kubernetes-int-or-string: true
  required:
  - filesystemName
  type: object
4 changes: 4 additions & 0 deletions deploy/examples/subvolumegroup.yaml
@@ -17,3 +17,7 @@ spec:
    distributed: 1 # distributed=<0, 1> (disabled=0)
    # export: # export=<0-256> (disabled=-1)
    # random: # random=[0.0, 1.0](disabled=0.0)
  # Quota size of the subvolume group.
  #quota: 10G
  # data pool name for the subvolume group layout instead of the default data pool.
  #dataPoolName: myfs-replicated
6 changes: 6 additions & 0 deletions pkg/apis/ceph.rook.io/v1/types.go
@@ -3016,6 +3016,12 @@ type CephFilesystemSubVolumeGroupSpec struct {
	// only one out of (export, distributed, random) can be set at a time
	// +optional
	Pinning CephFilesystemSubVolumeGroupSpecPinning `json:"pinning,omitempty"`
	// Quota size of the Ceph Filesystem subvolume group.
	// +optional
	Quota *resource.Quantity `json:"quota,omitempty"`
	// The data pool name for the Ceph Filesystem subvolume group layout, if the default CephFS pool is not desired.
	// +optional
	DataPoolName string `json:"dataPoolName"`
}

// CephFilesystemSubVolumeGroupSpecPinning represents the pinning configuration of SubVolumeGroup
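
For orientation, here is a rough sketch of a fully populated spec in Go, based on the types above. The names `group-a`, `rook-ceph`, `myfs`, and `myfs-replicated` are placeholders, not values from this commit.

```go
package example

import (
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1"
)

func exampleSubVolumeGroup() *cephv1.CephFilesystemSubVolumeGroup {
	quota := resource.MustParse("10G")
	return &cephv1.CephFilesystemSubVolumeGroup{
		ObjectMeta: metav1.ObjectMeta{Name: "group-a", Namespace: "rook-ceph"},
		Spec: cephv1.CephFilesystemSubVolumeGroupSpec{
			FilesystemName: "myfs",
			// Quota and DataPoolName are the new optional fields added here.
			Quota:        &quota,
			DataPoolName: "myfs-replicated",
		},
	}
}
```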
79 changes: 75 additions & 4 deletions pkg/daemon/ceph/client/subvolumegroup.go
@@ -17,32 +17,103 @@ limitations under the License.
package client

import (
	"encoding/json"
	"fmt"
	"strconv"
	"syscall"

	"github.com/pkg/errors"
	cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1"
	"github.com/rook/rook/pkg/clusterd"
	"github.com/rook/rook/pkg/util/exec"
	"k8s.io/apimachinery/pkg/types"
)

// CreateCephFSSubVolumeGroup create a CephFS subvolume group.
// volName is the name of the Ceph FS volume, the same as the CephFilesystem CR name.
func CreateCephFSSubVolumeGroup(context *clusterd.Context, clusterInfo *ClusterInfo, volName, groupName string) error {
func CreateCephFSSubVolumeGroup(context *clusterd.Context, clusterInfo *ClusterInfo, volName, groupName string, svgSpec *cephv1.CephFilesystemSubVolumeGroupSpec) error {
	logger.Infof("creating cephfs %q subvolume group %q", volName, groupName)
	// [--pool_layout <data_pool_name>] [--uid <uid>] [--gid <gid>] [--mode <octal_mode>]
	// [<size:int>] [--pool_layout <data_pool_name>] [--uid <uid>] [--gid <gid>] [--mode <octal_mode>]
	args := []string{"fs", "subvolumegroup", "create", volName, groupName}
	if svgSpec != nil {
		if svgSpec.Quota != nil {
			// convert the size to bytes, as Ceph expects the size in bytes
			args = append(args, fmt.Sprintf("--size=%d", svgSpec.Quota.Value()))
		}
		if svgSpec.DataPoolName != "" {
			args = append(args, fmt.Sprintf("--pool_layout=%s", svgSpec.DataPoolName))
		}
	}

	_, err := getCephFSSubVolumeGroupInfo(context, clusterInfo, volName, groupName)
	if err != nil {
		// return any error other than "not found"
		if code, ok := exec.ExitStatus(err); ok && code != int(syscall.ENOENT) {
			return errors.Wrapf(err, "failed to create subvolume group %q in filesystem %q", groupName, volName)
		}
	}

	// if the subvolume group already exists, resize it
	if err == nil && svgSpec != nil && svgSpec.Quota != nil {
		err = resizeCephFSSubVolumeGroup(context, clusterInfo, volName, groupName, svgSpec)
		if err != nil {
			return errors.Wrapf(err, "failed to create subvolume group %q in filesystem %q", groupName, volName)
		}
	}

	cmd := NewCephCommand(context, clusterInfo, args)
	cmd.JsonOutput = false
	output, err := cmd.Run()
	if err != nil {
		return errors.Wrapf(err, "failed to create subvolume group %q. %s", volName, output)
		return errors.Wrapf(err, "failed to create subvolume group %q in filesystem %q. %s", groupName, volName, output)
	}

	logger.Infof("successfully created cephfs %q subvolume group %q", volName, groupName)
	logger.Infof("successfully created subvolume group %q in filesystem %q", groupName, volName)
	return nil
}

// resizeCephFSSubVolumeGroup resizes a CephFS subvolume group.
// volName is the name of the Ceph FS volume, the same as the CephFilesystem CR name.
func resizeCephFSSubVolumeGroup(context *clusterd.Context, clusterInfo *ClusterInfo, volName, groupName string, svgSpec *cephv1.CephFilesystemSubVolumeGroupSpec) error {
	logger.Infof("resizing cephfs %q subvolume group %q", volName, groupName)
	// <vol_name> <group_name> <new_size> [--no-shrink]
	args := []string{"fs", "subvolumegroup", "resize", volName, groupName, "--no-shrink", fmt.Sprintf("%d", svgSpec.Quota.Value())}
	cmd := NewCephCommand(context, clusterInfo, args)
	cmd.JsonOutput = false
	output, err := cmd.Run()
	if err != nil {
		return errors.Wrapf(err, "failed to resize subvolume group %q in filesystem %q. %s", groupName, volName, output)
	}

	logger.Infof("successfully resized subvolume group %q in filesystem %q to %s", groupName, volName, svgSpec.Quota)
	return nil
}

type subvolumeGroupInfo struct {
	BytesQuota int    `json:"bytes_quota"`
	BytesUsed  int    `json:"bytes_used"`
	DataPool   string `json:"data_pool"`
}

// getCephFSSubVolumeGroupInfo gets the subvolume group info for the given group name.
// volName is the name of the Ceph FS volume, the same as the CephFilesystem CR name.
func getCephFSSubVolumeGroupInfo(context *clusterd.Context, clusterInfo *ClusterInfo, volName, groupName string) (*subvolumeGroupInfo, error) {
	args := []string{"fs", "subvolumegroup", "info", volName, groupName}
	cmd := NewCephCommand(context, clusterInfo, args)
	cmd.JsonOutput = true
	output, err := cmd.Run()
	if err != nil {
		return nil, errors.Wrapf(err, "failed to get subvolume group %q in filesystem %q. %s", groupName, volName, output)
	}

	svgInfo := subvolumeGroupInfo{}
	err = json.Unmarshal(output, &svgInfo)
	if err != nil {
		return nil, errors.Wrapf(err, "failed to unmarshal into subvolumeGroupInfo")
	}
	return &svgInfo, nil
}

// DeleteCephFSSubVolumeGroup delete a CephFS subvolume group.
func DeleteCephFSSubVolumeGroup(context *clusterd.Context, clusterInfo *ClusterInfo, volName, groupName string) error {
	logger.Infof("deleting cephfs %q subvolume group %q", volName, groupName)
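
To summarize the flag handling above, here is a hedged, self-contained sketch of how the create arguments are assembled. The `buildCreateArgs` helper is illustrative and not part of this commit; the real `CreateCephFSSubVolumeGroup` additionally consults `subvolumegroup info` and resizes an existing group.

```go
package main

import (
	"fmt"
	"strings"

	"k8s.io/apimachinery/pkg/api/resource"
)

// buildCreateArgs mirrors the argument construction in CreateCephFSSubVolumeGroup:
// the quota is passed to Ceph as --size in bytes, the data pool as --pool_layout.
func buildCreateArgs(volName, groupName string, quota *resource.Quantity, dataPool string) []string {
	args := []string{"fs", "subvolumegroup", "create", volName, groupName}
	if quota != nil {
		args = append(args, fmt.Sprintf("--size=%d", quota.Value()))
	}
	if dataPool != "" {
		args = append(args, fmt.Sprintf("--pool_layout=%s", dataPool))
	}
	return args
}

func main() {
	q := resource.MustParse("10G")
	fmt.Println("ceph " + strings.Join(buildCreateArgs("myfs", "csi", &q, "myfs-replicated"), " "))
	// ceph fs subvolumegroup create myfs csi --size=10000000000 --pool_layout=myfs-replicated
}
```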
2 changes: 1 addition & 1 deletion pkg/operator/ceph/file/filesystem.go
@@ -74,7 +74,7 @@ func createFilesystem(
		}
	}

	err := cephclient.CreateCephFSSubVolumeGroup(context, clusterInfo, fs.Name, defaultCSISubvolumeGroup)
	err := cephclient.CreateCephFSSubVolumeGroup(context, clusterInfo, fs.Name, defaultCSISubvolumeGroup, nil)
	if err != nil {
		return errors.Wrapf(err, "failed to create subvolume group %q", defaultCSISubvolumeGroup)
	}
2 changes: 2 additions & 0 deletions pkg/operator/ceph/file/filesystem_test.go
@@ -233,6 +233,8 @@ func fsExecutor(t *testing.T, fsName, configDir string, multiFS bool, createData
			return `{"standbys":[], "filesystems":[]}`, nil
		} else if reflect.DeepEqual(args[0:5], []string{"fs", "subvolumegroup", "create", fsName, defaultCSISubvolumeGroup}) {
			return "", nil
		} else if reflect.DeepEqual(args[0:5], []string{"fs", "subvolumegroup", "info", fsName, defaultCSISubvolumeGroup}) {
			return "", nil
		} else if contains(args, "osd") && contains(args, "lspools") {
			return "[]", nil
		} else if contains(args, "mds") && contains(args, "fail") {
2 changes: 1 addition & 1 deletion pkg/operator/ceph/file/subvolumegroup/controller.go
@@ -317,7 +317,7 @@ func (r *ReconcileCephFilesystemSubVolumeGroup) updateClusterConfig(cephFilesyst
func (r *ReconcileCephFilesystemSubVolumeGroup) createOrUpdateSubVolumeGroup(cephFilesystemSubVolumeGroup *cephv1.CephFilesystemSubVolumeGroup) error {
	logger.Infof("creating ceph filesystem subvolume group %s in namespace %s", cephFilesystemSubVolumeGroup.Name, cephFilesystemSubVolumeGroup.Namespace)

	err := cephclient.CreateCephFSSubVolumeGroup(r.context, r.clusterInfo, cephFilesystemSubVolumeGroup.Spec.FilesystemName, getSubvolumeGroupName(cephFilesystemSubVolumeGroup))
	err := cephclient.CreateCephFSSubVolumeGroup(r.context, r.clusterInfo, cephFilesystemSubVolumeGroup.Spec.FilesystemName, getSubvolumeGroupName(cephFilesystemSubVolumeGroup), &cephFilesystemSubVolumeGroup.Spec)
	if err != nil {
		return errors.Wrapf(err, "failed to create ceph filesystem subvolume group %q", cephFilesystemSubVolumeGroup.Name)
	}
4 changes: 3 additions & 1 deletion tests/framework/installer/ceph_manifests.go
@@ -652,7 +652,9 @@ metadata:
  name: ` + groupName + `
  namespace: ` + m.settings.Namespace + `
spec:
  filesystemName: ` + fsName
  filesystemName: ` + fsName + `
  quota: 10G
  dataPoolName: ` + fsName + "-data0"
}

func (m *CephManifestsMaster) GetCOSIDriver() string {
