Add mixed raid levels #2520 #2524

Merged
merged 1 commit into from Apr 3, 2023
137 changes: 103 additions & 34 deletions src/rockstor/fs/btrfs.py
@@ -96,49 +96,98 @@
DefaultSubvol = collections.namedtuple("DefaultSubvol", "id path boot_to_snap")
# Named Tuple for balance status: active (boolean) internal (boolean) status (dict)
BalanceStatusAll = collections.namedtuple("BalanceStatusAll", "active internal status")
# Named Tuple to define raid profile limits
btrfs_profile = collections.namedtuple("btrfs_profile", "min_dev_count max_dev_missing")
# List of profiles indexed by their name.
# Named Tuple to define raid profile limits and data/metadata
btrfs_profile = collections.namedtuple(
"btrfs_profile", "min_dev_count max_dev_missing data_raid metadata_raid"
)
# List of Rockstor btrfs raid profiles indexed by their name.
# I.e. PROFILE[raid_level].min_dev_count
# N.B. Mixed profiles indicated by "-" i.e. DATA-METADATA
# https://btrfs.readthedocs.io/en/latest/Kernel-by-version.html#jan-2020
# RAID1C34 along with incompatible flag added in kernel 5.5.
# https://btrfs.readthedocs.io/en/latest/Kernel-by-version.html#nov-2021 kernel 5.15
# enabled running raid0 and 10 with a minimum of 1 & 2 devices respectively.
# https://btrfs.readthedocs.io/en/latest/mkfs.btrfs.html
# "It's recommended to use specific profiles ..."
# The following are Rockstor's specifics:
PROFILE = {
# Fall-through profile to catch unknown raid levels/combinations.
# We specify a min dev count of 4 to account for any raid level,
# and likewise play safe by allowing for no missing devices.
"unknown": btrfs_profile(
min_dev_count=4, max_dev_missing=0, data_raid="unknown", metadata_raid="unknown"
),
# Non-redundant profiles:
"single": btrfs_profile(min_dev_count=1, max_dev_missing=0),
"raid0": btrfs_profile(min_dev_count=2, max_dev_missing=0),
"single": btrfs_profile(
min_dev_count=1, max_dev_missing=0, data_raid="single", metadata_raid="single"
),
"single-dup": btrfs_profile(
min_dev_count=1, max_dev_missing=0, data_raid="single", metadata_raid="dup"
),
"raid0": btrfs_profile(
min_dev_count=2, max_dev_missing=0, data_raid="raid0", metadata_raid="raid0"
),
# Mirrored profiles:
"raid1": btrfs_profile(min_dev_count=2, max_dev_missing=1),
"raid1c3": btrfs_profile(min_dev_count=3, max_dev_missing=2),
"raid1c4": btrfs_profile(min_dev_count=4, max_dev_missing=3),
"raid10": btrfs_profile(min_dev_count=4, max_dev_missing=1),
# Parity raid levels
"raid5": btrfs_profile(min_dev_count=2, max_dev_missing=1),
"raid6": btrfs_profile(min_dev_count=3, max_dev_missing=2),
"raid1": btrfs_profile(
min_dev_count=2, max_dev_missing=1, data_raid="raid1", metadata_raid="raid1"
),
"raid1c3": btrfs_profile(
min_dev_count=3, max_dev_missing=2, data_raid="raid1c3", metadata_raid="raid1c3"
),
"raid1c4": btrfs_profile(
min_dev_count=4, max_dev_missing=3, data_raid="raid1c4", metadata_raid="raid1c4"
),
"raid10": btrfs_profile(
min_dev_count=4, max_dev_missing=1, data_raid="raid10", metadata_raid="raid10"
),
# Parity raid levels (recommended min_dev_count is 3 & 4 respectively)
"raid5": btrfs_profile(
min_dev_count=2, max_dev_missing=1, data_raid="raid5", metadata_raid="raid5"
),
"raid6": btrfs_profile(
min_dev_count=3, max_dev_missing=2, data_raid="raid6", metadata_raid="raid6"
),
# ------- MIXED PROFILES DATA-METADATA (max 10 chars) -------
# Mixed Mirrored profiles:
"raid1-1c3": btrfs_profile(min_dev_count=3, max_dev_missing=1),
"raid1-1c4": btrfs_profile(min_dev_count=4, max_dev_missing=1),
"raid10-1c3": btrfs_profile(min_dev_count=4, max_dev_missing=1),
"raid10-1c4": btrfs_profile(min_dev_count=4, max_dev_missing=1),
"raid1-1c3": btrfs_profile(
min_dev_count=3, max_dev_missing=1, data_raid="raid1", metadata_raid="raid1c3"
),
"raid1-1c4": btrfs_profile(
min_dev_count=4, max_dev_missing=1, data_raid="raid1", metadata_raid="raid1c4"
),
"raid10-1c3": btrfs_profile(
min_dev_count=4, max_dev_missing=1, data_raid="raid10", metadata_raid="raid1c3"
),
"raid10-1c4": btrfs_profile(
min_dev_count=4, max_dev_missing=1, data_raid="raid10", metadata_raid="raid1c4"
),
# Parity data - Mirrored metadata
"raid5-1": btrfs_profile(min_dev_count=2, max_dev_missing=1),
"raid5-1c3": btrfs_profile(min_dev_count=3, max_dev_missing=1),
"raid6-1c3": btrfs_profile(min_dev_count=3, max_dev_missing=2),
"raid6-1c4": btrfs_profile(min_dev_count=4, max_dev_missing=2),
"raid5-1": btrfs_profile(
min_dev_count=2, max_dev_missing=1, data_raid="raid5", metadata_raid="raid1"
),
"raid5-1c3": btrfs_profile(
min_dev_count=3, max_dev_missing=1, data_raid="raid5", metadata_raid="raid1c3"
),
"raid6-1c3": btrfs_profile(
min_dev_count=3, max_dev_missing=2, data_raid="raid6", metadata_raid="raid1c3"
),
"raid6-1c4": btrfs_profile(
min_dev_count=4, max_dev_missing=2, data_raid="raid6", metadata_raid="raid1c4"
),
}
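
# For orientation only (not part of the diff): a minimal sketch of how the PROFILE
# table above is indexed by level name; the level chosen here is just an example.
profile = PROFILE["raid1-1c3"]
profile.data_raid        # "raid1"   - two copies of data chunks
profile.metadata_raid    # "raid1c3" - three copies of metadata chunks
profile.min_dev_count    # 3
profile.max_dev_missing  # 1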


def add_pool(pool, disks):
"""
Makes a btrfs pool (filesystem) of name 'pool' using the by-id disk names
provided, then attempts to enables quotas for this pool.
:param pool: name of pool to create.
provided, then attempts to enable quotas for this pool.
:param pool: Pool object.
:param disks: list of by-id disk names without paths to make the pool from.
:return o, err, rc from last command executed.
"""
disks_fp = [get_device_path(d) for d in disks]
draid = mraid = pool.raid
if pool.raid == "single":
mraid = "dup"
draid = PROFILE[pool.raid].data_raid
mraid = PROFILE[pool.raid].metadata_raid
cmd = [MKFS_BTRFS, "-f", "-d", draid, "-m", mraid, "-L", pool.name]
cmd.extend(disks_fp)
# Run the create pool command, any exceptions are logged and raised by
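# Illustrative sketch (not from the diff): assuming get_device_path() resolves
# by-id names under /dev/disk/by-id/, a hypothetical pool named "mypool" at raid
# level "raid5-1" on two disks would produce a command roughly like:
#   mkfs.btrfs -f -d raid5 -m raid1 -L mypool /dev/disk/by-id/ata-disk1 /dev/disk/by-id/ata-disk2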
@@ -452,8 +501,7 @@ def get_pool_info(disk):
return pool_info


def pool_raid(mnt_pt):
# TODO: propose name change to get_pool_raid_levels(mnt_pt)
def get_pool_raid_levels(mnt_pt):
o, e, rc = run_command([BTRFS, "fi", "df", mnt_pt])
# data, system, metadata, globalreserve
raid_d = {}
@@ -464,11 +512,35 @@ def pool_raid(mnt_pt):
raid = fields[1][:-1].lower()
if block not in raid_d:
raid_d[block] = raid
if raid_d["metadata"] == "single":
raid_d["data"] = raid_d["metadata"]
return raid_d
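
# Example return value (a sketch, not from the diff; mount point made up) for a
# pool with raid6 data and raid1c4 metadata, matching the test data added below:
#   get_pool_raid_levels("/mnt2/mypool") ->
#   {"data": "raid6", "system": "raid1c4", "metadata": "raid1c4", "globalreserve": "single"}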


def get_pool_raid_profile(raid_levels):
"""
Maps the raid_levels dict from get_pool_raid_levels(mnt_pt) to a Rockstor raid profile name.
See the PROFILE const.
:param raid_levels: dict returned by get_pool_raid_levels()
:return: a PROFILE index.
"""
# dict.get returns None if key not found.
data_raid = raid_levels.get("data")
metadata_raid = raid_levels.get("metadata")
raid_profile = "unknown"
if data_raid is None or metadata_raid is None:
return raid_profile
if data_raid == metadata_raid:
raid_profile = data_raid
else:
# Once on Python >= 3.9, use removeprefix("raid") instead.
if metadata_raid.startswith("raid"): # 4 characters
raid_profile = data_raid + "-" + metadata_raid[4:]
else:
raid_profile = data_raid + "-" + metadata_raid
if raid_profile not in PROFILE:
return "unknown"
return raid_profile
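
# Example mappings (a sketch, not from the diff), mirroring the unit tests added below:
#   get_pool_raid_profile({"data": "raid6", "metadata": "raid1c4", ...})  -> "raid6-1c4"
#   get_pool_raid_profile({"data": "single", "metadata": "dup", ...})     -> "single-dup"
#   get_pool_raid_profile({"data": "raid1", "metadata": "raid1", ...})    -> "raid1"
#   get_pool_raid_profile({})                                             -> "unknown"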


def cur_devices(mnt_pt):
"""
When given a btrfs mount point a list containing the full path of all
@@ -1905,11 +1977,8 @@ def balance_pool_cmd(mnt_pt, force=False, convert=None):
if force:
cmd.insert(3, "-f")
if convert is not None:
cmd.insert(3, "-dconvert={}".format(convert))
# Override metadata on single pools to be dup, as per btrfs default.
if convert == "single":
convert = "dup"
cmd.insert(3, "-mconvert={}".format(convert))
cmd.insert(3, "-dconvert={}".format(PROFILE[convert].data_raid))
cmd.insert(3, "-mconvert={}".format(PROFILE[convert].metadata_raid))
else:
# As we are running with no convert filters a warning and 10 second
# countdown with ^C prompt will result unless we use "--full-balance".
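# Rough sketch (the start of cmd lies outside this hunk, so the base command is
# assumed to be ["btrfs", "balance", "start", mnt_pt]): for convert="raid10-1c4"
# the two inserts above would yield a command along the lines of:
#   btrfs balance start -mconvert=raid1c4 -dconvert=raid10 /mnt2/mypool
# (-mconvert lands before -dconvert because both are inserted at index 3.)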
106 changes: 102 additions & 4 deletions src/rockstor/fs/tests/test_btrfs.py
@@ -16,7 +16,7 @@
import unittest
from datetime import datetime
from fs.btrfs import (
pool_raid,
get_pool_raid_levels,
is_subvol,
volume_usage,
balance_status,
@@ -39,6 +39,7 @@
btrfsprogs_legacy,
scrub_status_raw,
scrub_status_extra,
get_pool_raid_profile,
)
from mock import patch

@@ -84,7 +85,7 @@ def tearDown(self):
def test_get_pool_raid_levels_identification(self):
"""
Presents the raid identification function with example data & compares
it's return dict to that expected for the given input. :return: 'ok'
its return dict to that expected for the given input. :return: 'ok'
if all is as expected or a message indicating which raid level was
incorrectly identified given the test data. N.B. Only the first raid
level fail is indicated, however all are expected to pass anyway so we
@@ -112,6 +113,19 @@ def test_get_pool_raid_levels_identification(self):
"globalreserve": "single",
"metadata": "single",
}
single_dup_fi_df = [
"Data, single: total=3.00GiB, used=0.00B",
"System, DUP: total=32.00MiB, used=16.00KiB",
"Metadata, DUP: total=768.00MiB, used=144.00KiB",
"GlobalReserve, single: total=3.50MiB, used=0.00B",
"",
]
single_dup_return = {
"data": "single",
"system": "dup",
"globalreserve": "single",
"metadata": "dup",
}
raid0_fi_df = [
"Data, RAID0: total=512.00MiB, used=256.00KiB",
"System, RAID0: total=16.00MiB, used=16.00KiB",
@@ -208,38 +222,75 @@ def test_get_pool_raid_levels_identification(self):
"globalreserve": "single",
"metadata": "dup",
}
# N.B. observed output after multiple balance events.
# We currently ignore "GlobalReserve"
raid1_1c3_fi_df = [
"Data, RAID1: total=3.00GiB, used=0.00B",
"System, RAID1C3: total=32.00MiB, used=16.00KiB",
"Metadata, RAID1C3: total=768.00MiB, used=144.00KiB",
"GlobalReserve, single: total=3.50MiB, used=0.00B",
"",
]
raid1_1c3_return = {
"data": "raid1",
"system": "raid1c3",
"globalreserve": "single",
"metadata": "raid1c3",
}
raid6_1c4_fi_df = [
"Data, RAID6: total=4.00GiB, used=0.00B",
"System, RAID1C4: total=32.00MiB, used=16.00KiB",
"Metadata, RAID1C4: total=768.00MiB, used=144.00KiB",
"GlobalReserve, single: total=3.50MiB, used=0.00B",
"",
]
raid6_1c4_return = {
"data": "raid6",
"system": "raid1c4",
"globalreserve": "single",
"metadata": "raid1c4",
}
# list used to report what raid level is currently under test.
raid_levels_tested = [
"single",
"single-dup",
"raid0",
"raid1",
"raid10",
"raid5",
"raid6",
"raid1_some_single_chunks",
"default_sys_pool",
"raid1-1c3",
"raid6-1c4",
]
# list of example fi_df outputs in raid_levels_tested order
btrfs_fi_di = [
single_fi_df,
single_dup_fi_df,
raid0_fi_df,
raid1_fi_df,
raid10_fi_df,
raid5_fi_df,
raid6_fi_df,
raid1_fi_df_some_single_chunks,
default_sys_fi_df,
raid1_1c3_fi_df,
raid6_1c4_fi_df,
]
# list of correctly parsed return dictionaries
return_dict = [
single_return,
single_dup_return,
raid0_return,
raid1_return,
raid10_return,
raid5_return,
raid6_return,
raid1_return,
default_sys_return,
raid1_1c3_return,
raid6_1c4_return,
]
# simple iteration over above example inputs to expected outputs.
for raid_level, fi_df, expected_result in map(
@@ -249,12 +300,59 @@
self.mock_run_command.return_value = (fi_df, cmd_e, cmd_rc)
# assert get_pool_raid_level returns what we expect.
self.assertEqual(
pool_raid(mount_point),
get_pool_raid_levels(mount_point),
expected_result,
msg="get_pool_raid_level() miss identified raid "
msg="get_pool_raid_levels() miss identified raid "
"level {}".format(raid_level),
)

def test_get_pool_raid_profile(self):
"""
Present get_pool_raid_profile() with example output from get_pool_raid_levels()
and ensure it returns the appropriate profile
"""
# N.B. dict limits test data to unique indexes (expected profiles).
test_raid_levels = {
"raid6-1c4": {
"data": "raid6",
"system": "raid1c4",
"globalreserve": "single",
"metadata": "raid1c4",
},
"single": {
"data": "single",
"system": "single",
"globalreserve": "single",
"metadata": "single",
},
"single-dup": {
"data": "single",
"system": "dup",
"globalreserve": "single",
"metadata": "dup",
},
"unknown": {},
}
for profile, raid_levels in test_raid_levels.items():
self.assertEqual(
get_pool_raid_profile(raid_levels),
profile,
msg="get_pool_raid_profile() failed for profile {}".format(profile),
)

def test_get_pool_raid_profile_unknown_matched(self):
fake_levels = {
"data": "fakelevel",
"system": "fakelevelmeta",
"globalreserve": "yaf",
"metadata": "fakelevel",
}
self.assertEqual(
get_pool_raid_profile(fake_levels),
"unknown",
msg="matching unknown data-metadata, should return unknown",
)

def test_is_subvol_exists(self):
mount_point = "/mnt2/test-pool/test-share"
o = [