From 85c5342bd70290104723cdb50d1e8dfdf09517e5 Mon Sep 17 00:00:00 2001
From: Philip Guyton
Date: Thu, 23 Mar 2023 20:56:10 +0000
Subject: [PATCH] Add mixed raid levels #2520

Extend our existing raid levels to include raid1c3 and raid1c4,
introduced in kernel 5.5. Additionally add mixed raid capability. We
artificially reduce the mixed raid profile options to assist with
usability.

Add an abstraction, with tests, to generate a profile from our existing
pool raid levels mechanism.

Includes:
- Minor rename-refactor from prior TODO.
- Enable distinguishing between single & single-dup.
- Add an "unknown" profile as a fall-through catch-all.
- Enable easy Web-UI access to data/metadata via additional Pool object
  properties as thin PROFILE look-ups.
- Surface data-metadata in the pool details Web-UI page.
- Remove the now-redundant single raid designator for data when
  metadata = single. As we now surface data-metadata, this is no longer
  needed.
- Refactor the supported profiles var for test use. SUPPORTED_PROFILES
  was a class variable with no requirement to be one. Move it to module
  level and use it in the test_pools.py unsupported-profile test case.
---
 src/rockstor/fs/btrfs.py | 137 +++++++++++++-----
 src/rockstor/fs/tests/test_btrfs.py | 106 +++++++++++++-
 src/rockstor/storageadmin/models/pool.py | 17 +++
 src/rockstor/storageadmin/serializers.py | 3 +
 .../js/templates/pool/add_pool_template.jst | 7 +-
 .../js/templates/pool/pool_info_module.jst | 3 +-
 .../js/templates/pool/resize/add_disks.jst | 4 +-
 .../js/templates/pool/resize/raid_change.jst | 4 +-
 .../static/storageadmin/js/views/add_pool.js | 14 ++
 .../js/views/pool/resize/add_disks.js | 5 +-
 .../js/views/pool/resize/raid_change.js | 5 +-
 .../storageadmin/tests/test_commands.py | 2 +-
 src/rockstor/storageadmin/tests/test_disks.py | 2 +-
 src/rockstor/storageadmin/tests/test_pools.py | 6 +-
 src/rockstor/storageadmin/views/command.py | 6 +-
 src/rockstor/storageadmin/views/disk.py | 10 +-
 src/rockstor/storageadmin/views/pool.py | 29 +++-
 17 files changed, 295 insertions(+), 65 deletions(-)

diff --git a/src/rockstor/fs/btrfs.py b/src/rockstor/fs/btrfs.py
index 39fde767c..93be42573 100644
--- a/src/rockstor/fs/btrfs.py
+++ b/src/rockstor/fs/btrfs.py
@@ -96,49 +96,98 @@ DefaultSubvol = collections.namedtuple("DefaultSubvol", "id path boot_to_snap")
 # Named Tuple for balance status: active (boolean) internal (boolean) status (dict)
 BalanceStatusAll = collections.namedtuple("BalanceStatusAll", "active internal status")
 
-# Named Tuple to define raid profile limits
-btrfs_profile = collections.namedtuple("btrfs_profile", "min_dev_count max_dev_missing")
-# List of profiles indexed by their name.
+# Named Tuple to define raid profile limits and data/metadata
+btrfs_profile = collections.namedtuple(
+    "btrfs_profile", "min_dev_count max_dev_missing data_raid metadata_raid"
+)
+# Dict of Rockstor btrfs raid profiles indexed by their name.
 # I.e. PROFILE[raid_level].min_dev_count
 # N.B. Mixed profiles indicated by "-" i.e. DATA-METADATA
+# https://btrfs.readthedocs.io/en/latest/Kernel-by-version.html#jan-2020
+# RAID1C34 along with incompatible flag added in kernel 5.5.
+# https://btrfs.readthedocs.io/en/latest/Kernel-by-version.html#nov-2021 kernel 5.15
+# enabled running raid0 and 10 with a minimum of 1 & 2 devices respectively.
+# https://btrfs.readthedocs.io/en/latest/mkfs.btrfs.html
+# "It's recommended to use specific profiles ..."
+# The following are Rockstor's specifics:
 PROFILE = {
+    # Fall-through profile to catch unknown raid levels/combinations.
+ # We specify a min dev count of 4 to account for any raid level, + # and likewise play safe by allowing for no missing devices. + "unknown": btrfs_profile( + min_dev_count=4, max_dev_missing=0, data_raid="unknown", metadata_raid="unknown" + ), # non redundant profiles! - "single": btrfs_profile(min_dev_count=1, max_dev_missing=0), - "raid0": btrfs_profile(min_dev_count=2, max_dev_missing=0), + "single": btrfs_profile( + min_dev_count=1, max_dev_missing=0, data_raid="single", metadata_raid="single" + ), + "single-dup": btrfs_profile( + min_dev_count=1, max_dev_missing=0, data_raid="single", metadata_raid="dup" + ), + "raid0": btrfs_profile( + min_dev_count=2, max_dev_missing=0, data_raid="raid0", metadata_raid="raid0" + ), # Mirrored profiles: - "raid1": btrfs_profile(min_dev_count=2, max_dev_missing=1), - "raid1c3": btrfs_profile(min_dev_count=3, max_dev_missing=2), - "raid1c4": btrfs_profile(min_dev_count=4, max_dev_missing=3), - "raid10": btrfs_profile(min_dev_count=4, max_dev_missing=1), - # Parity raid levels - "raid5": btrfs_profile(min_dev_count=2, max_dev_missing=1), - "raid6": btrfs_profile(min_dev_count=3, max_dev_missing=2), + "raid1": btrfs_profile( + min_dev_count=2, max_dev_missing=1, data_raid="raid1", metadata_raid="raid1" + ), + "raid1c3": btrfs_profile( + min_dev_count=3, max_dev_missing=2, data_raid="raid1c3", metadata_raid="raid1c3" + ), + "raid1c4": btrfs_profile( + min_dev_count=4, max_dev_missing=3, data_raid="raid1c4", metadata_raid="raid1c4" + ), + "raid10": btrfs_profile( + min_dev_count=4, max_dev_missing=1, data_raid="raid10", metadata_raid="raid10" + ), + # Parity raid levels (recommended min_dev_count is 3 & 4 respectively) + "raid5": btrfs_profile( + min_dev_count=2, max_dev_missing=1, data_raid="raid5", metadata_raid="raid5" + ), + "raid6": btrfs_profile( + min_dev_count=3, max_dev_missing=2, data_raid="raid6", metadata_raid="raid6" + ), # ------- MIXED PROFILES DATA-METADATA (max 10 chars) ------- # Mixed Mirrored profiles: - "raid1-1c3": btrfs_profile(min_dev_count=3, max_dev_missing=1), - "raid1-1c4": btrfs_profile(min_dev_count=4, max_dev_missing=1), - "raid10-1c3": btrfs_profile(min_dev_count=4, max_dev_missing=1), - "raid10-1c4": btrfs_profile(min_dev_count=4, max_dev_missing=1), + "raid1-1c3": btrfs_profile( + min_dev_count=3, max_dev_missing=1, data_raid="raid1", metadata_raid="raid1c3" + ), + "raid1-1c4": btrfs_profile( + min_dev_count=4, max_dev_missing=1, data_raid="raid1", metadata_raid="raid1c4" + ), + "raid10-1c3": btrfs_profile( + min_dev_count=4, max_dev_missing=1, data_raid="raid10", metadata_raid="raid1c3" + ), + "raid10-1c4": btrfs_profile( + min_dev_count=4, max_dev_missing=1, data_raid="raid10", metadata_raid="raid1c4" + ), # Parity data - Mirrored metadata - "raid5-1": btrfs_profile(min_dev_count=2, max_dev_missing=1), - "raid5-1c3": btrfs_profile(min_dev_count=3, max_dev_missing=1), - "raid6-1c3": btrfs_profile(min_dev_count=3, max_dev_missing=2), - "raid6-1c4": btrfs_profile(min_dev_count=4, max_dev_missing=2), + "raid5-1": btrfs_profile( + min_dev_count=2, max_dev_missing=1, data_raid="raid5", metadata_raid="raid1" + ), + "raid5-1c3": btrfs_profile( + min_dev_count=3, max_dev_missing=1, data_raid="raid5", metadata_raid="raid1c3" + ), + "raid6-1c3": btrfs_profile( + min_dev_count=3, max_dev_missing=2, data_raid="raid6", metadata_raid="raid1c3" + ), + "raid6-1c4": btrfs_profile( + min_dev_count=4, max_dev_missing=2, data_raid="raid6", metadata_raid="raid1c4" + ), } def add_pool(pool, disks): """ Makes a btrfs pool (filesystem) of name 
'pool' using the by-id disk names
-    provided, then attempts to enables quotas for this pool.
-    :param pool: name of pool to create.
+    provided, then attempts to enable quotas for this pool.
+    :param pool: Pool object.
     :param disks: list of by-id disk names without paths to make the pool from.
     :return o, err, rc from last command executed.
     """
     disks_fp = [get_device_path(d) for d in disks]
-    draid = mraid = pool.raid
-    if pool.raid == "single":
-        mraid = "dup"
+    draid = PROFILE[pool.raid].data_raid
+    mraid = PROFILE[pool.raid].metadata_raid
     cmd = [MKFS_BTRFS, "-f", "-d", draid, "-m", mraid, "-L", pool.name]
     cmd.extend(disks_fp)
     # Run the create pool command, any exceptions are logged and raised by
@@ -452,8 +501,7 @@ def get_pool_info(disk):
     return pool_info
 
 
-def pool_raid(mnt_pt):
-    # TODO: propose name change to get_pool_raid_levels(mnt_pt)
+def get_pool_raid_levels(mnt_pt):
     o, e, rc = run_command([BTRFS, "fi", "df", mnt_pt])
     # data, system, metadata, globalreserve
     raid_d = {}
@@ -464,11 +512,35 @@
         raid = fields[1][:-1].lower()
         if block not in raid_d:
             raid_d[block] = raid
-    if raid_d["metadata"] == "single":
-        raid_d["data"] = raid_d["metadata"]
     return raid_d
 
 
+def get_pool_raid_profile(raid_levels):
+    """
+    Abstracts raid_levels from get_pool_raid_levels(mnt_pt) to a Rockstor raid profile.
+    See the PROFILE dict.
+    :param raid_levels: dict returned by get_pool_raid_levels()
+    :return: a PROFILE index.
+    """
+    # dict.get returns None if key not found.
+    data_raid = raid_levels.get("data")
+    metadata_raid = raid_levels.get("metadata")
+    raid_profile = "unknown"
+    if data_raid is None or metadata_raid is None:
+        return raid_profile
+    if data_raid == metadata_raid:
+        raid_profile = data_raid
+    else:
+        # Once on Python >= 3.9, use removeprefix("raid").
+        if metadata_raid.startswith("raid"):  # 4 characters
+            raid_profile = data_raid + "-" + metadata_raid[4:]
+        else:
+            raid_profile = data_raid + "-" + metadata_raid
+    if raid_profile not in PROFILE:
+        return "unknown"
+    return raid_profile
+
+
 def cur_devices(mnt_pt):
     """
     When given a btrfs mount point a list containing the full path of all
@@ -1905,11 +1977,8 @@ def balance_pool_cmd(mnt_pt, force=False, convert=None):
     if force:
         cmd.insert(3, "-f")
     if convert is not None:
-        cmd.insert(3, "-dconvert={}".format(convert))
-        # Override metadata on single pools to be dup, as per btrfs default.
-        if convert == "single":
-            convert = "dup"
-        cmd.insert(3, "-mconvert={}".format(convert))
+        cmd.insert(3, "-dconvert={}".format(PROFILE[convert].data_raid))
+        cmd.insert(3, "-mconvert={}".format(PROFILE[convert].metadata_raid))
     else:
         # As we are running with no convert filters a warning and 10 second
         # countdown with ^C prompt will result unless we use "--full-balance".
diff --git a/src/rockstor/fs/tests/test_btrfs.py b/src/rockstor/fs/tests/test_btrfs.py
index 88627577c..fa18dc1c0 100644
--- a/src/rockstor/fs/tests/test_btrfs.py
+++ b/src/rockstor/fs/tests/test_btrfs.py
@@ -16,7 +16,7 @@ import unittest
 from datetime import datetime
 from fs.btrfs import (
-    pool_raid,
+    get_pool_raid_levels,
     is_subvol,
     volume_usage,
     balance_status,
@@ -39,6 +39,7 @@
     btrfsprogs_legacy,
     scrub_status_raw,
     scrub_status_extra,
+    get_pool_raid_profile,
 )
 from mock import patch
@@ -84,7 +85,7 @@ def tearDown(self):
     def test_get_pool_raid_levels_identification(self):
         """
         Presents the raid identification function with example data & compares
-        it's return dict to that expected for the given input. :return: 'ok'
+        its return dict to that expected for the given input. :return: 'ok'
         if all is as expected or a message indicating which raid level was
         incorrectly identified given the test data. N.B. Only the first raid
         level fail is indicated, however all are expected to pass anyway so we
@@ -112,6 +113,19 @@ def test_get_pool_raid_levels_identification(self):
             "globalreserve": "single",
             "metadata": "single",
         }
+        single_dup_fi_df = [
+            "Data, single: total=3.00GiB, used=0.00B",
+            "System, DUP: total=32.00MiB, used=16.00KiB",
+            "Metadata, DUP: total=768.00MiB, used=144.00KiB",
+            "GlobalReserve, single: total=3.50MiB, used=0.00B",
+            "",
+        ]
+        single_dup_return = {
+            "data": "single",
+            "system": "dup",
+            "globalreserve": "single",
+            "metadata": "dup",
+        }
         raid0_fi_df = [
             "Data, RAID0: total=512.00MiB, used=256.00KiB",
             "System, RAID0: total=16.00MiB, used=16.00KiB",
@@ -208,9 +222,38 @@
             "globalreserve": "single",
             "metadata": "dup",
         }
+        # N.B. observed output after multiple balance events.
+        # We currently ignore "GlobalReserve"
+        raid1_1c3_fi_df = [
+            "Data, RAID1: total=3.00GiB, used=0.00B",
+            "System, RAID1C3: total=32.00MiB, used=16.00KiB",
+            "Metadata, RAID1C3: total=768.00MiB, used=144.00KiB",
+            "GlobalReserve, single: total=3.50MiB, used=0.00B",
+            "",
+        ]
+        raid1_1c3_return = {
+            "data": "raid1",
+            "system": "raid1c3",
+            "globalreserve": "single",
+            "metadata": "raid1c3",
+        }
+        raid6_1c4_fi_df = [
+            "Data, RAID6: total=4.00GiB, used=0.00B",
+            "System, RAID1C4: total=32.00MiB, used=16.00KiB",
+            "Metadata, RAID1C4: total=768.00MiB, used=144.00KiB",
+            "GlobalReserve, single: total=3.50MiB, used=0.00B",
+            "",
+        ]
+        raid6_1c4_return = {
+            "data": "raid6",
+            "system": "raid1c4",
+            "globalreserve": "single",
+            "metadata": "raid1c4",
+        }
         # list used to report what raid level is currently under test.
         raid_levels_tested = [
             "single",
+            "single-dup",
             "raid0",
             "raid1",
             "raid10",
@@ -218,10 +261,13 @@
             "raid5",
             "raid6",
             "raid1_some_single_chunks",
             "default_sys_pool",
+            "raid1-1c3",
+            "raid6-1c4",
         ]
         # list of example fi_df outputs in raid_levels_tested order
         btrfs_fi_di = [
             single_fi_df,
+            single_dup_fi_df,
             raid0_fi_df,
             raid1_fi_df,
             raid10_fi_df,
@@ -229,10 +275,13 @@
             raid5_fi_df,
             raid6_fi_df,
             raid1_fi_df_some_single_chunks,
             default_sys_fi_df,
+            raid1_1c3_fi_df,
+            raid6_1c4_fi_df,
         ]
         # list of correctly parsed return dictionaries
         return_dict = [
             single_return,
+            single_dup_return,
             raid0_return,
             raid1_return,
             raid10_return,
@@ -240,6 +289,8 @@
             raid5_return,
             raid6_return,
             raid1_return,
             default_sys_return,
+            raid1_1c3_return,
+            raid6_1c4_return,
         ]
         # simple iteration over above example inputs to expected outputs.
         for raid_level, fi_df, expected_result in map(
@@ -249,12 +300,59 @@
             self.mock_run_command.return_value = (fi_df, cmd_e, cmd_rc)
             # assert get_pool_raid_level returns what we expect.
             self.assertEqual(
-                pool_raid(mount_point),
+                get_pool_raid_levels(mount_point),
                 expected_result,
-                msg="get_pool_raid_level() miss identified raid "
+                msg="get_pool_raid_levels() misidentified raid "
                 "level {}".format(raid_level),
             )
 
+    def test_get_pool_raid_profile(self):
+        """
+        Present get_pool_raid_profile() with example output from get_pool_raid_levels()
+        and ensure it returns the appropriate profile.
+        """
+        # N.B. dict limits test data to unique indexes (expected profiles).
+ test_raid_levels = { + "raid6-1c4": { + "data": "raid6", + "system": "raid1c4", + "globalreserve": "single", + "metadata": "raid1c4", + }, + "single": { + "data": "single", + "system": "single", + "globalreserve": "single", + "metadata": "single", + }, + "single-dup": { + "data": "single", + "system": "dup", + "globalreserve": "single", + "metadata": "dup", + }, + "unknown": {}, + } + for profile, raid_levels in test_raid_levels.items(): + self.assertEqual( + get_pool_raid_profile(raid_levels), + profile, + msg="get_pool_raid_profile() failed for profile {}".format(profile), + ) + + def test_get_pool_raid_profile_unknown_matched(self): + fake_levels = { + "data": "fakelevel", + "system": "fakelevelmeta", + "globalreserve": "yaf", + "metadata": "fakelevel", + } + self.assertEqual( + get_pool_raid_profile(fake_levels), + "unknown", + msg="matching unknown data-metadata, should return unknown", + ) + def test_is_subvol_exists(self): mount_point = "/mnt2/test-pool/test-share" o = [ diff --git a/src/rockstor/storageadmin/models/pool.py b/src/rockstor/storageadmin/models/pool.py index eaeec3b45..2d05862c9 100644 --- a/src/rockstor/storageadmin/models/pool.py +++ b/src/rockstor/storageadmin/models/pool.py @@ -39,6 +39,7 @@ class Pool(models.Model): uuid = models.CharField(max_length=100, null=True) """size of the pool in KB""" size = models.BigIntegerField(default=0) + """raid expected values defined in PROFILE dict""" raid = models.CharField(max_length=10) toc = models.DateTimeField(auto_now=True) compression = models.CharField(max_length=256, null=True) @@ -155,6 +156,22 @@ def quotas_enabled(self, *args, **kwargs): except: return False + @property + def data_raid(self, *args, **kwargs): + # Convenience property to return data_raid from self.raid + try: + return PROFILE[self.raid].data_raid + except: + return "unknown" + + @property + def metadata_raid(self, *args, **kwargs): + # Convenience property to return metadata_raid from self.raid + try: + return PROFILE[self.raid].metadata_raid + except: + return "unknown" + class Meta: app_label = "storageadmin" ordering = ["-id"] diff --git a/src/rockstor/storageadmin/serializers.py b/src/rockstor/storageadmin/serializers.py index 935b6ea8a..2247e9adf 100644 --- a/src/rockstor/storageadmin/serializers.py +++ b/src/rockstor/storageadmin/serializers.py @@ -90,6 +90,9 @@ class PoolInfoSerializer(serializers.ModelSerializer): dev_stats_ok = serializers.BooleanField() dev_missing_count = serializers.IntegerField() redundancy_exceeded = serializers.BooleanField() + data_raid = serializers.CharField() + metadata_raid = serializers.CharField() + class Meta: model = Pool diff --git a/src/rockstor/storageadmin/static/storageadmin/js/templates/pool/add_pool_template.jst b/src/rockstor/storageadmin/static/storageadmin/js/templates/pool/add_pool_template.jst index 67a817e3f..57e2891d5 100644 --- a/src/rockstor/storageadmin/static/storageadmin/js/templates/pool/add_pool_template.jst +++ b/src/rockstor/storageadmin/static/storageadmin/js/templates/pool/add_pool_template.jst @@ -37,12 +37,7 @@
diff --git a/src/rockstor/storageadmin/static/storageadmin/js/templates/pool/pool_info_module.jst b/src/rockstor/storageadmin/static/storageadmin/js/templates/pool/pool_info_module.jst index 5a06738bc..3822b29ee 100644 --- a/src/rockstor/storageadmin/static/storageadmin/js/templates/pool/pool_info_module.jst +++ b/src/rockstor/storageadmin/static/storageadmin/js/templates/pool/pool_info_module.jst @@ -23,7 +23,8 @@

Details

Created on: {{getPoolCreationDate model.toc}}
- Raid configuration: {{model.raid}}
+ Btrfs raid configuration: {{model.raid}}
+ (Data: {{model.data_raid}} - Metadata: {{model.metadata_raid}})
Active mount options / Status: {{#if model.is_mounted}} {{model.mount_status}} diff --git a/src/rockstor/storageadmin/static/storageadmin/js/templates/pool/resize/add_disks.jst b/src/rockstor/storageadmin/static/storageadmin/js/templates/pool/resize/add_disks.jst index 9b4d14402..de3b140b0 100644 --- a/src/rockstor/storageadmin/static/storageadmin/js/templates/pool/resize/add_disks.jst +++ b/src/rockstor/storageadmin/static/storageadmin/js/templates/pool/resize/add_disks.jst @@ -8,7 +8,9 @@ {{#if model.raidChange}}
-
+
        {{display_raid_levels}}

diff --git a/src/rockstor/storageadmin/static/storageadmin/js/views/add_pool.js b/src/rockstor/storageadmin/static/storageadmin/js/views/add_pool.js
index 985900db3..6416df944 100644
--- a/src/rockstor/storageadmin/static/storageadmin/js/views/add_pool.js
+++ b/src/rockstor/storageadmin/static/storageadmin/js/views/add_pool.js
@@ -363,6 +363,20 @@ AddPoolView = Backbone.View.extend({
         Handlebars.registerHelper('humanReadableSize', function (diskSize) {
             return humanize.filesize(diskSize * 1024);
         });
+
+        Handlebars.registerHelper('display_all_raid_levels', function () {
+            var html = '';
+            // Before 6.2.0 kernel
+            // var levels = ['single', 'raid0', 'raid1', 'raid10', 'raid5', 'raid6'];
+            // After 6.2.0 kernel
+            var levels = ['single', 'single-dup', 'raid0', 'raid1', 'raid10', 'raid5', 'raid6',
+                'raid1c3', 'raid1c4', "raid1-1c3", "raid1-1c4", "raid10-1c3",
+                "raid10-1c4", "raid5-1", "raid5-1c3", "raid6-1c3", "raid6-1c4"];
+            _.each(levels, function (level) {
+                html += '<option value="' + level + '">' + level + '</option>';
+            });
+            return new Handlebars.SafeString(html);
+        });
     }
 });
diff --git a/src/rockstor/storageadmin/static/storageadmin/js/views/pool/resize/add_disks.js b/src/rockstor/storageadmin/static/storageadmin/js/views/pool/resize/add_disks.js
index 1f505b0d2..39ac4364d 100644
--- a/src/rockstor/storageadmin/static/storageadmin/js/views/pool/resize/add_disks.js
+++ b/src/rockstor/storageadmin/static/storageadmin/js/views/pool/resize/add_disks.js
@@ -143,7 +143,10 @@ PoolAddDisks = RockstorWizardPage.extend({
         Handlebars.registerHelper('display_raid_levels', function(){
             var html = '';
             var _this = this;
-            var levels = ['single', 'raid0', 'raid1', 'raid10', 'raid5', 'raid6'];
+            // var levels = ['single', 'raid0', 'raid1', 'raid10', 'raid5', 'raid6'];
+            var levels = ['single', 'single-dup', 'raid0', 'raid1', 'raid10', 'raid5', 'raid6',
+                'raid1c3', 'raid1c4', "raid1-1c3", "raid1-1c4", "raid10-1c3",
+                "raid10-1c4", "raid5-1", "raid5-1c3", "raid6-1c3", "raid6-1c4"];
             _.each(levels, function(level) {
                 if (_this.raidLevel != level) {
                     html += '<option value="' + level + '">' + level + '</option>';
diff --git a/src/rockstor/storageadmin/static/storageadmin/js/views/pool/resize/raid_change.js b/src/rockstor/storageadmin/static/storageadmin/js/views/pool/resize/raid_change.js
index 66a39c54c..a6ecb9f46 100644
--- a/src/rockstor/storageadmin/static/storageadmin/js/views/pool/resize/raid_change.js
+++ b/src/rockstor/storageadmin/static/storageadmin/js/views/pool/resize/raid_change.js
@@ -77,7 +77,10 @@ PoolRaidChange = RockstorWizardPage.extend({
         Handlebars.registerHelper('display_raid_levels', function() {
             var html = '';
             var _this = this;
-            var levels = ['single', 'raid0', 'raid1', 'raid10', 'raid5', 'raid6'];
+            // var levels = ['single', 'raid0', 'raid1', 'raid10', 'raid5', 'raid6'];
+            var levels = ['single', 'single-dup', 'raid0', 'raid1', 'raid10', 'raid5', 'raid6',
+                'raid1c3', 'raid1c4', "raid1-1c3", "raid1-1c4", "raid10-1c3",
+                "raid10-1c4", "raid5-1", "raid5-1c3", "raid6-1c3", "raid6-1c4"];
             _.each(levels, function(level) {
                 if (_this.raidLevel != level) {
                     html += '<option value="' + level + '">' + level + '</option>';
diff --git a/src/rockstor/storageadmin/tests/test_commands.py b/src/rockstor/storageadmin/tests/test_commands.py
index 2cf77cd23..75a936bbc 100644
--- a/src/rockstor/storageadmin/tests/test_commands.py
+++ b/src/rockstor/storageadmin/tests/test_commands.py
@@ -32,7 +32,7 @@ def setUpClass(cls):
         cls.mock_get_pool_info = cls.patch_get_pool_info.start()
         cls.mock_get_pool_info.return_value = {"disks": [], "label": "pool2"}
 
-        cls.patch_pool_raid = patch("storageadmin.views.command.pool_raid")
+
cls.patch_pool_raid = patch("storageadmin.views.command.get_pool_raid_levels") cls.mock_pool_raid = cls.patch_pool_raid.start() cls.patch_mount_share = patch("storageadmin.views.command.mount_share") diff --git a/src/rockstor/storageadmin/tests/test_disks.py b/src/rockstor/storageadmin/tests/test_disks.py index 1c2d72a2b..b23e9c4cf 100644 --- a/src/rockstor/storageadmin/tests/test_disks.py +++ b/src/rockstor/storageadmin/tests/test_disks.py @@ -46,7 +46,7 @@ def setUpClass(cls): cls.patch_mount_root = patch("storageadmin.views.disk.mount_root") cls.mock_mount_root = cls.patch_mount_root.start() - cls.patch_pool_raid = patch("storageadmin.views.disk.pool_raid") + cls.patch_pool_raid = patch("storageadmin.views.disk.get_pool_raid_levels") cls.mock_pool_raid = cls.patch_pool_raid.start() cls.mock_pool_raid.return_value = {"data": "single", "metadata": "single"} diff --git a/src/rockstor/storageadmin/tests/test_pools.py b/src/rockstor/storageadmin/tests/test_pools.py index 52205f4a8..7f11727c5 100644 --- a/src/rockstor/storageadmin/tests/test_pools.py +++ b/src/rockstor/storageadmin/tests/test_pools.py @@ -22,6 +22,7 @@ import fs.btrfs from storageadmin.models import Disk, Pool, PoolBalance from storageadmin.tests.test_api import APITestMixin +from storageadmin.views.pool import SUPPORTED_PROFILES """ Fixture creation instructions: @@ -193,10 +194,7 @@ def test_invalid_post_requests_raid_level(self): "pname": "invalid-raid-level", "raid_level": "derp", } - e_msg = ( - "Unsupported raid level. Use one of: " - "('single', 'raid0', 'raid1', 'raid10', 'raid5', 'raid6')." - ) + e_msg = "Unsupported raid level. Use one of: {}.".format(SUPPORTED_PROFILES) response = self.client.post(self.BASE_URL, data=data) self.assertEqual( response.status_code, diff --git a/src/rockstor/storageadmin/views/command.py b/src/rockstor/storageadmin/views/command.py index 9343d869b..c105e3f97 100644 --- a/src/rockstor/storageadmin/views/command.py +++ b/src/rockstor/storageadmin/views/command.py @@ -23,7 +23,8 @@ from rest_framework.permissions import IsAuthenticated from storageadmin.views import DiskMixin from system.osi import uptime, kernel_info, get_device_mapper_map -from fs.btrfs import mount_share, mount_root, get_dev_pool_info, pool_raid, mount_snap +from fs.btrfs import mount_share, mount_root, get_dev_pool_info, get_pool_raid_levels, mount_snap, \ + get_pool_raid_profile from system.ssh import sftp_mount_map, sftp_mount from system.osi import ( system_shutdown, @@ -125,7 +126,8 @@ def _refresh_pool_state(): p.uuid = pool_info.uuid p.save() mount_root(p) - p.raid = pool_raid(p.mnt_pt)["data"] + pool_raid_info = get_pool_raid_levels(p.mnt_pt) + p.raid = get_pool_raid_profile(pool_raid_info) p.size = p.usage_bound() # Consider using mount_status() parse to update root pool db on # active (fstab initiated) compression setting. 
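Reviewer note on the profile derivation above: get_pool_raid_profile() composes a
PROFILE key from the data/metadata raid levels reported by "btrfs fi df". A minimal
standalone sketch of that derivation follows; PROFILE_KEYS stands in for the keys of
fs.btrfs.PROFILE, and the assertions reuse values from the tests in this patch.

# Sketch: derive a Rockstor raid profile from parsed `btrfs fi df` raid levels.
PROFILE_KEYS = {
    "single", "single-dup", "raid0", "raid1", "raid1c3", "raid1c4", "raid10",
    "raid5", "raid6", "raid1-1c3", "raid1-1c4", "raid10-1c3", "raid10-1c4",
    "raid5-1", "raid5-1c3", "raid6-1c3", "raid6-1c4",
}

def derive_profile(raid_levels):
    # dict.get() returns None when the "data"/"metadata" keys are absent.
    data = raid_levels.get("data")
    metadata = raid_levels.get("metadata")
    if data is None or metadata is None:
        return "unknown"
    if data == metadata:
        profile = data
    elif metadata.startswith("raid"):
        # Mixed profile: drop metadata's "raid" prefix, e.g. raid6 + raid1c4
        # -> "raid6-1c4", keeping the index within the 10 char model field.
        profile = "{}-{}".format(data, metadata[4:])
    else:
        # e.g. data=single, metadata=dup -> "single-dup".
        profile = "{}-{}".format(data, metadata)
    return profile if profile in PROFILE_KEYS else "unknown"

assert derive_profile({"data": "raid6", "metadata": "raid1c4"}) == "raid6-1c4"
assert derive_profile({"data": "single", "metadata": "dup"}) == "single-dup"
assert derive_profile({}) == "unknown"
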
diff --git a/src/rockstor/storageadmin/views/disk.py b/src/rockstor/storageadmin/views/disk.py index a22a2284e..2e8430fa2 100644 --- a/src/rockstor/storageadmin/views/disk.py +++ b/src/rockstor/storageadmin/views/disk.py @@ -24,10 +24,10 @@ enable_quota, mount_root, get_pool_info, - pool_raid, + get_pool_raid_levels, get_dev_pool_info, set_pool_label, - get_devid_usage, + get_devid_usage, get_pool_raid_profile, ) from storageadmin.serializers import DiskInfoSerializer from storageadmin.util import handle_exception @@ -414,7 +414,8 @@ def _update_disk_state(): if pool_name is not None: logger.debug("++++ Creating special system pool db entry.") root_compression = "no" - root_raid = pool_raid("/")["data"] + pool_raid_info = get_pool_raid_levels("/") + root_raid = get_pool_raid_profile(pool_raid_info) # scan_disks() has already acquired our fs uuid so inherit. # We have already established btrfs as the fs type. p = Pool( @@ -867,7 +868,8 @@ def _btrfs_disk_import(self, did, request): do.role = '{"redirect": "%s"}' % device.name do.save() mount_root(po) - po.raid = pool_raid("%s%s" % (settings.MNT_PT, po.name))["data"] + pool_raid_info = get_pool_raid_levels("{}{}".format(settings.MNT_PT, po.name)) + po.raid = get_pool_raid_profile(pool_raid_info) po.size = po.usage_bound() po.save() enable_quota(po) diff --git a/src/rockstor/storageadmin/views/pool.py b/src/rockstor/storageadmin/views/pool.py index 75d1107e4..e79e706c7 100644 --- a/src/rockstor/storageadmin/views/pool.py +++ b/src/rockstor/storageadmin/views/pool.py @@ -49,10 +49,31 @@ logger = logging.getLogger(__name__) +# Currently supported Rockstor btrfs raid profiles. +# See fs.btrfs.PROFILE for all definitions. +SUPPORTED_PROFILES = ( + "single", + "single-dup", + "raid0", + "raid1", + "raid10", + "raid5", + "raid6", + "raid1c3", + "raid1c4", + "raid1-1c3", + "raid1-1c4", + "raid10-1c3", + "raid10-1c4", + "raid5-1", + "raid5-1c3", + "raid6-1c3", + "raid6-1c4", +) + class PoolMixin(object): serializer_class = PoolInfoSerializer - SUPPORTED_PROFILES = ("single", "raid0", "raid1", "raid10", "raid5", "raid6") @staticmethod def _validate_disk(d, request): @@ -410,9 +431,9 @@ def post(self, request): raid_level = request.data["raid_level"] # Reject creation of unsupported raid_level: - if raid_level not in self.SUPPORTED_PROFILES: - e_msg = ("Unsupported raid level. Use one of: {}.").format( - self.SUPPORTED_PROFILES + if raid_level not in SUPPORTED_PROFILES: + e_msg = "Unsupported raid level. Use one of: {}.".format( + SUPPORTED_PROFILES ) handle_exception(Exception(e_msg), request)
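
Reviewer note on command construction: per the add_pool() and balance_pool_cmd()
changes above, both -d/-m at mkfs time and -dconvert/-mconvert at balance time now
come from the same PROFILE lookup. A minimal sketch follows; the binary paths, pool
label, and disk names are hypothetical, and the balance base-command ordering is an
approximation of what the context lines in the diff suggest.

import collections

btrfs_profile = collections.namedtuple(
    "btrfs_profile", "min_dev_count max_dev_missing data_raid metadata_raid"
)
# One entry shown; see fs.btrfs.PROFILE in the diff for the full set.
PROFILE = {"raid5-1c3": btrfs_profile(3, 1, "raid5", "raid1c3")}

MKFS_BTRFS = "/usr/sbin/mkfs.btrfs"  # hypothetical path
BTRFS = "/usr/sbin/btrfs"  # hypothetical path

def mkfs_cmd(profile, label, disks_fp):
    # Mirrors add_pool(): data and metadata raid both taken from the profile.
    p = PROFILE[profile]
    return [MKFS_BTRFS, "-f", "-d", p.data_raid, "-m", p.metadata_raid, "-L", label] + disks_fp

def balance_convert_cmd(profile, mnt_pt):
    # Mirrors the convert branch of balance_pool_cmd().
    p = PROFILE[profile]
    return [BTRFS, "balance", "start",
            "-dconvert={}".format(p.data_raid),
            "-mconvert={}".format(p.metadata_raid),
            mnt_pt]

# E.g. a 3-disk raid5 data / raid1c3 metadata pool (names hypothetical):
print(mkfs_cmd("raid5-1c3", "pool-1",
               ["/dev/disk/by-id/dev-a", "/dev/disk/by-id/dev-b", "/dev/disk/by-id/dev-c"]))
print(balance_convert_cmd("raid5-1c3", "/mnt2/pool-1"))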