From f85b22818b474911cfdc15e7c6be1d2979939888 Mon Sep 17 00:00:00 2001 From: Dhairya Parmar Date: Mon, 22 May 2023 12:34:51 +0530 Subject: [PATCH 1/5] mds: enqueue ~mdsdir at the time of enqueueing root This would avoid the need to run individual scrubs for ~mdsdir and root, i.e. run both the scrubs under the same header. This also helps to avoid the edge case where ~mdsdir is huge and it's taking time to scrub it, in which case the scrub status would report something like this until root inodes kick in: { "status": "scrub active (757 inodes in the stack)", "scrubs": {} } Fixes: https://tracker.ceph.com/issues/59350 Signed-off-by: Dhairya Parmar --- src/mds/MDCache.cc | 17 ++++++----------- src/mds/MDSRank.cc | 7 ------- src/mds/ScrubStack.cc | 15 ++++++++++++++- 3 files changed, 20 insertions(+), 19 deletions(-) diff --git a/src/mds/MDCache.cc b/src/mds/MDCache.cc index 1624aaab7475f..8e57193997202 100644 --- a/src/mds/MDCache.cc +++ b/src/mds/MDCache.cc @@ -13015,19 +13015,14 @@ void MDCache::enqueue_scrub( bool is_internal = false; std::string tag_str(tag); - C_MDS_EnqueueScrub *cs; - if ((path == "~mdsdir" && scrub_mdsdir)) { + if (tag_str.empty()) { + uuid_d uuid_gen; + uuid_gen.generate_random(); + tag_str = uuid_gen.to_string(); is_internal = true; - cs = new C_MDS_EnqueueScrub(tag_str, f, fin, false); - } else { - if (tag_str.empty()) { - uuid_d uuid_gen; - uuid_gen.generate_random(); - tag_str = uuid_gen.to_string(); - is_internal = true; - } - cs = new C_MDS_EnqueueScrub(tag_str, f, fin); } + + C_MDS_EnqueueScrub *cs = new C_MDS_EnqueueScrub(tag_str, f, fin); cs->header = std::make_shared(tag_str, is_internal, force, recursive, repair, scrub_mdsdir); diff --git a/src/mds/MDSRank.cc b/src/mds/MDSRank.cc index 6b8b24d808e0d..211d2fe3e6d96 100644 --- a/src/mds/MDSRank.cc +++ b/src/mds/MDSRank.cc @@ -2986,13 +2986,6 @@ void MDSRank::command_scrub_start(Formatter *f, } std::lock_guard l(mds_lock); - if (scrub_mdsdir) { - MDSGatherBuilder gather(g_ceph_context); - 
mdcache->enqueue_scrub("~mdsdir", "", false, true, false, scrub_mdsdir, - f, gather.new_sub()); - gather.set_finisher(new C_MDSInternalNoop); - gather.activate(); - } mdcache->enqueue_scrub(path, tag, force, recursive, repair, scrub_mdsdir, f, on_finish); // scrub_dentry() finishers will dump the data for us; we're done! diff --git a/src/mds/ScrubStack.cc b/src/mds/ScrubStack.cc index 181515c6d6bdc..a544af7665e69 100644 --- a/src/mds/ScrubStack.cc +++ b/src/mds/ScrubStack.cc @@ -119,7 +119,20 @@ int ScrubStack::enqueue(CInode *in, ScrubHeaderRef& header, bool top) << ", conflicting tag " << header->get_tag() << dendl; return -CEPHFS_EEXIST; } - + if (header->get_scrub_mdsdir()) { + filepath fp; + mds_rank_t rank; + rank = mdcache->mds->get_nodeid(); + if(rank >= 0 && rank < MAX_MDS) { + fp.set_path("", MDS_INO_MDSDIR(rank)); + } + int r = _enqueue(mdcache->get_inode(fp.get_ino()), header, true); + if (r < 0) { + return r; + } + //to make sure mdsdir is always on the top + top = false; + } int r = _enqueue(in, header, top); if (r < 0) return r; From 2f06feea33c88798e4e9d654bbff8b05e77f0681 Mon Sep 17 00:00:00 2001 From: Dhairya Parmar Date: Mon, 22 May 2023 16:06:24 +0530 Subject: [PATCH 2/5] mds: dump_values no longer needed Previously, two individual scrubs were initiated to scrub ~mdsdir at root where the ~mdsdir scrub wasn't provided any tag; thus, it was necessary to not dump its values for output of 'scrub start'. Now since mdsdir and root scrub run under a single header, there is no need for this anymore, thus removing this redundant code. 
Fixes: https://tracker.ceph.com/issues/59350 Signed-off-by: Dhairya Parmar --- src/mds/MDCache.cc | 21 ++++++++------------- 1 file changed, 8 insertions(+), 13 deletions(-) diff --git a/src/mds/MDCache.cc b/src/mds/MDCache.cc index 8e57193997202..89bf5fea4e85e 100644 --- a/src/mds/MDCache.cc +++ b/src/mds/MDCache.cc @@ -12960,24 +12960,19 @@ class C_MDS_EnqueueScrub : public Context std::string tag; Formatter *formatter; Context *on_finish; - bool dump_values; public: ScrubHeaderRef header; - C_MDS_EnqueueScrub(std::string_view tag, Formatter *f, Context *fin, - bool dump_values = true) : - tag(tag), formatter(f), on_finish(fin), dump_values(dump_values), - header(nullptr) {} + C_MDS_EnqueueScrub(std::string_view tag, Formatter *f, Context *fin) : + tag(tag), formatter(f), on_finish(fin), header(nullptr) {} void finish(int r) override { - if (dump_values) { - formatter->open_object_section("results"); - formatter->dump_int("return_code", r); - if (r == 0) { - formatter->dump_string("scrub_tag", tag); - formatter->dump_string("mode", "asynchronous"); - } - formatter->close_section(); + formatter->open_object_section("results"); + formatter->dump_int("return_code", r); + if (r == 0) { + formatter->dump_string("scrub_tag", tag); + formatter->dump_string("mode", "asynchronous"); } + formatter->close_section(); r = 0; if (on_finish) From 548af10f1057fe3fb46e23887b012a664b45b7f8 Mon Sep 17 00:00:00 2001 From: Dhairya Parmar Date: Mon, 22 May 2023 16:07:34 +0530 Subject: [PATCH 3/5] mds: remove code to bypass dumping empty header scrub info Previously when ~mdsdir was scrubbed at CephFS root, its header was kept empty, thus it became necessary to not dump its values for 'scrub status'. Now since both the scrubs (~mdsdir and root) run under the same header, this code is no longer needed. 
Fixes: https://tracker.ceph.com/issues/59350 Signed-off-by: Dhairya Parmar --- src/mds/ScrubStack.cc | 5 ----- 1 file changed, 5 deletions(-) diff --git a/src/mds/ScrubStack.cc b/src/mds/ScrubStack.cc index a544af7665e69..6d799343f1496 100644 --- a/src/mds/ScrubStack.cc +++ b/src/mds/ScrubStack.cc @@ -668,11 +668,6 @@ void ScrubStack::scrub_status(Formatter *f) { have_more = false; auto& header = p.second; - if (mdcache->get_inode(header->get_origin())->is_mdsdir() - && header->get_scrub_mdsdir() && header->get_tag().empty()) { - continue; - } - std::string tag(header->get_tag()); f->open_object_section(tag.c_str()); // scrub id From e40ca408a1343754b80ba1e88e0b669689ada164 Mon Sep 17 00:00:00 2001 From: Dhairya Parmar Date: Mon, 22 May 2023 17:25:38 +0530 Subject: [PATCH 4/5] qa: add recursive flag to test_flag_scrub_mdsdir Code has been changed, in order to scrub ~mdsdir at root, recursive flag also needs to be provided along with scrub_mdsdir. Fixes: https://tracker.ceph.com/issues/59350 Signed-off-by: Dhairya Parmar --- qa/tasks/cephfs/test_scrub_checks.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/qa/tasks/cephfs/test_scrub_checks.py b/qa/tasks/cephfs/test_scrub_checks.py index f88929ad265e8..e41b997a6eebc 100644 --- a/qa/tasks/cephfs/test_scrub_checks.py +++ b/qa/tasks/cephfs/test_scrub_checks.py @@ -374,7 +374,7 @@ def test_flag_scrub_mdsdir(self): test flag scrub_mdsdir """ self.scrub_with_stray_evaluation(self.fs, self.mount_a, "/", - "scrub_mdsdir") + "recursive,scrub_mdsdir") @staticmethod def json_validator(json_out, rc, element, expected_value): From 93dfc111bf7e11b3122d2a2df0346b341164dd08 Mon Sep 17 00:00:00 2001 From: Dhairya Parmar Date: Mon, 22 May 2023 17:30:27 +0530 Subject: [PATCH 5/5] doc: users now need to provide scrub_mdsdir and recursive flags for mdsdir scrub at CephFS root. 
Fixes: https://tracker.ceph.com/issues/59350 Signed-off-by: Dhairya Parmar --- doc/cephfs/scrub.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/cephfs/scrub.rst b/doc/cephfs/scrub.rst index d99aea72b5a71..5b813f1c41ad8 100644 --- a/doc/cephfs/scrub.rst +++ b/doc/cephfs/scrub.rst @@ -151,6 +151,6 @@ Evaluate strays using recursive scrub ceph tell mds.:0 scrub start ~mdsdir recursive - ``~mdsdir`` is not enqueued by default when scrubbing at the CephFS root. In order to perform stray evaluation - at root, run scrub with flag ``scrub_mdsdir``:: + at root, run scrub with flags ``scrub_mdsdir`` and ``recursive``:: - ceph tell mds.:0 scrub start / scrub_mdsdir + ceph tell mds.:0 scrub start / recursive,scrub_mdsdir