-
Notifications
You must be signed in to change notification settings - Fork 5.9k
/
MDCache.h
1409 lines (1194 loc) · 49.1 KB
/
MDCache.h
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <sage@newdream.net>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_MDCACHE_H
#define CEPH_MDCACHE_H
#include <atomic>
#include <string_view>
#include <thread>
#include "common/DecayCounter.h"
#include "include/common_fwd.h"
#include "include/types.h"
#include "include/filepath.h"
#include "include/elist.h"
#include "messages/MCacheExpire.h"
#include "messages/MClientQuota.h"
#include "messages/MClientRequest.h"
#include "messages/MClientSnap.h"
#include "messages/MDentryLink.h"
#include "messages/MDentryUnlink.h"
#include "messages/MDirUpdate.h"
#include "messages/MDiscover.h"
#include "messages/MDiscoverReply.h"
#include "messages/MGatherCaps.h"
#include "messages/MGenericMessage.h"
#include "messages/MInodeFileCaps.h"
#include "messages/MLock.h"
#include "messages/MMDSCacheRejoin.h"
#include "messages/MMDSFindIno.h"
#include "messages/MMDSFindInoReply.h"
#include "messages/MMDSFragmentNotify.h"
#include "messages/MMDSFragmentNotifyAck.h"
#include "messages/MMDSOpenIno.h"
#include "messages/MMDSOpenInoReply.h"
#include "messages/MMDSResolve.h"
#include "messages/MMDSResolveAck.h"
#include "messages/MMDSPeerRequest.h"
#include "messages/MMDSSnapUpdate.h"
#include "osdc/Filer.h"
#include "CInode.h"
#include "CDentry.h"
#include "CDir.h"
#include "include/Context.h"
#include "events/EMetaBlob.h"
#include "RecoveryQueue.h"
#include "StrayManager.h"
#include "OpenFileTable.h"
#include "MDSContext.h"
#include "MDSMap.h"
#include "Mutation.h"
class MDSRank;
class Session;
class Migrator;
class Session;
class ESubtreeMap;
// Perfcounter indices for MDCache statistics; registered in
// register_perfcounters().  l_mdc_first / l_mdc_last bound the range, so
// entries are positional -- do not reorder.
enum {
l_mdc_first = 3000,
// dir updates for replication
l_mdc_dir_update,
l_mdc_dir_update_receipt,
l_mdc_dir_try_discover,
l_mdc_dir_send_discover,
l_mdc_dir_handle_discover,
// How many inodes currently in stray dentries
l_mdc_num_strays,
// How many stray dentries are currently delayed for purge due to refs
l_mdc_num_strays_delayed,
// How many stray dentries are currently being enqueued for purge
l_mdc_num_strays_enqueuing,
// How many dentries have ever been added to stray dir
l_mdc_strays_created,
// How many dentries have been passed on to PurgeQueue
l_mdc_strays_enqueued,
// How many strays have been reintegrated?
l_mdc_strays_reintegrated,
// How many strays have been migrated?
l_mdc_strays_migrated,
// How many inode sizes currently being recovered
l_mdc_num_recovering_processing,
// How many inodes currently waiting to have size recovered
l_mdc_num_recovering_enqueued,
// How many inodes waiting with elevated priority for recovery
l_mdc_num_recovering_prioritized,
// How many inodes ever started size recovery
l_mdc_recovery_started,
// How many inodes ever completed size recovery
l_mdc_recovery_completed,
// Internal-request counters -- names suggest scrub/exportdir/flush/
// fragmentdir/fragstats/inodestats ops; presumably bumped when the
// corresponding internal request is enqueued (TODO confirm in .cc)
l_mdss_ireq_enqueue_scrub,
l_mdss_ireq_exportdir,
l_mdss_ireq_flush,
l_mdss_ireq_fragmentdir,
l_mdss_ireq_fragstats,
l_mdss_ireq_inodestats,
l_mdc_last,
};
// flags for path_traverse();
static const int MDS_TRAVERSE_DISCOVER = (1 << 0);
static const int MDS_TRAVERSE_PATH_LOCKED = (1 << 1);
static const int MDS_TRAVERSE_WANT_DENTRY = (1 << 2);
static const int MDS_TRAVERSE_WANT_AUTH = (1 << 3);
static const int MDS_TRAVERSE_RDLOCK_SNAP = (1 << 4);
static const int MDS_TRAVERSE_RDLOCK_SNAP2 = (1 << 5);
static const int MDS_TRAVERSE_WANT_DIRLAYOUT = (1 << 6);
static const int MDS_TRAVERSE_RDLOCK_PATH = (1 << 7);
static const int MDS_TRAVERSE_XLOCK_DENTRY = (1 << 8);
static const int MDS_TRAVERSE_RDLOCK_AUTHLOCK = (1 << 9);
static const int MDS_TRAVERSE_CHECK_LOCKCACHE = (1 << 10);
// flags for predirty_journal_parents()
static const int PREDIRTY_PRIMARY = 1; // primary dn, adjust nested accounting
static const int PREDIRTY_DIR = 2; // update parent dir mtime/size
static const int PREDIRTY_SHALLOW = 4; // only go to immediate parent (for easier rollback)
class MDCache {
public:
typedef std::map<mds_rank_t, ref_t<MCacheExpire>> expiremap;
using clock = ceph::coarse_mono_clock;
using time = ceph::coarse_mono_time;
// -- discover --
// State for one outstanding discover request to a remote MDS.  Entries
// live in MDCache::discovers keyed by tid (see _create_discover()).
//
// NOTE(review): the destructor drops the PIN_DISCOVERBASE ref taken by
// pin_base(), but copy construction/assignment are implicitly defaulted;
// copying an entry with a pinned base would double-put the pin.  Presumably
// entries are only ever created in place inside the map -- confirm.
struct discover_info_t {
discover_info_t() {}
// Release the discover-base pin, if one was taken.
~discover_info_t() {
if (basei)
basei->put(MDSCacheObject::PIN_DISCOVERBASE);
}
// Pin 'b' as the base of this discover so the cache cannot trim it
// while the request is in flight.
void pin_base(CInode *b) {
basei = b;
basei->get(MDSCacheObject::PIN_DISCOVERBASE);
}
ceph_tid_t tid = 0;          // unique tid (allocated from discover_last_tid)
mds_rank_t mds = -1;         // rank the discover was sent to
inodeno_t ino;               // base inode of the discover
frag_t frag;                 // dirfrag under the base, where applicable
snapid_t snap = CEPH_NOSNAP; // snapid context for the lookup
filepath want_path;          // remaining path to discover below the base
CInode *basei = nullptr;     // pinned base inode (see pin_base())
bool want_base_dir = false;  // also fetch the base dirfrag itself
bool path_locked = false;    // traversal may proceed past xlocked dentries
};
// [reconnect/rejoin caps]
// Per-(inode, client) state harvested from a client cap reconnect during
// reconnect/rejoin; filled in by add_reconnected_cap() and
// set_reconnected_dirty_caps().
struct reconnected_cap_info_t {
  reconnected_cap_info_t() {}
  inodeno_t realm_ino = 0;    // snaprealm ino reported by the client
  snapid_t snap_follows = 0;  // snap_follows from the reconnect record
  int dirty_caps = 0;         // caps the client claims dirty (OR'd in)
  bool snapflush = false;     // fix: was initialized with int literal 0
};
// -- find_ino_peer --
// State for one in-flight find_ino_peers() query: which ino we are looking
// for, which ranks we have already asked, and the completion to fire.
struct find_ino_peer_info_t {
find_ino_peer_info_t() {}
inodeno_t ino;                        // target inode number
ceph_tid_t tid = 0;                   // query tid
MDSContext *fin = nullptr;            // completion to run when resolved
bool path_locked = false;             // propagate locked-path traversal mode
mds_rank_t hint = MDS_RANK_NONE;      // rank to try first, if any
mds_rank_t checking = MDS_RANK_NONE;  // rank currently being queried
std::set<mds_rank_t> checked;         // ranks that already answered
};
friend class C_MDC_RejoinOpenInoFinish;
friend class C_MDC_RejoinSessionsOpened;
friend class Locker;
friend class Migrator;
friend class MDBalancer;
// StrayManager needs to be able to remove_inode() from us
// when it is done purging
friend class StrayManager;
explicit MDCache(MDSRank *m, PurgeQueue &purge_queue_);
~MDCache();
uint64_t cache_limit_memory(void) {
return cache_memory_limit;
}
// How far cache usage is past the reserve point, as a fraction of the
// reserve; 0.0 when at or below it.
// NOTE(review): if cache_reservation == 1.0 the reserve is 0 and this
// divides by zero -- presumably the config option keeps it < 1.0; confirm.
double cache_toofull_ratio(void) const {
double memory_reserve = cache_memory_limit*(1.0-cache_reservation);
return fmax(0.0, (cache_size()-memory_reserve)/memory_reserve);
}
bool cache_toofull(void) const {
return cache_toofull_ratio() > 0.0;
}
uint64_t cache_size(void) const {
return mempool::get_pool(mempool::mds_co::id).allocated_bytes();
}
bool cache_overfull(void) const {
return cache_size() > cache_memory_limit*cache_health_threshold;
}
void advance_stray();
unsigned get_ephemeral_dist_frag_bits() const {
return export_ephemeral_dist_frag_bits;
}
bool get_export_ephemeral_distributed_config(void) const {
return export_ephemeral_distributed_config;
}
bool get_export_ephemeral_random_config(void) const {
return export_ephemeral_random_config;
}
bool get_symlink_recovery(void) const {
return symlink_recovery;
}
/**
* Call this when you know that a CDentry is ready to be passed
* on to StrayManager (i.e. this is a stray you've just created)
*/
void notify_stray(CDentry *dn) {
ceph_assert(dn->get_dir()->get_inode()->is_stray());
// already on its way out; nothing to evaluate
if (dn->state_test(CDentry::STATE_PURGING))
return;
stray_manager.eval_stray(dn);
}
mds_rank_t hash_into_rank_bucket(inodeno_t ino, frag_t fg=0);
void maybe_eval_stray(CInode *in, bool delay=false);
void clear_dirty_bits_for_stray(CInode* diri);
bool is_readonly() { return readonly; }
void force_readonly();
static file_layout_t gen_default_file_layout(const MDSMap &mdsmap);
static file_layout_t gen_default_log_layout(const MDSMap &mdsmap);
void register_perfcounters();
void touch_client_lease(ClientLease *r, int pool, utime_t ttl) {
client_leases[pool].push_back(&r->item_lease);
r->ttl = ttl;
}
void notify_stray_removed()
{
stray_manager.notify_stray_removed();
}
void notify_stray_created()
{
stray_manager.notify_stray_created();
}
void eval_remote(CDentry *dn)
{
stray_manager.eval_remote(dn);
}
void _send_discover(discover_info_t& dis);
// Allocate a fresh discover slot targeted at 'mds', keyed by a newly
// assigned unique tid, and return a reference to it for the caller to fill.
discover_info_t& _create_discover(mds_rank_t mds) {
  const ceph_tid_t tid = ++discover_last_tid;
  auto& info = discovers[tid];
  info.tid = tid;
  info.mds = mds;
  return info;
}
void discover_base_ino(inodeno_t want_ino, MDSContext *onfinish, mds_rank_t from=MDS_RANK_NONE);
void discover_dir_frag(CInode *base, frag_t approx_fg, MDSContext *onfinish,
mds_rank_t from=MDS_RANK_NONE);
void discover_path(CInode *base, snapid_t snap, filepath want_path, MDSContext *onfinish,
bool path_locked=false, mds_rank_t from=MDS_RANK_NONE);
void discover_path(CDir *base, snapid_t snap, filepath want_path, MDSContext *onfinish,
bool path_locked=false);
void kick_discovers(mds_rank_t who); // after a failure.
// adjust subtree auth specification
// dir->dir_auth
// imports/exports/nested_exports
// join/split subtrees as appropriate
bool is_subtrees() { return !subtrees.empty(); }
// Append every subtree root into 'c'; reserves capacity up front when the
// output container is a std::vector<CDir*>.
template<typename T>
void get_subtrees(T& c) {
  if constexpr (std::is_same_v<T, std::vector<CDir*>>) {
    c.reserve(c.size() + subtrees.size());
  }
  for (const auto& [root, bounds] : subtrees) {
    c.push_back(root);
  }
}
void adjust_subtree_auth(CDir *root, mds_authority_t auth, bool adjust_pop=true);
void adjust_subtree_auth(CDir *root, mds_rank_t a, mds_rank_t b=CDIR_AUTH_UNKNOWN) {
adjust_subtree_auth(root, mds_authority_t(a,b));
}
void adjust_bounded_subtree_auth(CDir *dir, const std::set<CDir*>& bounds, mds_authority_t auth);
void adjust_bounded_subtree_auth(CDir *dir, const std::set<CDir*>& bounds, mds_rank_t a) {
adjust_bounded_subtree_auth(dir, bounds, mds_authority_t(a, CDIR_AUTH_UNKNOWN));
}
void adjust_bounded_subtree_auth(CDir *dir, const std::vector<dirfrag_t>& bounds, const mds_authority_t &auth);
void adjust_bounded_subtree_auth(CDir *dir, const std::vector<dirfrag_t>& bounds, mds_rank_t a) {
adjust_bounded_subtree_auth(dir, bounds, mds_authority_t(a, CDIR_AUTH_UNKNOWN));
}
void map_dirfrag_set(const std::list<dirfrag_t>& dfs, std::set<CDir*>& result);
void try_subtree_merge(CDir *root);
void try_subtree_merge_at(CDir *root, std::set<CInode*> *to_eval, bool adjust_pop=true);
void eval_subtree_root(CInode *diri);
CDir *get_subtree_root(CDir *dir);
CDir *get_projected_subtree_root(CDir *dir);
bool is_leaf_subtree(CDir *dir) {
ceph_assert(subtrees.count(dir));
return subtrees[dir].empty();
}
void remove_subtree(CDir *dir);
bool is_subtree(CDir *root) {
return subtrees.count(root);
}
void get_subtree_bounds(CDir *root, std::set<CDir*>& bounds);
void get_wouldbe_subtree_bounds(CDir *root, std::set<CDir*>& bounds);
void verify_subtree_bounds(CDir *root, const std::set<CDir*>& bounds);
void verify_subtree_bounds(CDir *root, const std::list<dirfrag_t>& bounds);
void project_subtree_rename(CInode *diri, CDir *olddir, CDir *newdir);
void adjust_subtree_after_rename(CInode *diri, CDir *olddir, bool pop);
auto get_auth_subtrees() {
std::vector<CDir*> c;
for (auto& p : subtrees) {
auto& root = p.first;
if (root->is_auth()) {
c.push_back(root);
}
}
return c;
}
auto get_fullauth_subtrees() {
std::vector<CDir*> c;
for (auto& p : subtrees) {
auto& root = p.first;
if (root->is_full_dir_auth()) {
c.push_back(root);
}
}
return c;
}
// Count subtrees whose root reports is_full_dir_auth().
auto num_subtrees_fullauth() const {
  std::size_t count = 0;
  for (const auto& [dir, bounds] : subtrees) {
    if (dir->is_full_dir_auth())
      ++count;
  }
  return count;
}
// Count subtrees whose root reports is_full_dir_nonauth().
auto num_subtrees_fullnonauth() const {
  std::size_t count = 0;
  for (const auto& [dir, bounds] : subtrees) {
    if (dir->is_full_dir_nonauth())
      ++count;
  }
  return count;
}
auto num_subtrees() const {
return subtrees.size();
}
int get_num_client_requests();
MDRequestRef request_start(const cref_t<MClientRequest>& req);
MDRequestRef request_start_peer(metareqid_t rid, __u32 attempt, const cref_t<Message> &m);
MDRequestRef request_start_internal(int op);
bool have_request(metareqid_t rid) {
return active_requests.count(rid);
}
MDRequestRef request_get(metareqid_t rid);
void request_pin_ref(MDRequestRef& r, CInode *ref, std::vector<CDentry*>& trace);
void request_finish(MDRequestRef& mdr);
void request_forward(MDRequestRef& mdr, mds_rank_t mds, int port=0);
void dispatch_request(MDRequestRef& mdr);
void request_drop_foreign_locks(MDRequestRef& mdr);
void request_drop_non_rdlocks(MDRequestRef& r);
void request_drop_locks(MDRequestRef& r);
void request_cleanup(MDRequestRef& r);
void request_kill(MDRequestRef& r); // called when session closes
// journal/snap helpers
CInode *pick_inode_snap(CInode *in, snapid_t follows);
CInode *cow_inode(CInode *in, snapid_t last);
void journal_cow_dentry(MutationImpl *mut, EMetaBlob *metablob, CDentry *dn,
snapid_t follows=CEPH_NOSNAP,
CInode **pcow_inode=0, CDentry::linkage_t *dnl=0);
void journal_dirty_inode(MutationImpl *mut, EMetaBlob *metablob, CInode *in, snapid_t follows=CEPH_NOSNAP);
void project_rstat_inode_to_frag(const MutationRef& mut,
CInode *cur, CDir *parent, snapid_t first,
int linkunlink, SnapRealm *prealm);
void _project_rstat_inode_to_frag(const CInode::mempool_inode* inode, snapid_t ofirst, snapid_t last,
CDir *parent, int linkunlink, bool update_inode);
void project_rstat_frag_to_inode(const nest_info_t& rstat, const nest_info_t& accounted_rstat,
snapid_t ofirst, snapid_t last, CInode *pin, bool cow_head);
void broadcast_quota_to_client(CInode *in, client_t exclude_ct = -1, bool quota_change = false);
void predirty_journal_parents(MutationRef mut, EMetaBlob *blob,
CInode *in, CDir *parent,
int flags, int linkunlink=0,
snapid_t follows=CEPH_NOSNAP);
// peers
// Record an uncommitted leader update for 'reqid': the log segment it was
// journaled in, the set of peer ranks involved, and whether it is 'safe'.
// Fix: the original performed three separate map lookups for the same key;
// do one lookup and fill the entry through a reference.
void add_uncommitted_leader(metareqid_t reqid, LogSegment *ls, std::set<mds_rank_t> &peers, bool safe=false) {
  auto& um = uncommitted_leaders[reqid];
  um.ls = ls;
  um.peers = peers;
  um.safe = safe;
}
void wait_for_uncommitted_leader(metareqid_t reqid, MDSContext *c) {
uncommitted_leaders[reqid].waiters.push_back(c);
}
// True iff 'reqid' has an uncommitted leader update whose peer set
// includes 'from'.
bool have_uncommitted_leader(metareqid_t reqid, mds_rank_t from) {
  auto it = uncommitted_leaders.find(reqid);
  if (it == uncommitted_leaders.end())
    return false;
  return it->second.peers.count(from) > 0;
}
void log_leader_commit(metareqid_t reqid);
void logged_leader_update(metareqid_t reqid);
void _logged_leader_commit(metareqid_t reqid);
void committed_leader_peer(metareqid_t r, mds_rank_t from);
void finish_committed_leaders();
void add_uncommitted_peer(metareqid_t reqid, LogSegment*, mds_rank_t, MDPeerUpdate *su=nullptr);
void wait_for_uncommitted_peer(metareqid_t reqid, MDSContext *c) {
uncommitted_peers.at(reqid).waiters.push_back(c);
}
void finish_uncommitted_peer(metareqid_t reqid, bool assert_exist=true);
MDPeerUpdate* get_uncommitted_peer(metareqid_t reqid, mds_rank_t leader);
void _logged_peer_commit(mds_rank_t from, metareqid_t reqid);
void set_recovery_set(std::set<mds_rank_t>& s);
void handle_mds_failure(mds_rank_t who);
void handle_mds_recovery(mds_rank_t who);
void recalc_auth_bits(bool replay);
void remove_inode_recursive(CInode *in);
bool is_ambiguous_peer_update(metareqid_t reqid, mds_rank_t leader) {
auto p = ambiguous_peer_updates.find(leader);
return p != ambiguous_peer_updates.end() && p->second.count(reqid);
}
void add_ambiguous_peer_update(metareqid_t reqid, mds_rank_t leader) {
ambiguous_peer_updates[leader].insert(reqid);
}
// Remove 'reqid' from the ambiguous-update set of 'leader'; drops the
// leader's entry entirely once its set is empty.
// Fix: assert the outer find() succeeded before dereferencing -- the
// original asserted only the inner lookup, so a missing leader entry was
// undefined behavior instead of a clean assertion failure.
void remove_ambiguous_peer_update(metareqid_t reqid, mds_rank_t leader) {
  auto p = ambiguous_peer_updates.find(leader);
  ceph_assert(p != ambiguous_peer_updates.end());
  auto q = p->second.find(reqid);
  ceph_assert(q != p->second.end());
  p->second.erase(q);
  if (p->second.empty())
    ambiguous_peer_updates.erase(p);
}
void add_rollback(metareqid_t reqid, mds_rank_t leader) {
resolve_need_rollback[reqid] = leader;
}
void finish_rollback(metareqid_t reqid, MDRequestRef& mdr);
// ambiguous imports
void add_ambiguous_import(dirfrag_t base, const std::vector<dirfrag_t>& bounds);
void add_ambiguous_import(CDir *base, const std::set<CDir*>& bounds);
bool have_ambiguous_import(dirfrag_t base) {
return my_ambiguous_imports.count(base);
}
// Copy the recorded bound list of ambiguous import 'base' into 'bounds'.
// Fix: the original did count() for the assert and then a second lookup
// via operator[]; a single find() serves both.
void get_ambiguous_import_bounds(dirfrag_t base, std::vector<dirfrag_t>& bounds) {
  auto it = my_ambiguous_imports.find(base);
  ceph_assert(it != my_ambiguous_imports.end());
  bounds = it->second;
}
void cancel_ambiguous_import(CDir *);
void finish_ambiguous_import(dirfrag_t dirino);
void resolve_start(MDSContext *resolve_done_);
void send_resolves();
void maybe_send_pending_resolves() {
if (resolves_pending)
send_subtree_resolves();
}
void _move_subtree_map_bound(dirfrag_t df, dirfrag_t oldparent, dirfrag_t newparent,
std::map<dirfrag_t,std::vector<dirfrag_t> >& subtrees);
ESubtreeMap *create_subtree_map();
void clean_open_file_lists();
void dump_openfiles(Formatter *f);
bool dump_inode(Formatter *f, uint64_t number);
void rejoin_start(MDSContext *rejoin_done_);
void rejoin_gather_finish();
void rejoin_send_rejoins();
void rejoin_export_caps(inodeno_t ino, client_t client, const cap_reconnect_t& icr,
int target=-1, bool drop_path=false) {
auto& ex = cap_exports[ino];
ex.first = target;
auto &_icr = ex.second[client] = icr;
if (drop_path)
_icr.path.clear();
}
void rejoin_recovered_caps(inodeno_t ino, client_t client, const cap_reconnect_t& icr,
mds_rank_t frommds=MDS_RANK_NONE, bool drop_path=false) {
auto &_icr = cap_imports[ino][client][frommds] = icr;
if (drop_path)
_icr.path.clear();
}
void rejoin_recovered_client(client_t client, const entity_inst_t& inst) {
rejoin_client_map.emplace(client, inst);
}
bool rejoin_has_cap_reconnect(inodeno_t ino) const {
return cap_imports.count(ino);
}
void add_replay_ino_alloc(inodeno_t ino) {
cap_imports_missing.insert(ino); // avoid opening ino during cache rejoin
}
// Return the replay-time cap reconnect record for (ino, client) captured
// under MDS_RANK_NONE, or nullptr when none exists.
// Fix: the original walked the nested maps up to six times (three count()
// calls followed by three operator[] chains); a single find() per level
// does the same job.
const cap_reconnect_t *get_replay_cap_reconnect(inodeno_t ino, client_t client) {
  auto i = cap_imports.find(ino);
  if (i == cap_imports.end())
    return nullptr;
  auto c = i->second.find(client);
  if (c == i->second.end())
    return nullptr;
  auto r = c->second.find(MDS_RANK_NONE);
  if (r == c->second.end())
    return nullptr;
  return &r->second;
}
// Drop the (sole) replay cap reconnect record for (ino, client).
// Fix: the original used operator[] inside ceph_assert, which
// default-inserts an empty entry when 'ino' (or 'client') is absent before
// the assertion fires; use find() so a missing entry asserts cleanly
// without mutating the map.
void remove_replay_cap_reconnect(inodeno_t ino, client_t client) {
  auto i = cap_imports.find(ino);
  ceph_assert(i != cap_imports.end());
  ceph_assert(i->second.size() == 1);
  auto c = i->second.find(client);
  ceph_assert(c != i->second.end());
  ceph_assert(c->second.size() == 1);
  cap_imports.erase(i);
}
void wait_replay_cap_reconnect(inodeno_t ino, MDSContext *c) {
cap_reconnect_waiters[ino].push_back(c);
}
void add_reconnected_cap(client_t client, inodeno_t ino, const cap_reconnect_t& icr) {
reconnected_cap_info_t &info = reconnected_caps[ino][client];
info.realm_ino = inodeno_t(icr.capinfo.snaprealm);
info.snap_follows = icr.snap_follows;
}
void set_reconnected_dirty_caps(client_t client, inodeno_t ino, int dirty, bool snapflush) {
reconnected_cap_info_t &info = reconnected_caps[ino][client];
info.dirty_caps |= dirty;
if (snapflush)
info.snapflush = snapflush;
}
void add_reconnected_snaprealm(client_t client, inodeno_t ino, snapid_t seq) {
reconnected_snaprealms[ino][client] = seq;
}
void rejoin_open_ino_finish(inodeno_t ino, int ret);
void rejoin_prefetch_ino_finish(inodeno_t ino, int ret);
void rejoin_open_sessions_finish(std::map<client_t,std::pair<Session*,uint64_t> >& session_map);
bool process_imported_caps();
void choose_lock_states_and_reconnect_caps();
void prepare_realm_split(SnapRealm *realm, client_t client, inodeno_t ino,
std::map<client_t,ref_t<MClientSnap>>& splits);
void prepare_realm_merge(SnapRealm *realm, SnapRealm *parent_realm, std::map<client_t,ref_t<MClientSnap>>& splits);
void send_snaps(std::map<client_t,ref_t<MClientSnap>>& splits);
Capability* rejoin_import_cap(CInode *in, client_t client, const cap_reconnect_t& icr, mds_rank_t frommds);
void finish_snaprealm_reconnect(client_t client, SnapRealm *realm, snapid_t seq,
std::map<client_t,ref_t<MClientSnap>>& updates);
Capability* try_reconnect_cap(CInode *in, Session *session);
void export_remaining_imported_caps();
void do_cap_import(Session *session, CInode *in, Capability *cap,
uint64_t p_cap_id, ceph_seq_t p_seq, ceph_seq_t p_mseq,
int peer, int p_flags);
void do_delayed_cap_imports();
void rebuild_need_snapflush(CInode *head_in, SnapRealm *realm, client_t client,
snapid_t snap_follows);
void open_snaprealms();
bool open_undef_inodes_dirfrags();
void opened_undef_inode(CInode *in);
void opened_undef_dirfrag(CDir *dir) {
rejoin_undef_dirfrags.erase(dir);
}
void reissue_all_caps();
void start_files_to_recover();
void do_file_recover();
void queue_file_recover(CInode *in);
void _queued_file_recover_cow(CInode *in, MutationRef& mut);
void handle_conf_change(const std::set<std::string>& changed, const MDSMap& mds_map);
// debug
void log_stat();
// root inode
CInode *get_root() { return root; }
CInode *get_myin() { return myin; }
size_t get_cache_size() { return lru.lru_get_size(); }
// trimming
std::pair<bool, uint64_t> trim(uint64_t count=0);
bool trim_non_auth_subtree(CDir *directory);
void standby_trim_segment(LogSegment *ls);
void try_trim_non_auth_subtree(CDir *dir);
// A non-auth dirfrag may be trimmed only when it is not an ambiguous
// import and its inode is not the old dir of an uncommitted peer rename.
bool can_trim_non_auth_dirfrag(CDir *dir) {
  if (my_ambiguous_imports.count(dir->dirfrag()) != 0)
    return false;
  return uncommitted_peer_rename_olddir.count(dir->inode) == 0;
}
/**
* For all unreferenced inodes, dirs, dentries below an inode, compose
* expiry messages. This is used when giving up all replicas of entities
* for an MDS peer in the 'stopping' state, such that the peer can
* empty its cache and finish shutting down.
*
* We have to make sure we're only expiring un-referenced items to
* avoid interfering with ongoing stray-movement (we can't distinguish
* between the "moving my strays" and "waiting for my cache to empty"
* phases within 'stopping')
*
* @return false if we completed cleanly, true if caller should stop
* expiring because we hit something with refs.
*/
bool expire_recursive(CInode *in, expiremap& expiremap);
void trim_client_leases();
void check_memory_usage();
void shutdown_start();
void shutdown_check();
bool shutdown_pass();
bool shutdown(); // clear cache (i.e. at shutdown)
bool shutdown_export_strays();
void shutdown_export_stray_finish(inodeno_t ino) {
if (shutdown_exporting_strays.erase(ino))
shutdown_export_strays();
}
// inode_map
// Whether 'vino' is present in the cache.  Head inodes (CEPH_NOSNAP) live
// in inode_map keyed by ino; snapped inodes live in snap_inode_map keyed
// by the full vinodeno.
// Fix: drop the redundant 'count(...) ? true : false' -- comparing the
// count to zero yields the bool directly.
bool have_inode(vinodeno_t vino) {
  if (vino.snapid == CEPH_NOSNAP)
    return inode_map.count(vino.ino) > 0;
  else
    return snap_inode_map.count(vino) > 0;
}
bool have_inode(inodeno_t ino, snapid_t snap=CEPH_NOSNAP) {
return have_inode(vinodeno_t(ino, snap));
}
// Look up a (possibly snapped) inode in the cache; returns NULL when the
// inode is not cached.  Head lookups go through inode_map, snapped lookups
// through snap_inode_map.
CInode* get_inode(vinodeno_t vino) {
  if (vino.snapid == CEPH_NOSNAP) {
    auto it = inode_map.find(vino.ino);
    return it == inode_map.end() ? NULL : it->second;
  }
  auto it = snap_inode_map.find(vino);
  return it == snap_inode_map.end() ? NULL : it->second;
}
CInode* get_inode(inodeno_t ino, snapid_t s=CEPH_NOSNAP) {
return get_inode(vinodeno_t(ino, s));
}
CInode* lookup_snap_inode(vinodeno_t vino) {
auto p = snap_inode_map.lower_bound(vino);
if (p != snap_inode_map.end() &&
p->second->ino() == vino.ino && p->second->first <= vino.snapid)
return p->second;
return NULL;
}
CDir* get_dirfrag(dirfrag_t df) {
CInode *in = get_inode(df.ino);
if (!in)
return NULL;
return in->get_dirfrag(df.frag);
}
CDir* get_dirfrag(inodeno_t ino, std::string_view dn) {
CInode *in = get_inode(ino);
if (!in)
return NULL;
frag_t fg = in->pick_dirfrag(dn);
return in->get_dirfrag(fg);
}
CDir* get_force_dirfrag(dirfrag_t df, bool replay) {
CInode *diri = get_inode(df.ino);
if (!diri)
return NULL;
CDir *dir = force_dir_fragment(diri, df.frag, replay);
if (!dir)
dir = diri->get_dirfrag(df.frag);
return dir;
}
MDSCacheObject *get_object(const MDSCacheObjectInfo &info);
void add_inode(CInode *in);
void remove_inode(CInode *in);
// Refresh 'dn' in the appropriate LRU: bottom-LRU dentries get a
// mid-touch in bottom_lru; otherwise auth dentries are promoted to the
// hot end of the main LRU while replicas only get a mid-touch.
void touch_dentry(CDentry *dn) {
  if (dn->state_test(CDentry::STATE_BOTTOMLRU)) {
    bottom_lru.lru_midtouch(dn);
    return;
  }
  if (dn->is_auth())
    lru.lru_touch(dn);
  else
    lru.lru_midtouch(dn);
}
void touch_dentry_bottom(CDentry *dn) {
if (dn->state_test(CDentry::STATE_BOTTOMLRU))
return;
lru.lru_bottouch(dn);
}
// truncate
void truncate_inode(CInode *in, LogSegment *ls);
void _truncate_inode(CInode *in, LogSegment *ls);
void truncate_inode_finish(CInode *in, LogSegment *ls);
void truncate_inode_write_finish(CInode *in, LogSegment *ls,
uint32_t block_size);
void truncate_inode_logged(CInode *in, MutationRef& mut);
void add_recovered_truncate(CInode *in, LogSegment *ls);
void remove_recovered_truncate(CInode *in, LogSegment *ls);
void start_recovered_truncates();
// purge unsafe inodes
void start_purge_inodes();
void purge_inodes(const interval_set<inodeno_t>& i, LogSegment *ls);
CDir *get_auth_container(CDir *in);
CDir *get_export_container(CDir *dir);
void find_nested_exports(CDir *dir, std::set<CDir*>& s);
void find_nested_exports_under(CDir *import, CDir *dir, std::set<CDir*>& s);
void init_layouts();
void create_unlinked_system_inode(CInode *in, inodeno_t ino,
int mode) const;
CInode *create_system_inode(inodeno_t ino, int mode);
CInode *create_root_inode();
void create_empty_hierarchy(MDSGather *gather);
void create_mydir_hierarchy(MDSGather *gather);
bool is_open() { return open; }
void wait_for_open(MDSContext *c) {
waiting_for_open.push_back(c);
}
void open_root_inode(MDSContext *c);
void open_root();
void open_mydir_inode(MDSContext *c);
void open_mydir_frag(MDSContext *c);
void populate_mydir();
void _create_system_file(CDir *dir, std::string_view name, CInode *in, MDSContext *fin);
void _create_system_file_finish(MutationRef& mut, CDentry *dn,
version_t dpv, MDSContext *fin);
void open_foreign_mdsdir(inodeno_t ino, MDSContext *c);
CDir *get_stray_dir(CInode *in);
/**
* Find the given dentry (and whether it exists or not), its ancestors,
* and get them all into memory and usable on this MDS. This function
* makes a best-effort attempt to load everything; if it needs to
* go away and do something then it will put the request on a waitlist.
* Waiters are built through the supplied MDSContextFactory, and the
* MDRequestRef is associated with the traversal when present.
*
* NOTE(review): this comment previously described 'req' and 'fin'
* parameters that no longer appear in the signature below.
*
* @param mdr The MDRequest associated with the path. Can be null.
* @param cf A MDSContextFactory for waiter building.
* @param path The path to traverse to.
*
* @param flags Specifies different lookup behaviors.
* By default, path_traverse() forwards the request to the auth MDS if that
* is appropriate (ie, if it doesn't know the contents of a directory).
* MDS_TRAVERSE_DISCOVER: Instead of forwarding request, path_traverse()
* attempts to look up the path from a different MDS (and bring them into
* its cache as replicas).
* MDS_TRAVERSE_PATH_LOCKED: path_traverse() will proceed when xlocked
* dentry is encountered.
* MDS_TRAVERSE_WANT_DENTRY: Caller wants tail dentry. Add a null dentry if
* tail dentry does not exist. return 0 even tail dentry is null.
* MDS_TRAVERSE_WANT_AUTH: Always forward request to auth MDS of target inode
* or auth MDS of tail dentry (MDS_TRAVERSE_WANT_DENTRY is set).
*
* @param pdnvec Data return parameter -- on success, contains a
* vector of dentries. On failure, is either empty or contains the
* full trace of traversable dentries.
* @param pin Data return parameter -- if successful, points to the inode
* associated with filepath. If unsuccessful, is null.
*
* @returns 0 on success, 1 on "not done yet", 2 on "forwarding", -errno otherwise.
* If it returns 1, the requester associated with this call has been placed
* on the appropriate waitlist, and it should unwind itself and back out.
* If it returns 2 the request has been forwarded, and again the requester
* should unwind itself and back out.
*/
// Walk 'path' one component at a time on behalf of 'mdr'.  On success
// (return 0) *pdnvec holds the traversed dentries and *pin the final
// inode; 1/2 mean the caller must unwind (waiting/forwarded); -errno on
// failure.  Full contract is documented in the comment block above.
int path_traverse(MDRequestRef& mdr, MDSContextFactory& cf,
const filepath& path, int flags,
std::vector<CDentry*> *pdnvec, CInode **pin=nullptr);
// Cache-only variant: presumably resolves 'path' using only in-memory
// state and returns nullptr on a miss -- confirm against the .cc body.
CInode *cache_traverse(const filepath& path);
// Obtain dirfrag 'fg' of 'diri' held on a remote rank; 'fin' fires when
// it becomes available (inferred from name -- see definition for details).
void open_remote_dirfrag(CInode *diri, frag_t fg, MDSContext *fin);
// Return the inode linked by 'dn'; 'projected' selects the projected
// (not-yet-committed) linkage.
CInode *get_dentry_inode(CDentry *dn, MDRequestRef& mdr, bool projected=false);
// Fetch a batch of ino->path mappings concurrently; inos that cannot be
// resolved are collected in 'missing'.  _traverse_dir is the per-inode
// helper that queues dirs into 'fetch_queue' under 'gather_bld'.
bool parallel_fetch(std::map<inodeno_t,filepath>& pathmap, std::set<inodeno_t>& missing);
bool parallel_fetch_traverse_dir(inodeno_t ino, filepath& path,
std::set<CDir*>& fetch_queue, std::set<inodeno_t>& missing,
C_GatherBuilder &gather_bld);
// Open the inode referenced by remote dentry 'dn'; the _finish variant
// is the internal completion callback ('r' carries the result code).
void open_remote_dentry(CDentry *dn, bool projected, MDSContext *fin,
bool want_xlocked=false);
void _open_remote_dentry_finish(CDentry *dn, inodeno_t ino, MDSContext *fin,
bool want_xlocked, int r);
// Fill 'trace' with the dentry chain leading to 'in'.
void make_trace(std::vector<CDentry*>& trace, CInode *in);
// -- open_ino: locate and load an inode by number --
// Optional hints: backtrace ancestors and the rank believed to be auth.
void open_ino(inodeno_t ino, int64_t pool, MDSContext *fin,
bool want_replica=true, bool want_xlocked=false,
std::vector<inode_backpointer_t> *ancestors_hint=nullptr,
mds_rank_t auth_hint=MDS_RANK_NONE);
// Batch mode: start buffering open_ino work, then submit it in one go.
void open_ino_batch_start();
void open_ino_batch_submit();
// Re-drive open_ino requests that were waiting on rank 'who'.
void kick_open_ino_peers(mds_rank_t who);
// -- find_ino: query peer ranks for the location of an inode --
void find_ino_peers(inodeno_t ino, MDSContext *c,
mds_rank_t hint=MDS_RANK_NONE, bool path_locked=false);
void _do_find_ino_peer(find_ino_peer_info_t& fip);
void handle_find_ino(const cref_t<MMDSFindIno> &m);
void handle_find_ino_reply(const cref_t<MMDSFindInoReply> &m);
// Re-drive find_ino requests that were waiting on rank 'who'.
void kick_find_ino_peers(mds_rank_t who);
// -- snaprealms --
// Accessor for the MDS-wide global snaprealm (created lazily by
// create_global_snaprealm()).
SnapRealm *get_global_snaprealm() const { return global_snaprealm; }
void create_global_snaprealm();
// Propagate a snaprealm change rooted at 'in'; 'snapop' identifies the
// operation, 'notify_clients' controls whether client notifications go out.
void do_realm_invalidate_and_update_notify(CInode *in, int snapop, bool notify_clients=true);
// Cross-rank snap update protocol: sender, message handler, and global
// realm notification.
void send_snap_update(CInode *in, version_t stid, int snap_op);
void handle_snap_update(const cref_t<MMDSSnapUpdate> &m);
void notify_global_snaprealm_update(int snap_op);
// -- stray --
// Read an inode's backtrace object from 'pool' into 'bl'; 'fin' fires on
// completion.
void fetch_backtrace(inodeno_t ino, int64_t pool, bufferlist& bl, Context *fin);
uint64_t get_num_strays() const { return stray_manager.get_num_strays(); }
// == messages ==
// Top-level message dispatcher for this subsystem.
void dispatch(const cref_t<Message> &m);
// Replica (de)serialization: encode cache objects for rank 'to', and
// reconstruct them from a peer; contexts to wake are gathered in
// 'finished'.  decode_* return the created/updated object via the
// reference out-parameter.
void encode_replica_dir(CDir *dir, mds_rank_t to, bufferlist& bl);
void encode_replica_dentry(CDentry *dn, mds_rank_t to, bufferlist& bl);
void encode_replica_inode(CInode *in, mds_rank_t to, bufferlist& bl,
uint64_t features);
void decode_replica_dir(CDir *&dir, bufferlist::const_iterator& p, CInode *diri, mds_rank_t from, MDSContext::vec& finished);
void decode_replica_dentry(CDentry *&dn, bufferlist::const_iterator& p, CDir *dir, MDSContext::vec& finished);
void decode_replica_inode(CInode *&in, bufferlist::const_iterator& p, CDentry *dn, MDSContext::vec& finished);
// Same idea for stray dentries (optionally yielding the inode via 'in').
void encode_replica_stray(CDentry *straydn, mds_rank_t who, bufferlist& bl);
void decode_replica_stray(CDentry *&straydn, CInode **in, const bufferlist &bl, mds_rank_t from);
// -- namespace --
// Remote-dentry link (de)serialization and link/unlink notifications to
// replica ranks ('unlinking' marks an in-progress unlink).
void encode_remote_dentry_link(CDentry::linkage_t *dnl, bufferlist& bl);
void decode_remote_dentry_link(CDir *dir, CDentry *dn, bufferlist::const_iterator& p);
void send_dentry_link(CDentry *dn, MDRequestRef& mdr);
void send_dentry_unlink(CDentry *dn, CDentry *straydn, MDRequestRef& mdr, bool unlinking=false);
/**
 * Queue context 'c' to run once the in-flight fragment operation on
 * 'dirfrag' has committed.  The dirfrag must already be present in
 * uncommitted_fragments; std::map::at() throws std::out_of_range if not.
 */
void wait_for_uncommitted_fragment(dirfrag_t dirfrag, MDSContext *c) {
  auto& ufrag = uncommitted_fragments.at(dirfrag);
  ufrag.waiters.push_back(c);
}
/// True while at least one fragment operation is still awaiting commit.
bool is_any_uncommitted_fragment() const {
  return uncommitted_fragments.size() != 0;
}
// Wait for ALL uncommitted fragment operations (contrast with the
// single-dirfrag variant above), or roll them back.
void wait_for_uncommitted_fragments(MDSContext* finisher);
void rollback_uncommitted_fragments();
// Split 'dir' by 'byn' (presumably a bit count controlling the number of
// resulting frags -- confirm in the .cc body) or merge all frags of
// 'diri' under 'fg' back together.
void split_dir(CDir *dir, int byn);
void merge_dir(CInode *diri, frag_t fg);
// Fragment-freeze bookkeeping used while a split/merge is in progress.
void find_stale_fragment_freeze();
void fragment_freeze_inc_num_waiters(CDir *dir);
bool fragment_are_all_frozen(CDir *dir);
// Number of directory fragmentation operations currently tracked.
int get_num_fragmenting_dirs() { return fragments.size(); }
// -- updates --
//int send_inode_updates(CInode *in);
//void handle_inode_update(MInodeUpdate *m);
// Broadcast/handle dir replication updates ('bcast' forces a broadcast).
int send_dir_updates(CDir *in, bool bcast=false);
void handle_dir_update(const cref_t<MDirUpdate> &m);
// -- cache expiration --
// Process a peer's cache-expire message; delayed expires for a dir can be
// replayed or dropped.
void handle_cache_expire(const cref_t<MCacheExpire> &m);
void process_delayed_expire(CDir *dir);
void discard_delayed_expire(CDir *dir);
// -- mdsmap --
void handle_mdsmap(const MDSMap &mdsmap, const MDSMap &oldmap);
// Dump the cache to the default target; delegates to a three-argument
// overload that is presumably declared elsewhere in this class --
// confirm (only the two-argument overloads are visible here).
int dump_cache() { return dump_cache({}, nullptr, 0); }
int dump_cache(std::string_view filename, double timeout);
int dump_cache(Formatter *f, double timeout);
// Recursive tree dump with a depth limit.
void dump_tree(CInode *in, const int cur_depth, const int max_depth, Formatter *f);
void cache_status(Formatter *f);
void dump_resolve_status(Formatter *f) const;
void dump_rejoin_status(Formatter *f) const;
// == crap fns ==
// Debug-only visualizations; 'dbl' is a debug level threshold.
void show_cache();
void show_subtrees(int dbl=10, bool force_print=false);
/**
 * Debug helper: return an arbitrary inode chosen via rand() by stepping
 * a uniformly-ish random distance into inode_map (O(n) walk; modulo bias
 * is acceptable for a hack).  Asserts the map is non-empty.
 */
CInode *hack_pick_random_inode() {
  ceph_assert(!inode_map.empty());
  auto it = inode_map.begin();
  for (int steps = rand() % inode_map.size(); steps > 0; --steps)
    ++it;
  return it->second;
}
// Flush the dentry at 'path' to the journal/backing store; 'fin' fires
// on completion.
void flush_dentry(std::string_view path, Context *fin);
/**
* Create and start an OP_ENQUEUE_SCRUB
*/
void enqueue_scrub(std::string_view path, std::string_view tag,
bool force, bool recursive, bool repair,
Formatter *f, Context *fin);
// Repair helpers for inode/dirfrag accounting, plus an rdlock-based
// stats reader over all dirfrags of 'diri'.
void repair_inode_stats(CInode *diri);
void repair_dirfrag_stats(CDir *dir);
void rdlock_dirfrags_stats(CInode *diri, MDSInternalContext *fin);
// my leader
MDSRank *mds;
// -- my cache --
LRU lru; // dentry lru for expiring items from cache
LRU bottom_lru; // dentries that should be trimmed ASAP
DecayRate decayrate;
int num_shadow_inodes = 0;
int num_inodes_with_caps = 0;
// NOTE(review): not initialized in-class, unlike its neighbors --
// presumably assigned in the constructor from config; confirm.
unsigned max_dir_commit_size;
file_layout_t default_file_layout;
file_layout_t default_log_layout;
// -- client leases --
// Three lease pools with fixed durations (seconds): 5, 30, 300.
static constexpr std::size_t client_lease_pools = 3;
std::array<float, client_lease_pools> client_lease_durations{5.0, 30.0, 300.0};
// -- client caps --
uint64_t last_cap_id = 0;
// Outstanding discover requests keyed by tid; discover_last_tid is the
// most recently issued tid.
std::map<ceph_tid_t, discover_info_t> discovers;
ceph_tid_t discover_last_tid = 0;
// waiters
std::map<int, std::map<inodeno_t, MDSContext::vec > > waiting_for_base_ino;
std::map<inodeno_t,std::map<client_t, reconnected_cap_info_t> > reconnected_caps; // inode -> client -> snap_follows,realmino
std::map<inodeno_t,std::map<client_t, snapid_t> > reconnected_snaprealms; // realmino -> client -> realmseq
// realm inodes
std::set<CInode*> rejoin_pending_snaprealms;
// cap imports. delayed snap parent opens.
std::map<client_t,std::set<CInode*> > delayed_imported_caps;
// subsystems