
Commit 4779932

Kent Overstreet (koverstreet) authored and committed

bcachefs: more key marking refactoring

prep work for erasure coding

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
1 parent 103e212 commit 4779932
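
In short: this commit changes the bch2_mark_key() interface so that callers pass a bkey_type and an inserting flag instead of a precomputed sector count and bch_data_type; the accounting for btree-node keys is now derived inside the marking code itself. For quick reference, the old and new prototypes (copied from the fs/bcachefs/buckets.h hunk below):

    /* before */
    void bch2_mark_key(struct bch_fs *, struct bkey_s_c, s64, enum bch_data_type,
                       struct gc_pos, struct bch_fs_usage *, u64, unsigned);

    /* after */
    void bch2_mark_key(struct bch_fs *, enum bkey_type, struct bkey_s_c,
                       bool, s64, struct gc_pos,
                       struct bch_fs_usage *, u64, unsigned);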

File tree: 5 files changed, +132 −93 lines


fs/bcachefs/btree_gc.c

Lines changed: 18 additions & 21 deletions
@@ -212,34 +212,31 @@ static int bch2_gc_mark_key(struct bch_fs *c, enum bkey_type type,
                             struct bkey_s_c k, bool initial)
 {
         struct gc_pos pos = { 0 };
-        unsigned flags = initial ? BCH_BUCKET_MARK_NOATOMIC : 0;
+        unsigned flags =
+                BCH_BUCKET_MARK_MAY_MAKE_UNAVAILABLE|
+                BCH_BUCKET_MARK_GC_LOCK_HELD|
+                (initial ? BCH_BUCKET_MARK_NOATOMIC : 0);
         int ret = 0;
 
         switch (type) {
         case BKEY_TYPE_BTREE:
-                if (initial) {
-                        ret = bch2_btree_mark_ptrs_initial(c, type, k);
-                        if (ret < 0)
-                                return ret;
-                }
-
-                bch2_mark_key(c, k, c->opts.btree_node_size,
-                              BCH_DATA_BTREE, pos, NULL,
-                              0, flags|
-                              BCH_BUCKET_MARK_MAY_MAKE_UNAVAILABLE|
-                              BCH_BUCKET_MARK_GC_LOCK_HELD);
-                break;
         case BKEY_TYPE_EXTENTS:
                 if (initial) {
                         ret = bch2_btree_mark_ptrs_initial(c, type, k);
                         if (ret < 0)
                                 return ret;
                 }
+                break;
+        default:
+                break;
+        }
+
+        bch2_mark_key(c, type, k, true, k.k->size,
+                      pos, NULL, 0, flags);
 
-                bch2_mark_key(c, k, k.k->size, BCH_DATA_USER, pos, NULL,
-                              0, flags|
-                              BCH_BUCKET_MARK_MAY_MAKE_UNAVAILABLE|
-                              BCH_BUCKET_MARK_GC_LOCK_HELD);
+        switch (type) {
+        case BKEY_TYPE_BTREE:
+        case BKEY_TYPE_EXTENTS:
                 ret = bch2_btree_key_recalc_oldest_gen(c, k);
                 break;
         default:
@@ -473,10 +470,10 @@ static void bch2_mark_pending_btree_node_frees(struct bch_fs *c)
 
         for_each_pending_btree_node_free(c, as, d)
                 if (d->index_update_done)
-                        bch2_mark_key(c, bkey_i_to_s_c(&d->key),
-                                      c->opts.btree_node_size,
-                                      BCH_DATA_BTREE, pos,
-                                      &stats, 0,
+                        bch2_mark_key(c, BKEY_TYPE_BTREE,
+                                      bkey_i_to_s_c(&d->key),
+                                      true, 0,
+                                      pos, &stats, 0,
                                       BCH_BUCKET_MARK_MAY_MAKE_UNAVAILABLE|
                                       BCH_BUCKET_MARK_GC_LOCK_HELD);
         /*

fs/bcachefs/btree_update_interior.c

Lines changed: 21 additions & 16 deletions
@@ -211,11 +211,12 @@ static void bch2_btree_node_free_index(struct btree_update *as, struct btree *b,
         if (gc_pos_cmp(c->gc_pos, gc_phase(GC_PHASE_PENDING_DELETE)) < 0) {
                 struct bch_fs_usage tmp = { 0 };
 
-                bch2_mark_key(c, bkey_i_to_s_c(&d->key),
-                              -c->opts.btree_node_size, BCH_DATA_BTREE, b
-                              ? gc_pos_btree_node(b)
-                              : gc_pos_btree_root(as->btree_id),
-                              &tmp, 0, 0);
+                bch2_mark_key(c, BKEY_TYPE_BTREE,
+                              bkey_i_to_s_c(&d->key),
+                              false, 0, b
+                              ? gc_pos_btree_node(b)
+                              : gc_pos_btree_root(as->btree_id),
+                              &tmp, 0, 0);
                 /*
                  * Don't apply tmp - pending deletes aren't tracked in
                  * bch_alloc_stats:
@@ -290,10 +291,11 @@ static void bch2_btree_node_free_ondisk(struct bch_fs *c,
 
         BUG_ON(!pending->index_update_done);
 
-        bch2_mark_key(c, bkey_i_to_s_c(&pending->key),
-                      -c->opts.btree_node_size, BCH_DATA_BTREE,
-                      gc_phase(GC_PHASE_PENDING_DELETE),
-                      &stats, 0, 0);
+        bch2_mark_key(c, BKEY_TYPE_BTREE,
+                      bkey_i_to_s_c(&pending->key),
+                      false, 0,
+                      gc_phase(GC_PHASE_PENDING_DELETE),
+                      &stats, 0, 0);
         /*
          * Don't apply stats - pending deletes aren't tracked in
          * bch_alloc_stats:
@@ -1092,8 +1094,9 @@ static void bch2_btree_set_root_inmem(struct btree_update *as, struct btree *b)
 
         __bch2_btree_set_root_inmem(c, b);
 
-        bch2_mark_key(c, bkey_i_to_s_c(&b->key),
-                      c->opts.btree_node_size, BCH_DATA_BTREE,
+        bch2_mark_key(c, BKEY_TYPE_BTREE,
+                      bkey_i_to_s_c(&b->key),
+                      true, 0,
                       gc_pos_btree_root(b->btree_id),
                       &stats, 0, 0);
 
@@ -1180,9 +1183,10 @@ static void bch2_insert_fixup_btree_ptr(struct btree_update *as, struct btree *b
         BUG_ON(insert->k.u64s > bch_btree_keys_u64s_remaining(c, b));
 
         if (bkey_extent_is_data(&insert->k))
-                bch2_mark_key(c, bkey_i_to_s_c(insert),
-                              c->opts.btree_node_size, BCH_DATA_BTREE,
-                              gc_pos_btree_node(b), &stats, 0, 0);
+                bch2_mark_key(c, BKEY_TYPE_BTREE,
+                              bkey_i_to_s_c(insert),
+                              true, 0,
+                              gc_pos_btree_node(b), &stats, 0, 0);
 
         while ((k = bch2_btree_node_iter_peek_all(node_iter, b)) &&
                bkey_iter_pos_cmp(b, &insert->k.p, k) > 0)
@@ -1967,8 +1971,9 @@ static void __bch2_btree_node_update_key(struct bch_fs *c,
 
         bch2_btree_node_lock_write(b, iter);
 
-        bch2_mark_key(c, bkey_i_to_s_c(&new_key->k_i),
-                      c->opts.btree_node_size, BCH_DATA_BTREE,
+        bch2_mark_key(c, BKEY_TYPE_BTREE,
+                      bkey_i_to_s_c(&new_key->k_i),
+                      true, 0,
                       gc_pos_btree_root(b->btree_id),
                       &stats, 0, 0);
         bch2_btree_node_free_index(as, NULL,

fs/bcachefs/buckets.c

Lines changed: 87 additions & 52 deletions
@@ -539,24 +539,10 @@ static int __disk_sectors(struct bch_extent_crc_unpacked crc, unsigned sectors)
                                  crc.uncompressed_size));
 }
 
-/*
- * Checking against gc's position has to be done here, inside the cmpxchg()
- * loop, to avoid racing with the start of gc clearing all the marks - GC does
- * that with the gc pos seqlock held.
- */
-static void bch2_mark_pointer(struct bch_fs *c,
-                              struct bkey_s_c_extent e,
-                              struct extent_ptr_decoded p,
-                              s64 sectors, enum bch_data_type data_type,
-                              unsigned replicas,
-                              struct bch_fs_usage *fs_usage,
-                              u64 journal_seq, unsigned flags)
+static s64 ptr_disk_sectors(struct bkey_s_c_extent e,
+                            struct extent_ptr_decoded p,
+                            s64 sectors)
 {
-        struct bucket_mark old, new;
-        struct bch_dev *ca = bch_dev_bkey_exists(c, p.ptr.dev);
-        struct bucket *g = PTR_BUCKET(ca, &p.ptr);
-        s64 uncompressed_sectors = sectors;
-        u64 v;
 
         if (p.crc.compression_type) {
                 unsigned old_sectors, new_sectors;
@@ -573,19 +559,25 @@ static void bch2_mark_pointer(struct bch_fs *c,
                         +__disk_sectors(p.crc, new_sectors);
         }
 
-        /*
-         * fs level usage (which determines free space) is in uncompressed
-         * sectors, until copygc + compression is sorted out:
-         *
-         * note also that we always update @fs_usage, even when we otherwise
-         * wouldn't do anything because gc is running - this is because the
-         * caller still needs to account w.r.t. its disk reservation. It is
-         * caller's responsibility to not apply @fs_usage if gc is in progress.
-         */
-        fs_usage->replicas
-                [!p.ptr.cached && replicas ? replicas - 1 : 0].data
-                [!p.ptr.cached ? data_type : BCH_DATA_CACHED] +=
-                uncompressed_sectors;
+        return sectors;
+}
+
+/*
+ * Checking against gc's position has to be done here, inside the cmpxchg()
+ * loop, to avoid racing with the start of gc clearing all the marks - GC does
+ * that with the gc pos seqlock held.
+ */
+static void bch2_mark_pointer(struct bch_fs *c,
+                              struct bkey_s_c_extent e,
+                              struct extent_ptr_decoded p,
+                              s64 sectors, enum bch_data_type data_type,
+                              struct bch_fs_usage *fs_usage,
+                              u64 journal_seq, unsigned flags)
+{
+        struct bucket_mark old, new;
+        struct bch_dev *ca = bch_dev_bkey_exists(c, p.ptr.dev);
+        struct bucket *g = PTR_BUCKET(ca, &p.ptr);
+        u64 v;
 
         if (flags & BCH_BUCKET_MARK_GC_WILL_VISIT) {
                 if (journal_seq)
@@ -644,16 +636,64 @@ static void bch2_mark_pointer(struct bch_fs *c,
                 bucket_became_unavailable(c, old, new));
 }
 
-void bch2_mark_key(struct bch_fs *c, struct bkey_s_c k,
-                   s64 sectors, enum bch_data_type data_type,
-                   struct gc_pos pos,
-                   struct bch_fs_usage *stats,
-                   u64 journal_seq, unsigned flags)
+static void bch2_mark_extent(struct bch_fs *c, struct bkey_s_c k,
+                             s64 sectors, enum bch_data_type data_type,
+                             struct gc_pos pos,
+                             struct bch_fs_usage *stats,
+                             u64 journal_seq, unsigned flags)
 {
         unsigned replicas = bch2_extent_nr_dirty_ptrs(k);
 
         BUG_ON(replicas && replicas - 1 > ARRAY_SIZE(stats->replicas));
+        BUG_ON(!sectors);
+
+        switch (k.k->type) {
+        case BCH_EXTENT:
+        case BCH_EXTENT_CACHED: {
+                struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
+                const union bch_extent_entry *entry;
+                struct extent_ptr_decoded p;
+
+                extent_for_each_ptr_decode(e, p, entry) {
+                        s64 disk_sectors = ptr_disk_sectors(e, p, sectors);
+
+                        /*
+                         * fs level usage (which determines free space) is in
+                         * uncompressed sectors, until copygc + compression is
+                         * sorted out:
+                         *
+                         * note also that we always update @fs_usage, even when
+                         * we otherwise wouldn't do anything because gc is
+                         * running - this is because the caller still needs to
+                         * account w.r.t. its disk reservation. It is caller's
+                         * responsibility to not apply @fs_usage if gc is in
+                         * progress.
+                         */
+                        stats->replicas
+                                [!p.ptr.cached && replicas ? replicas - 1 : 0].data
+                                [!p.ptr.cached ? data_type : BCH_DATA_CACHED] +=
+                                sectors;
+
+                        bch2_mark_pointer(c, e, p, disk_sectors, data_type,
+                                          stats, journal_seq, flags);
+                }
+                break;
+        }
+        case BCH_RESERVATION:
+                if (replicas)
+                        stats->replicas[replicas - 1].persistent_reserved +=
+                                sectors * replicas;
+                break;
+        }
+}
 
+void bch2_mark_key(struct bch_fs *c,
+                   enum bkey_type type, struct bkey_s_c k,
+                   bool inserting, s64 sectors,
+                   struct gc_pos pos,
+                   struct bch_fs_usage *stats,
+                   u64 journal_seq, unsigned flags)
+{
         /*
          * synchronization w.r.t. GC:
          *
@@ -690,24 +730,19 @@ void bch2_mark_key(struct bch_fs *c, struct bkey_s_c k,
         if (!stats)
                 stats = this_cpu_ptr(c->usage_percpu);
 
-        switch (k.k->type) {
-        case BCH_EXTENT:
-        case BCH_EXTENT_CACHED: {
-                struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
-                const union bch_extent_entry *entry;
-                struct extent_ptr_decoded p;
-
-                BUG_ON(!sectors);
-
-                extent_for_each_ptr_decode(e, p, entry)
-                        bch2_mark_pointer(c, e, p, sectors, data_type,
-                                          replicas, stats, journal_seq, flags);
+        switch (type) {
+        case BKEY_TYPE_BTREE:
+                bch2_mark_extent(c, k, inserting
+                                 ?  c->opts.btree_node_size
+                                 : -c->opts.btree_node_size,
+                                 BCH_DATA_BTREE,
+                                 pos, stats, journal_seq, flags);
                 break;
-        }
-        case BCH_RESERVATION:
-                if (replicas)
-                        stats->replicas[replicas - 1].persistent_reserved +=
-                                sectors * replicas;
+        case BKEY_TYPE_EXTENTS:
+                bch2_mark_extent(c, k, sectors, BCH_DATA_USER,
+                                 pos, stats, journal_seq, flags);
+                break;
+        default:
                 break;
         }
         percpu_up_read(&c->usage_lock);
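
To make the new dispatch easier to see, here is a minimal, self-contained sketch of the calling convention introduced above. It is ordinary userspace C with hypothetical stand-in names and types and a fixed btree_node_size, not the kernel code: it only mirrors how bch2_mark_key() now switches on the key type and derives the signed sector count for btree-node keys from the inserting flag, while extent keys pass their sector count through unchanged.

/*
 * Illustrative model only (hypothetical names, simplified types) - not the
 * kernel code.  It mirrors the dispatch now done by bch2_mark_key().
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum bkey_type     { BKEY_TYPE_BTREE, BKEY_TYPE_EXTENTS, BKEY_TYPE_OTHER };
enum bch_data_type { BCH_DATA_BTREE, BCH_DATA_USER };

/* stand-in for c->opts.btree_node_size */
static const int64_t btree_node_size = 512;

/* stand-in for bch2_mark_extent(): just report what would be accounted */
static void mark_extent(int64_t sectors, enum bch_data_type data_type)
{
        printf("account %+lld sectors as %s\n", (long long) sectors,
               data_type == BCH_DATA_BTREE ? "BCH_DATA_BTREE" : "BCH_DATA_USER");
}

/*
 * Mirrors the new bch2_mark_key() switch: btree-node keys derive their
 * signed sector count from the node size and the inserting flag; extent
 * keys pass the caller-supplied sector count through unchanged.
 */
static void mark_key(enum bkey_type type, bool inserting, int64_t sectors)
{
        switch (type) {
        case BKEY_TYPE_BTREE:
                mark_extent(inserting ?  btree_node_size
                                      : -btree_node_size, BCH_DATA_BTREE);
                break;
        case BKEY_TYPE_EXTENTS:
                mark_extent(sectors, BCH_DATA_USER);
                break;
        default:
                break;
        }
}

int main(void)
{
        mark_key(BKEY_TYPE_BTREE, true, 0);      /* insert a btree node */
        mark_key(BKEY_TYPE_BTREE, false, 0);     /* free a btree node */
        mark_key(BKEY_TYPE_EXTENTS, true, 128);  /* add a 128-sector extent */
        return 0;
}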

fs/bcachefs/buckets.h

Lines changed: 3 additions & 2 deletions
@@ -204,8 +204,9 @@ void bch2_mark_metadata_bucket(struct bch_fs *, struct bch_dev *,
 #define BCH_BUCKET_MARK_GC_WILL_VISIT           (1 << 2)
 #define BCH_BUCKET_MARK_GC_LOCK_HELD            (1 << 3)
 
-void bch2_mark_key(struct bch_fs *, struct bkey_s_c, s64, enum bch_data_type,
-                   struct gc_pos, struct bch_fs_usage *, u64, unsigned);
+void bch2_mark_key(struct bch_fs *, enum bkey_type, struct bkey_s_c,
+                   bool, s64, struct gc_pos,
+                   struct bch_fs_usage *, u64, unsigned);
 
 void bch2_recalc_sectors_available(struct bch_fs *);
 
fs/bcachefs/extents.c

Lines changed: 3 additions & 2 deletions
@@ -1009,8 +1009,9 @@ static void bch2_add_sectors(struct extent_insert_state *s,
         if (!sectors)
                 return;
 
-        bch2_mark_key(c, k, sectors, BCH_DATA_USER, gc_pos_btree_node(b),
-                      &s->stats, s->trans->journal_res.seq, 0);
+        bch2_mark_key(c, BKEY_TYPE_EXTENTS, k, sectors > 0, sectors,
+                      gc_pos_btree_node(b), &s->stats,
+                      s->trans->journal_res.seq, 0);
 }
 
 static void bch2_subtract_sectors(struct extent_insert_state *s,
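
Note on the bch2_add_sectors() call above: the new inserting argument is computed as sectors > 0, while the signed sector count is still passed through; since the BKEY_TYPE_EXTENTS case in buckets.c uses the sector count directly, the sign of sectors continues to distinguish adding from subtracting sectors.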
