@@ -539,24 +539,10 @@ static int __disk_sectors(struct bch_extent_crc_unpacked crc, unsigned sectors)
 				    crc.uncompressed_size));
 }
 
-/*
- * Checking against gc's position has to be done here, inside the cmpxchg()
- * loop, to avoid racing with the start of gc clearing all the marks - GC does
- * that with the gc pos seqlock held.
- */
-static void bch2_mark_pointer(struct bch_fs *c,
-			      struct bkey_s_c_extent e,
-			      struct extent_ptr_decoded p,
-			      s64 sectors, enum bch_data_type data_type,
-			      unsigned replicas,
-			      struct bch_fs_usage *fs_usage,
-			      u64 journal_seq, unsigned flags)
+static s64 ptr_disk_sectors(struct bkey_s_c_extent e,
+			    struct extent_ptr_decoded p,
+			    s64 sectors)
 {
-	struct bucket_mark old, new;
-	struct bch_dev *ca = bch_dev_bkey_exists(c, p.ptr.dev);
-	struct bucket *g = PTR_BUCKET(ca, &p.ptr);
-	s64 uncompressed_sectors = sectors;
-	u64 v;
 
 	if (p.crc.compression_type) {
 		unsigned old_sectors, new_sectors;
@@ -573,19 +559,25 @@ static void bch2_mark_pointer(struct bch_fs *c,
 			+ __disk_sectors(p.crc, new_sectors);
 	}
 
-	/*
-	 * fs level usage (which determines free space) is in uncompressed
-	 * sectors, until copygc + compression is sorted out:
-	 *
-	 * note also that we always update @fs_usage, even when we otherwise
-	 * wouldn't do anything because gc is running - this is because the
-	 * caller still needs to account w.r.t. its disk reservation. It is
-	 * caller's responsibility to not apply @fs_usage if gc is in progress.
-	 */
-	fs_usage->replicas
-		[!p.ptr.cached && replicas ? replicas - 1 : 0].data
-		[!p.ptr.cached ? data_type : BCH_DATA_CACHED] +=
-		uncompressed_sectors;
+	return sectors;
+}
+
+/*
+ * Checking against gc's position has to be done here, inside the cmpxchg()
+ * loop, to avoid racing with the start of gc clearing all the marks - GC does
+ * that with the gc pos seqlock held.
+ */
+static void bch2_mark_pointer(struct bch_fs *c,
+			      struct bkey_s_c_extent e,
+			      struct extent_ptr_decoded p,
+			      s64 sectors, enum bch_data_type data_type,
+			      struct bch_fs_usage *fs_usage,
+			      u64 journal_seq, unsigned flags)
+{
+	struct bucket_mark old, new;
+	struct bch_dev *ca = bch_dev_bkey_exists(c, p.ptr.dev);
+	struct bucket *g = PTR_BUCKET(ca, &p.ptr);
+	u64 v;
 
 	if (flags & BCH_BUCKET_MARK_GC_WILL_VISIT) {
 		if (journal_seq)
@@ -644,16 +636,64 @@ static void bch2_mark_pointer(struct bch_fs *c,
 	       bucket_became_unavailable(c, old, new));
 }
 
-void bch2_mark_key(struct bch_fs *c, struct bkey_s_c k,
-		   s64 sectors, enum bch_data_type data_type,
-		   struct gc_pos pos,
-		   struct bch_fs_usage *stats,
-		   u64 journal_seq, unsigned flags)
+static void bch2_mark_extent(struct bch_fs *c, struct bkey_s_c k,
+			     s64 sectors, enum bch_data_type data_type,
+			     struct gc_pos pos,
+			     struct bch_fs_usage *stats,
+			     u64 journal_seq, unsigned flags)
 {
 	unsigned replicas = bch2_extent_nr_dirty_ptrs(k);
 
 	BUG_ON(replicas && replicas - 1 > ARRAY_SIZE(stats->replicas));
+	BUG_ON(!sectors);
+
+	switch (k.k->type) {
+	case BCH_EXTENT:
+	case BCH_EXTENT_CACHED: {
+		struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
+		const union bch_extent_entry *entry;
+		struct extent_ptr_decoded p;
+
+		extent_for_each_ptr_decode(e, p, entry) {
+			s64 disk_sectors = ptr_disk_sectors(e, p, sectors);
+
+			/*
+			 * fs level usage (which determines free space) is in
+			 * uncompressed sectors, until copygc + compression is
+			 * sorted out:
+			 *
+			 * note also that we always update @fs_usage, even when
+			 * we otherwise wouldn't do anything because gc is
+			 * running - this is because the caller still needs to
+			 * account w.r.t. its disk reservation. It is caller's
+			 * responsibility to not apply @fs_usage if gc is in
+			 * progress.
+			 */
+			stats->replicas
+				[!p.ptr.cached && replicas ? replicas - 1 : 0].data
+				[!p.ptr.cached ? data_type : BCH_DATA_CACHED] +=
+				sectors;
+
+			bch2_mark_pointer(c, e, p, disk_sectors, data_type,
+					  stats, journal_seq, flags);
+		}
+		break;
+	}
+	case BCH_RESERVATION:
+		if (replicas)
+			stats->replicas[replicas - 1].persistent_reserved +=
+				sectors * replicas;
+		break;
+	}
+}
 
+void bch2_mark_key(struct bch_fs *c,
+		   enum bkey_type type, struct bkey_s_c k,
+		   bool inserting, s64 sectors,
+		   struct gc_pos pos,
+		   struct bch_fs_usage *stats,
+		   u64 journal_seq, unsigned flags)
+{
 	/*
 	 * synchronization w.r.t. GC:
 	 *
@@ -690,24 +730,19 @@ void bch2_mark_key(struct bch_fs *c, struct bkey_s_c k,
 	if (!stats)
 		stats = this_cpu_ptr(c->usage_percpu);
 
-	switch (k.k->type) {
-	case BCH_EXTENT:
-	case BCH_EXTENT_CACHED: {
-		struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
-		const union bch_extent_entry *entry;
-		struct extent_ptr_decoded p;
-
-		BUG_ON(!sectors);
-
-		extent_for_each_ptr_decode(e, p, entry)
-			bch2_mark_pointer(c, e, p, sectors, data_type,
-					  replicas, stats, journal_seq, flags);
+	switch (type) {
+	case BKEY_TYPE_BTREE:
+		bch2_mark_extent(c, k, inserting
+				 ?  c->opts.btree_node_size
+				 : -c->opts.btree_node_size,
+				 BCH_DATA_BTREE,
+				 pos, stats, journal_seq, flags);
 		break;
-	}
-	case BCH_RESERVATION:
-		if (replicas)
-			stats->replicas[replicas - 1].persistent_reserved +=
-				sectors * replicas;
+	case BKEY_TYPE_EXTENTS:
+		bch2_mark_extent(c, k, sectors, BCH_DATA_USER,
+				 pos, stats, journal_seq, flags);
+		break;
+	default:
 		break;
 	}
 	percpu_up_read(&c->usage_lock);
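
For reference, a small worked example of the compressed-extent arithmetic this commit moves into ptr_disk_sectors(). The rounding helper mirrors __disk_sectors() from the context at the top of the diff; the struct, the macro stand-ins, and the concrete numbers are illustrative only, not kernel code:

```c
#include <stdio.h>

/* Userspace stand-ins for the kernel macros used above (assumptions,
 * just enough to run the arithmetic outside the kernel): */
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define max(a, b)		((a) > (b) ? (a) : (b))

struct crc { unsigned compressed_size, uncompressed_size; };

/* Mirrors __disk_sectors(): scale a count of uncompressed sectors down
 * by the extent's compression ratio, rounding up and never returning 0
 * for a nonzero input. */
static int disk_sectors(struct crc crc, unsigned sectors)
{
	if (!sectors)
		return 0;

	return max(1U, DIV_ROUND_UP(sectors * crc.compressed_size,
				    crc.uncompressed_size));
}

int main(void)
{
	/* A 128-sector extent compressed down to 32 sectors on disk: */
	struct crc crc = { .compressed_size = 32, .uncompressed_size = 128 };

	/* Marking 16 uncompressed sectors accounts 16 * 32 / 128 = 4 disk
	 * sectors to the bucket; fs-level usage still counts all 16. */
	printf("%d\n", disk_sectors(crc, 16));	/* prints 4 */
	return 0;
}
```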
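And a hedged sketch of what call sites look like against the new bch2_mark_key() signature, which now takes the bkey type and an inserting flag instead of a precomputed data type. The btree node `b`, `journal_seq`, and the gc_pos_btree_node() helper are assumptions about the surrounding caller, not part of this diff:

```c
/* Illustrative callers only. A user-data extent routes through the
 * BKEY_TYPE_EXTENTS case, which dispatches to
 * bch2_mark_extent(..., BCH_DATA_USER, ...): */
bch2_mark_key(c, BKEY_TYPE_EXTENTS, k,
	      true,			/* inserting */
	      k.k->size,		/* delta in uncompressed sectors */
	      gc_pos_btree_node(b),	/* gc position, for the race check */
	      NULL,			/* NULL: use the percpu fs_usage */
	      journal_seq, 0);

/* Btree node pointers ignore @sectors: the BKEY_TYPE_BTREE case
 * substitutes c->opts.btree_node_size, with the sign picked by
 * @inserting: */
bch2_mark_key(c, BKEY_TYPE_BTREE, bkey_i_to_s_c(&b->key),
	      true, 0, gc_pos_btree_node(b), NULL, 0, 0);
```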