@@ -446,6 +446,7 @@ dmu_tx_count_free(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
         dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset;
         spa_t *spa = txh->txh_tx->tx_pool->dp_spa;
         int epbs;
+        uint64_t l0span = 0, nl1blks = 0;
 
         if (dn->dn_nlevels == 0)
                 return;
@@ -478,6 +479,7 @@ dmu_tx_count_free(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
                         nblks = dn->dn_maxblkid - blkid;
 
         }
+        l0span = nblks; /* save for later use to calc level > 1 overhead */
         if (dn->dn_nlevels == 1) {
                 int i;
                 for (i = 0; i < nblks; i++) {
@@ -490,24 +492,10 @@ dmu_tx_count_free(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
                         }
                         unref += BP_GET_ASIZE(bp);
                 }
+                nl1blks = 1;
                 nblks = 0;
         }
 
-        /*
-         * Add in memory requirements of higher-level indirects.
-         * This assumes a worst-possible scenario for dn_nlevels.
-         */
-        {
-                uint64_t blkcnt = 1 + ((nblks >> epbs) >> epbs);
-                int level = (dn->dn_nlevels > 1) ? 2 : 1;
-
-                while (level++ < DN_MAX_LEVELS) {
-                        txh->txh_memory_tohold += blkcnt << dn->dn_indblkshift;
-                        blkcnt = 1 + (blkcnt >> epbs);
-                }
-                ASSERT(blkcnt <= dn->dn_nblkptr);
-        }
-
         lastblk = blkid + nblks - 1;
         while (nblks) {
                 dmu_buf_impl_t *dbuf;
@@ -578,11 +566,35 @@ dmu_tx_count_free(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
                 }
                 dbuf_rele(dbuf, FTAG);
 
+                ++nl1blks;
                 blkid += tochk;
                 nblks -= tochk;
         }
         rw_exit(&dn->dn_struct_rwlock);
 
+        /*
+         * Add in memory requirements of higher-level indirects.
+         * This assumes a worst-possible scenario for dn_nlevels and a
+         * worst-possible distribution of l1-blocks over the region to free.
+         */
+        {
+                uint64_t blkcnt = 1 + ((l0span >> epbs) >> epbs);
+                int level = 2;
+                /*
+                 * Here we don't use DN_MAX_LEVEL, but calculate it with the
+                 * given datablkshift and indblkshift. This makes the
+                 * difference between 19 and 8 on large files.
+                 */
+                int maxlevel = 2 + (DN_MAX_OFFSET_SHIFT - dn->dn_datablkshift) /
+                    (dn->dn_indblkshift - SPA_BLKPTRSHIFT);
+
+                while (level++ < maxlevel) {
+                        txh->txh_memory_tohold += MIN(blkcnt, (nl1blks >> epbs))
+                            << dn->dn_indblkshift;
+                        blkcnt = 1 + (blkcnt >> epbs);
+                }
+        }
+
 
         /* account for new level 1 indirect blocks that might show up */
         if (skipped > 0) {
                 txh->txh_fudge += skipped << dn->dn_indblkshift;
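
A minimal standalone sketch of the maxlevel calculation added in the last hunk, assuming 128 KiB data blocks (datablkshift = 17) and 16 KiB indirect blocks (indblkshift = 14). DN_MAX_OFFSET_SHIFT (64) and SPA_BLKPTRSHIFT (7, a 128-byte blkptr_t) mirror the ZFS header constants; the block shifts are illustrative assumptions, not values taken from this change. With these inputs the formula gives 2 + (64 - 17) / (14 - 7) = 8 indirect levels, matching the "8" in the patch comment.

/*
 * Self-contained sketch of the maxlevel expression from the patch.
 * DN_MAX_OFFSET_SHIFT and SPA_BLKPTRSHIFT mirror the ZFS header values;
 * the block shifts passed in main() are assumed example values only.
 */
#include <stdio.h>

#define DN_MAX_OFFSET_SHIFT     64      /* max object size is 2^64 bytes */
#define SPA_BLKPTRSHIFT         7       /* sizeof (blkptr_t) == 128 */

static int
calc_maxlevel(int datablkshift, int indblkshift)
{
        /* Same expression as the patch, minus the dnode dereferences. */
        return (2 + (DN_MAX_OFFSET_SHIFT - datablkshift) /
            (indblkshift - SPA_BLKPTRSHIFT));
}

int
main(void)
{
        /* Assumed: 128 KiB data blocks, 16 KiB indirect blocks. */
        printf("maxlevel = %d\n", calc_maxlevel(17, 14));       /* prints 8 */
        return (0);
}

The MIN(blkcnt, nl1blks >> epbs) term in the new loop additionally caps each level's charge by the number of level-1 blocks the hold loop actually walked, so freeing a small range in a very large file is no longer billed for the file's full worst-case indirect tree.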