Skip to content

Commit 6fdf886

Browse files
committed
Merge tag 'for-5.16-rc1-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux
Pull btrfs fixes from David Sterba: "Several fixes and one old ioctl deprecation. Namely there's fix for crashes/warnings with lzo compression that was suspected to be caused by first pull merge resolution, but it was a different bug. Summary: - regression fix for a crash in lzo due to missing boundary checks of the page array - fix crashes on ARM64 due to missing barriers when synchronizing status bits between work queues - silence lockdep when reading chunk tree during mount - fix false positive warning in integrity checker on devices with disabled write caching - fix signedness of bitfields in scrub - start deprecation of balance v1 ioctl" * tag 'for-5.16-rc1-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux: btrfs: deprecate BTRFS_IOC_BALANCE ioctl btrfs: make 1-bit bit-fields of scrub_page unsigned int btrfs: check-integrity: fix a warning on write caching disabled disk btrfs: silence lockdep when reading chunk tree during mount btrfs: fix memory ordering between normal and ordered work functions btrfs: fix a out-of-bound access in copy_compressed_data_to_page()
2 parents db850a9 + 6c405b2 commit 6fdf886

File tree

6 files changed

+58
-9
lines changed

6 files changed

+58
-9
lines changed

fs/btrfs/async-thread.c

Lines changed: 14 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -234,6 +234,13 @@ static void run_ordered_work(struct __btrfs_workqueue *wq,
234234
ordered_list);
235235
if (!test_bit(WORK_DONE_BIT, &work->flags))
236236
break;
237+
/*
238+
* Orders all subsequent loads after reading WORK_DONE_BIT,
239+
* paired with the smp_mb__before_atomic in btrfs_work_helper
240+
* this guarantees that the ordered function will see all
241+
* updates from ordinary work function.
242+
*/
243+
smp_rmb();
237244

238245
/*
239246
* we are going to call the ordered done function, but
@@ -317,6 +324,13 @@ static void btrfs_work_helper(struct work_struct *normal_work)
317324
thresh_exec_hook(wq);
318325
work->func(work);
319326
if (need_order) {
327+
/*
328+
* Ensures all memory accesses done in the work function are
329+
* ordered before setting the WORK_DONE_BIT. Ensuring the thread
330+
* which is going to executed the ordered work sees them.
331+
* Pairs with the smp_rmb in run_ordered_work.
332+
*/
333+
smp_mb__before_atomic();
320334
set_bit(WORK_DONE_BIT, &work->flags);
321335
run_ordered_work(wq, work);
322336
} else {

fs/btrfs/disk-io.c

Lines changed: 13 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -3978,11 +3978,23 @@ static void btrfs_end_empty_barrier(struct bio *bio)
39783978
*/
39793979
static void write_dev_flush(struct btrfs_device *device)
39803980
{
3981-
struct request_queue *q = bdev_get_queue(device->bdev);
39823981
struct bio *bio = device->flush_bio;
39833982

3983+
#ifndef CONFIG_BTRFS_FS_CHECK_INTEGRITY
3984+
/*
3985+
* When a disk has write caching disabled, we skip submission of a bio
3986+
* with flush and sync requests before writing the superblock, since
3987+
* it's not needed. However when the integrity checker is enabled, this
3988+
* results in reports that there are metadata blocks referred by a
3989+
* superblock that were not properly flushed. So don't skip the bio
3990+
* submission only when the integrity checker is enabled for the sake
3991+
* of simplicity, since this is a debug tool and not meant for use in
3992+
* non-debug builds.
3993+
*/
3994+
struct request_queue *q = bdev_get_queue(device->bdev);
39843995
if (!test_bit(QUEUE_FLAG_WC, &q->queue_flags))
39853996
return;
3997+
#endif
39863998

39873999
bio_reset(bio);
39884000
bio->bi_end_io = btrfs_end_empty_barrier;

fs/btrfs/ioctl.c

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -3985,6 +3985,10 @@ static long btrfs_ioctl_balance(struct file *file, void __user *arg)
39853985
bool need_unlock; /* for mut. excl. ops lock */
39863986
int ret;
39873987

3988+
if (!arg)
3989+
btrfs_warn(fs_info,
3990+
"IOC_BALANCE ioctl (v1) is deprecated and will be removed in kernel 5.18");
3991+
39883992
if (!capable(CAP_SYS_ADMIN))
39893993
return -EPERM;
39903994

fs/btrfs/lzo.c

Lines changed: 12 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -125,6 +125,7 @@ static inline size_t read_compress_length(const char *buf)
125125
static int copy_compressed_data_to_page(char *compressed_data,
126126
size_t compressed_size,
127127
struct page **out_pages,
128+
unsigned long max_nr_page,
128129
u32 *cur_out,
129130
const u32 sectorsize)
130131
{
@@ -133,6 +134,9 @@ static int copy_compressed_data_to_page(char *compressed_data,
133134
struct page *cur_page;
134135
char *kaddr;
135136

137+
if ((*cur_out / PAGE_SIZE) >= max_nr_page)
138+
return -E2BIG;
139+
136140
/*
137141
* We never allow a segment header crossing sector boundary, previous
138142
* run should ensure we have enough space left inside the sector.
@@ -161,6 +165,10 @@ static int copy_compressed_data_to_page(char *compressed_data,
161165
orig_out + compressed_size - *cur_out);
162166

163167
kunmap(cur_page);
168+
169+
if ((*cur_out / PAGE_SIZE) >= max_nr_page)
170+
return -E2BIG;
171+
164172
cur_page = out_pages[*cur_out / PAGE_SIZE];
165173
/* Allocate a new page */
166174
if (!cur_page) {
@@ -203,13 +211,15 @@ int lzo_compress_pages(struct list_head *ws, struct address_space *mapping,
203211
const u32 sectorsize = btrfs_sb(mapping->host->i_sb)->sectorsize;
204212
struct page *page_in = NULL;
205213
char *sizes_ptr;
214+
const unsigned long max_nr_page = *out_pages;
206215
int ret = 0;
207216
/* Points to the file offset of input data */
208217
u64 cur_in = start;
209218
/* Points to the current output byte */
210219
u32 cur_out = 0;
211220
u32 len = *total_out;
212221

222+
ASSERT(max_nr_page > 0);
213223
*out_pages = 0;
214224
*total_out = 0;
215225
*total_in = 0;
@@ -248,7 +258,8 @@ int lzo_compress_pages(struct list_head *ws, struct address_space *mapping,
248258
}
249259

250260
ret = copy_compressed_data_to_page(workspace->cbuf, out_len,
251-
pages, &cur_out, sectorsize);
261+
pages, max_nr_page,
262+
&cur_out, sectorsize);
252263
if (ret < 0)
253264
goto out;
254265

fs/btrfs/scrub.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -73,8 +73,8 @@ struct scrub_page {
7373
u64 physical_for_dev_replace;
7474
atomic_t refs;
7575
u8 mirror_num;
76-
int have_csum:1;
77-
int io_error:1;
76+
unsigned int have_csum:1;
77+
unsigned int io_error:1;
7878
u8 csum[BTRFS_CSUM_SIZE];
7979

8080
struct scrub_recover *recover;

fs/btrfs/volumes.c

Lines changed: 13 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -7558,6 +7558,19 @@ int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info)
75587558
*/
75597559
fs_info->fs_devices->total_rw_bytes = 0;
75607560

7561+
/*
7562+
* Lockdep complains about possible circular locking dependency between
7563+
* a disk's open_mutex (struct gendisk.open_mutex), the rw semaphores
7564+
* used for freeze procection of a fs (struct super_block.s_writers),
7565+
* which we take when starting a transaction, and extent buffers of the
7566+
* chunk tree if we call read_one_dev() while holding a lock on an
7567+
* extent buffer of the chunk tree. Since we are mounting the filesystem
7568+
* and at this point there can't be any concurrent task modifying the
7569+
* chunk tree, to keep it simple, just skip locking on the chunk tree.
7570+
*/
7571+
ASSERT(!test_bit(BTRFS_FS_OPEN, &fs_info->flags));
7572+
path->skip_locking = 1;
7573+
75617574
/*
75627575
* Read all device items, and then all the chunk items. All
75637576
* device items are found before any chunk item (their object id
@@ -7583,10 +7596,6 @@ int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info)
75837596
goto error;
75847597
break;
75857598
}
7586-
/*
7587-
* The nodes on level 1 are not locked but we don't need to do
7588-
* that during mount time as nothing else can access the tree
7589-
*/
75907599
node = path->nodes[1];
75917600
if (node) {
75927601
if (last_ra_node != node->start) {
@@ -7614,7 +7623,6 @@ int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info)
76147623
* requirement for chunk allocation, see the comment on
76157624
* top of btrfs_chunk_alloc() for details.
76167625
*/
7617-
ASSERT(!test_bit(BTRFS_FS_OPEN, &fs_info->flags));
76187626
chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
76197627
ret = read_one_chunk(&found_key, leaf, chunk);
76207628
if (ret)

0 commit comments

Comments
 (0)