ext4: Convert ext4_bio_write_page() to use a folio
Remove several calls to compound_head() and the last caller of
set_page_writeback_keepwrite(), so remove the wrapper too.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Matthew Wilcox (Oracle) authored and intel-lab-lkp committed Jan 28, 2023
1 parent 242c6f8 commit f6e4c5c
Showing 2 changed files with 27 additions and 36 deletions.
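
The core of the conversion, condensed from the diff below (an illustrative sketch only, not a compilable excerpt; the bounce-page and error paths are omitted):

	/* Before: each page-based call re-derives the head page internally,
	 * and the keepwrite case needs its own wrapper. */
	if (keep_towrite)
		set_page_writeback_keepwrite(page);
	else
		set_page_writeback(page);

	/* After: page_folio() resolves compound_head() once, the folio calls
	 * reuse that, and keepwrite becomes an ordinary argument. */
	struct folio *folio = page_folio(page);
	__folio_start_writeback(folio, keep_towrite);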
58 changes: 27 additions & 31 deletions fs/ext4/page-io.c
@@ -409,62 +409,60 @@ static void io_submit_init_bio(struct ext4_io_submit *io,
 
 static void io_submit_add_bh(struct ext4_io_submit *io,
 			     struct inode *inode,
-			     struct page *page,
+			     struct folio *folio,
 			     struct buffer_head *bh)
 {
-	int ret;
-
 	if (io->io_bio && (bh->b_blocknr != io->io_next_block ||
 			   !fscrypt_mergeable_bio_bh(io->io_bio, bh))) {
 submit_and_retry:
 		ext4_io_submit(io);
 	}
 	if (io->io_bio == NULL)
 		io_submit_init_bio(io, bh);
-	ret = bio_add_page(io->io_bio, page, bh->b_size, bh_offset(bh));
-	if (ret != bh->b_size)
+	if (!bio_add_folio(io->io_bio, folio, bh->b_size, bh_offset(bh)))
 		goto submit_and_retry;
-	wbc_account_cgroup_owner(io->io_wbc, page, bh->b_size);
+	wbc_account_cgroup_owner(io->io_wbc, &folio->page, bh->b_size);
 	io->io_next_block++;
 }

 int ext4_bio_write_page(struct ext4_io_submit *io,
 			struct page *page,
 			int len)
 {
-	struct page *bounce_page = NULL;
-	struct inode *inode = page->mapping->host;
+	struct folio *folio = page_folio(page);
+	struct folio *io_folio = folio;
+	struct inode *inode = folio->mapping->host;
 	unsigned block_start;
 	struct buffer_head *bh, *head;
 	int ret = 0;
 	int nr_to_submit = 0;
 	struct writeback_control *wbc = io->io_wbc;
 	bool keep_towrite = false;
 
-	BUG_ON(!PageLocked(page));
-	BUG_ON(PageWriteback(page));
+	BUG_ON(!folio_test_locked(folio));
+	BUG_ON(folio_test_writeback(folio));
 
-	ClearPageError(page);
+	folio_clear_error(folio);
 
 	/*
 	 * Comments copied from block_write_full_page:
 	 *
-	 * The page straddles i_size. It must be zeroed out on each and every
+	 * The folio straddles i_size. It must be zeroed out on each and every
 	 * writepage invocation because it may be mmapped. "A file is mapped
 	 * in multiples of the page size. For a file that is not a multiple of
 	 * the page size, the remaining memory is zeroed when mapped, and
 	 * writes to that region are not written out to the file."
 	 */
-	if (len < PAGE_SIZE)
-		zero_user_segment(page, len, PAGE_SIZE);
+	if (len < folio_size(folio))
+		folio_zero_segment(folio, len, folio_size(folio));
 	/*
 	 * In the first loop we prepare and mark buffers to submit. We have to
-	 * mark all buffers in the page before submitting so that
-	 * end_page_writeback() cannot be called from ext4_end_bio() when IO
+	 * mark all buffers in the folio before submitting so that
+	 * folio_end_writeback() cannot be called from ext4_end_bio() when IO
 	 * on the first buffer finishes and we are still working on submitting
 	 * the second buffer.
 	 */
-	bh = head = page_buffers(page);
+	bh = head = folio_buffers(folio);
 	do {
 		block_start = bh_offset(bh);
 		if (block_start >= len) {
@@ -479,14 +477,14 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
 				clear_buffer_dirty(bh);
 			/*
 			 * Keeping dirty some buffer we cannot write? Make sure
-			 * to redirty the page and keep TOWRITE tag so that
-			 * racing WB_SYNC_ALL writeback does not skip the page.
+			 * to redirty the folio and keep TOWRITE tag so that
+			 * racing WB_SYNC_ALL writeback does not skip the folio.
 			 * This happens e.g. when doing writeout for
 			 * transaction commit.
 			 */
 			if (buffer_dirty(bh)) {
-				if (!PageDirty(page))
-					redirty_page_for_writepage(wbc, page);
+				if (!folio_test_dirty(folio))
+					folio_redirty_for_writepage(wbc, folio);
 				keep_towrite = true;
 			}
 			continue;
@@ -498,11 +496,11 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
 		nr_to_submit++;
 	} while ((bh = bh->b_this_page) != head);
 
-	/* Nothing to submit? Just unlock the page... */
+	/* Nothing to submit? Just unlock the folio... */
 	if (!nr_to_submit)
 		goto unlock;
 
-	bh = head = page_buffers(page);
+	bh = head = folio_buffers(folio);
 
 	/*
 	 * If any blocks are being written to an encrypted file, encrypt them
@@ -514,6 +512,7 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
 	if (fscrypt_inode_uses_fs_layer_crypto(inode) && nr_to_submit) {
 		gfp_t gfp_flags = GFP_NOFS;
 		unsigned int enc_bytes = round_up(len, i_blocksize(inode));
+		struct page *bounce_page;
 
 		/*
 		 * Since bounce page allocation uses a mempool, we can only use
@@ -540,7 +539,7 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
 			}
 
 			printk_ratelimited(KERN_ERR "%s: ret = %d\n", __func__, ret);
-			redirty_page_for_writepage(wbc, page);
+			folio_redirty_for_writepage(wbc, folio);
 			do {
 				if (buffer_async_write(bh)) {
 					clear_buffer_async_write(bh);
@@ -550,21 +549,18 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
 			} while (bh != head);
 			goto unlock;
 		}
+		io_folio = page_folio(bounce_page);
 	}
 
-	if (keep_towrite)
-		set_page_writeback_keepwrite(page);
-	else
-		set_page_writeback(page);
+	__folio_start_writeback(folio, keep_towrite);
 
 	/* Now submit buffers to write */
 	do {
 		if (!buffer_async_write(bh))
 			continue;
-		io_submit_add_bh(io, inode,
-				 bounce_page ? bounce_page : page, bh);
+		io_submit_add_bh(io, inode, io_folio, bh);
 	} while ((bh = bh->b_this_page) != head);
 unlock:
-	unlock_page(page);
+	folio_unlock(folio);
 	return ret;
 }
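
One detail in the hunk above that the commit message does not spell out: the old code chose between bounce_page and page with a ternary at submission time, while the new code carries a second folio pointer, io_folio. Roughly (a sketch, not the literal patch text; bounce_page comes from the fscrypt bounce-page allocation in the collapsed part of the hunk):

	struct folio *folio = page_folio(page);	/* pagecache folio: lock and writeback state live here */
	struct folio *io_folio = folio;		/* what the bio will actually carry */

	if (fscrypt_inode_uses_fs_layer_crypto(inode) && nr_to_submit) {
		/* ciphertext goes through a bounce page, so redirect the I/O folio */
		io_folio = page_folio(bounce_page);
	}

	__folio_start_writeback(folio, keep_towrite);	/* state stays on the pagecache folio */
	io_submit_add_bh(io, inode, io_folio, bh);	/* data comes from io_folio */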
5 changes: 0 additions & 5 deletions include/linux/page-flags.h
@@ -766,11 +766,6 @@ bool set_page_writeback(struct page *page);
 #define folio_start_writeback_keepwrite(folio)	\
 	__folio_start_writeback(folio, true)
 
-static inline void set_page_writeback_keepwrite(struct page *page)
-{
-	folio_start_writeback_keepwrite(page_folio(page));
-}
-
 static inline bool test_set_page_writeback(struct page *page)
 {
 	return set_page_writeback(page);
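
With the wrapper gone, a caller that still only has a struct page would go through the folio interface the wrapper was built on (hypothetical example; no such caller remains in this diff):

	/* Equivalent of the removed set_page_writeback_keepwrite(page): */
	folio_start_writeback_keepwrite(page_folio(page));
	/* ...which expands to: */
	__folio_start_writeback(page_folio(page), true);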
