Skip to content

Commit cc9cf35

Browse files
Christoph Hellwig authored and Matthew Wilcox (Oracle) committed
fs: remove the nobh helpers
All callers are gone, so remove the now dead code.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Jan Kara <jack@suse.cz>
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
1 parent 002cbb1 commit cc9cf35

File tree

4 files changed

+1
-358
lines changed

4 files changed

+1
-358
lines changed

fs/buffer.c

Lines changed: 0 additions & 324 deletions
Original file line number | Diff line number | Diff line change
@@ -2537,330 +2537,6 @@ int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
25372537
}
25382538
EXPORT_SYMBOL(block_page_mkwrite);
25392539

2540-
/*
2541-
* nobh_write_begin()'s prereads are special: the buffer_heads are freed
2542-
* immediately, while under the page lock. So it needs a special end_io
2543-
* handler which does not touch the bh after unlocking it.
2544-
*/
2545-
static void end_buffer_read_nobh(struct buffer_head *bh, int uptodate)
2546-
{
2547-
__end_buffer_read_notouch(bh, uptodate);
2548-
}
2549-
2550-
/*
2551-
* Attach the singly-linked list of buffers created by nobh_write_begin, to
2552-
* the page (converting it to circular linked list and taking care of page
2553-
* dirty races).
2554-
*/
2555-
static void attach_nobh_buffers(struct page *page, struct buffer_head *head)
2556-
{
2557-
struct buffer_head *bh;
2558-
2559-
BUG_ON(!PageLocked(page));
2560-
2561-
spin_lock(&page->mapping->private_lock);
2562-
bh = head;
2563-
do {
2564-
if (PageDirty(page))
2565-
set_buffer_dirty(bh);
2566-
if (!bh->b_this_page)
2567-
bh->b_this_page = head;
2568-
bh = bh->b_this_page;
2569-
} while (bh != head);
2570-
attach_page_private(page, head);
2571-
spin_unlock(&page->mapping->private_lock);
2572-
}
2573-
2574-
/*
2575-
* On entry, the page is fully not uptodate.
2576-
* On exit the page is fully uptodate in the areas outside (from,to)
2577-
* The filesystem needs to handle block truncation upon failure.
2578-
*/
2579-
int nobh_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
2580-
struct page **pagep, void **fsdata,
2581-
get_block_t *get_block)
2582-
{
2583-
struct inode *inode = mapping->host;
2584-
const unsigned blkbits = inode->i_blkbits;
2585-
const unsigned blocksize = 1 << blkbits;
2586-
struct buffer_head *head, *bh;
2587-
struct page *page;
2588-
pgoff_t index;
2589-
unsigned from, to;
2590-
unsigned block_in_page;
2591-
unsigned block_start, block_end;
2592-
sector_t block_in_file;
2593-
int nr_reads = 0;
2594-
int ret = 0;
2595-
int is_mapped_to_disk = 1;
2596-
2597-
index = pos >> PAGE_SHIFT;
2598-
from = pos & (PAGE_SIZE - 1);
2599-
to = from + len;
2600-
2601-
page = grab_cache_page_write_begin(mapping, index);
2602-
if (!page)
2603-
return -ENOMEM;
2604-
*pagep = page;
2605-
*fsdata = NULL;
2606-
2607-
if (page_has_buffers(page)) {
2608-
ret = __block_write_begin(page, pos, len, get_block);
2609-
if (unlikely(ret))
2610-
goto out_release;
2611-
return ret;
2612-
}
2613-
2614-
if (PageMappedToDisk(page))
2615-
return 0;
2616-
2617-
/*
2618-
* Allocate buffers so that we can keep track of state, and potentially
2619-
* attach them to the page if an error occurs. In the common case of
2620-
* no error, they will just be freed again without ever being attached
2621-
* to the page (which is all OK, because we're under the page lock).
2622-
*
2623-
* Be careful: the buffer linked list is a NULL terminated one, rather
2624-
* than the circular one we're used to.
2625-
*/
2626-
head = alloc_page_buffers(page, blocksize, false);
2627-
if (!head) {
2628-
ret = -ENOMEM;
2629-
goto out_release;
2630-
}
2631-
2632-
block_in_file = (sector_t)page->index << (PAGE_SHIFT - blkbits);
2633-
2634-
/*
2635-
* We loop across all blocks in the page, whether or not they are
2636-
* part of the affected region. This is so we can discover if the
2637-
* page is fully mapped-to-disk.
2638-
*/
2639-
for (block_start = 0, block_in_page = 0, bh = head;
2640-
block_start < PAGE_SIZE;
2641-
block_in_page++, block_start += blocksize, bh = bh->b_this_page) {
2642-
int create;
2643-
2644-
block_end = block_start + blocksize;
2645-
bh->b_state = 0;
2646-
create = 1;
2647-
if (block_start >= to)
2648-
create = 0;
2649-
ret = get_block(inode, block_in_file + block_in_page,
2650-
bh, create);
2651-
if (ret)
2652-
goto failed;
2653-
if (!buffer_mapped(bh))
2654-
is_mapped_to_disk = 0;
2655-
if (buffer_new(bh))
2656-
clean_bdev_bh_alias(bh);
2657-
if (PageUptodate(page)) {
2658-
set_buffer_uptodate(bh);
2659-
continue;
2660-
}
2661-
if (buffer_new(bh) || !buffer_mapped(bh)) {
2662-
zero_user_segments(page, block_start, from,
2663-
to, block_end);
2664-
continue;
2665-
}
2666-
if (buffer_uptodate(bh))
2667-
continue; /* reiserfs does this */
2668-
if (block_start < from || block_end > to) {
2669-
lock_buffer(bh);
2670-
bh->b_end_io = end_buffer_read_nobh;
2671-
submit_bh(REQ_OP_READ, 0, bh);
2672-
nr_reads++;
2673-
}
2674-
}
2675-
2676-
if (nr_reads) {
2677-
/*
2678-
* The page is locked, so these buffers are protected from
2679-
* any VM or truncate activity. Hence we don't need to care
2680-
* for the buffer_head refcounts.
2681-
*/
2682-
for (bh = head; bh; bh = bh->b_this_page) {
2683-
wait_on_buffer(bh);
2684-
if (!buffer_uptodate(bh))
2685-
ret = -EIO;
2686-
}
2687-
if (ret)
2688-
goto failed;
2689-
}
2690-
2691-
if (is_mapped_to_disk)
2692-
SetPageMappedToDisk(page);
2693-
2694-
*fsdata = head; /* to be released by nobh_write_end */
2695-
2696-
return 0;
2697-
2698-
failed:
2699-
BUG_ON(!ret);
2700-
/*
2701-
* Error recovery is a bit difficult. We need to zero out blocks that
2702-
* were newly allocated, and dirty them to ensure they get written out.
2703-
* Buffers need to be attached to the page at this point, otherwise
2704-
* the handling of potential IO errors during writeout would be hard
2705-
* (could try doing synchronous writeout, but what if that fails too?)
2706-
*/
2707-
attach_nobh_buffers(page, head);
2708-
page_zero_new_buffers(page, from, to);
2709-
2710-
out_release:
2711-
unlock_page(page);
2712-
put_page(page);
2713-
*pagep = NULL;
2714-
2715-
return ret;
2716-
}
2717-
EXPORT_SYMBOL(nobh_write_begin);
2718-
2719-
int nobh_write_end(struct file *file, struct address_space *mapping,
2720-
loff_t pos, unsigned len, unsigned copied,
2721-
struct page *page, void *fsdata)
2722-
{
2723-
struct inode *inode = page->mapping->host;
2724-
struct buffer_head *head = fsdata;
2725-
struct buffer_head *bh;
2726-
BUG_ON(fsdata != NULL && page_has_buffers(page));
2727-
2728-
if (unlikely(copied < len) && head)
2729-
attach_nobh_buffers(page, head);
2730-
if (page_has_buffers(page))
2731-
return generic_write_end(file, mapping, pos, len,
2732-
copied, page, fsdata);
2733-
2734-
SetPageUptodate(page);
2735-
set_page_dirty(page);
2736-
if (pos+copied > inode->i_size) {
2737-
i_size_write(inode, pos+copied);
2738-
mark_inode_dirty(inode);
2739-
}
2740-
2741-
unlock_page(page);
2742-
put_page(page);
2743-
2744-
while (head) {
2745-
bh = head;
2746-
head = head->b_this_page;
2747-
free_buffer_head(bh);
2748-
}
2749-
2750-
return copied;
2751-
}
2752-
EXPORT_SYMBOL(nobh_write_end);
2753-
2754-
/*
2755-
* nobh_writepage() - based on block_full_write_page() except
2756-
* that it tries to operate without attaching bufferheads to
2757-
* the page.
2758-
*/
2759-
int nobh_writepage(struct page *page, get_block_t *get_block,
2760-
struct writeback_control *wbc)
2761-
{
2762-
struct inode * const inode = page->mapping->host;
2763-
loff_t i_size = i_size_read(inode);
2764-
const pgoff_t end_index = i_size >> PAGE_SHIFT;
2765-
unsigned offset;
2766-
int ret;
2767-
2768-
/* Is the page fully inside i_size? */
2769-
if (page->index < end_index)
2770-
goto out;
2771-
2772-
/* Is the page fully outside i_size? (truncate in progress) */
2773-
offset = i_size & (PAGE_SIZE-1);
2774-
if (page->index >= end_index+1 || !offset) {
2775-
unlock_page(page);
2776-
return 0; /* don't care */
2777-
}
2778-
2779-
/*
2780-
* The page straddles i_size. It must be zeroed out on each and every
2781-
* writepage invocation because it may be mmapped. "A file is mapped
2782-
* in multiples of the page size. For a file that is not a multiple of
2783-
* the page size, the remaining memory is zeroed when mapped, and
2784-
* writes to that region are not written out to the file."
2785-
*/
2786-
zero_user_segment(page, offset, PAGE_SIZE);
2787-
out:
2788-
ret = mpage_writepage(page, get_block, wbc);
2789-
if (ret == -EAGAIN)
2790-
ret = __block_write_full_page(inode, page, get_block, wbc,
2791-
end_buffer_async_write);
2792-
return ret;
2793-
}
2794-
EXPORT_SYMBOL(nobh_writepage);
2795-
2796-
int nobh_truncate_page(struct address_space *mapping,
2797-
loff_t from, get_block_t *get_block)
2798-
{
2799-
pgoff_t index = from >> PAGE_SHIFT;
2800-
struct inode *inode = mapping->host;
2801-
unsigned blocksize = i_blocksize(inode);
2802-
struct folio *folio;
2803-
struct buffer_head map_bh;
2804-
size_t offset;
2805-
sector_t iblock;
2806-
int err;
2807-
2808-
/* Block boundary? Nothing to do */
2809-
if (!(from & (blocksize - 1)))
2810-
return 0;
2811-
2812-
folio = __filemap_get_folio(mapping, index, FGP_LOCK | FGP_CREAT,
2813-
mapping_gfp_mask(mapping));
2814-
err = -ENOMEM;
2815-
if (!folio)
2816-
goto out;
2817-
2818-
if (folio_buffers(folio))
2819-
goto has_buffers;
2820-
2821-
iblock = from >> inode->i_blkbits;
2822-
map_bh.b_size = blocksize;
2823-
map_bh.b_state = 0;
2824-
err = get_block(inode, iblock, &map_bh, 0);
2825-
if (err)
2826-
goto unlock;
2827-
/* unmapped? It's a hole - nothing to do */
2828-
if (!buffer_mapped(&map_bh))
2829-
goto unlock;
2830-
2831-
/* Ok, it's mapped. Make sure it's up-to-date */
2832-
if (!folio_test_uptodate(folio)) {
2833-
err = mapping->a_ops->read_folio(NULL, folio);
2834-
if (err) {
2835-
folio_put(folio);
2836-
goto out;
2837-
}
2838-
folio_lock(folio);
2839-
if (!folio_test_uptodate(folio)) {
2840-
err = -EIO;
2841-
goto unlock;
2842-
}
2843-
if (folio_buffers(folio))
2844-
goto has_buffers;
2845-
}
2846-
offset = offset_in_folio(folio, from);
2847-
folio_zero_segment(folio, offset, round_up(offset, blocksize));
2848-
folio_mark_dirty(folio);
2849-
err = 0;
2850-
2851-
unlock:
2852-
folio_unlock(folio);
2853-
folio_put(folio);
2854-
out:
2855-
return err;
2856-
2857-
has_buffers:
2858-
folio_unlock(folio);
2859-
folio_put(folio);
2860-
return block_truncate_page(mapping, from, get_block);
2861-
}
2862-
EXPORT_SYMBOL(nobh_truncate_page);
2863-
28642540
int block_truncate_page(struct address_space *mapping,
28652541
loff_t from, get_block_t *get_block)
28662542
{

fs/mpage.c

Lines changed: 1 addition & 24 deletions
Original file line number | Diff line number | Diff line change
@@ -404,7 +404,6 @@ struct mpage_data {
404404
struct bio *bio;
405405
sector_t last_block_in_bio;
406406
get_block_t *get_block;
407-
unsigned use_writepage;
408407
};
409408

410409
/*
@@ -624,15 +623,10 @@ static int __mpage_writepage(struct page *page, struct writeback_control *wbc,
624623
if (bio)
625624
bio = mpage_bio_submit(bio);
626625

627-
if (mpd->use_writepage) {
628-
ret = mapping->a_ops->writepage(page, wbc);
629-
} else {
630-
ret = -EAGAIN;
631-
goto out;
632-
}
633626
/*
634627
* The caller has a ref on the inode, so *mapping is stable
635628
*/
629+
ret = mapping->a_ops->writepage(page, wbc);
636630
mapping_set_error(mapping, ret);
637631
out:
638632
mpd->bio = bio;
@@ -674,7 +668,6 @@ mpage_writepages(struct address_space *mapping,
674668
.bio = NULL,
675669
.last_block_in_bio = 0,
676670
.get_block = get_block,
677-
.use_writepage = 1,
678671
};
679672

680673
ret = write_cache_pages(mapping, wbc, __mpage_writepage, &mpd);
@@ -685,19 +678,3 @@ mpage_writepages(struct address_space *mapping,
685678
return ret;
686679
}
687680
EXPORT_SYMBOL(mpage_writepages);
688-
689-
int mpage_writepage(struct page *page, get_block_t get_block,
690-
struct writeback_control *wbc)
691-
{
692-
struct mpage_data mpd = {
693-
.bio = NULL,
694-
.last_block_in_bio = 0,
695-
.get_block = get_block,
696-
.use_writepage = 0,
697-
};
698-
int ret = __mpage_writepage(page, wbc, &mpd);
699-
if (mpd.bio)
700-
mpage_bio_submit(mpd.bio);
701-
return ret;
702-
}
703-
EXPORT_SYMBOL(mpage_writepage);

include/linux/buffer_head.h

Lines changed: 0 additions & 8 deletions
Original file line number | Diff line number | Diff line change
@@ -258,14 +258,6 @@ static inline vm_fault_t block_page_mkwrite_return(int err)
258258
}
259259
sector_t generic_block_bmap(struct address_space *, sector_t, get_block_t *);
260260
int block_truncate_page(struct address_space *, loff_t, get_block_t *);
261-
int nobh_write_begin(struct address_space *, loff_t, unsigned len,
262-
struct page **, void **, get_block_t*);
263-
int nobh_write_end(struct file *, struct address_space *,
264-
loff_t, unsigned, unsigned,
265-
struct page *, void *);
266-
int nobh_truncate_page(struct address_space *, loff_t, get_block_t *);
267-
int nobh_writepage(struct page *page, get_block_t *get_block,
268-
struct writeback_control *wbc);
269261

270262
#ifdef CONFIG_MIGRATION
271263
extern int buffer_migrate_folio(struct address_space *,

0 commit comments

Comments (0)