// SPDX-License-Identifier: GPL-2.0
/*
 * fs/mpage.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 *
 * Contains functions related to preparing and submitting BIOs which contain
 * multiple pagecache pages.
 *
 * 15May2002	Andrew Morton
 *		Initial version
 * 27Jun2002	axboe@suse.de
 *		use bio_add_page() to build bio's just the right size
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/kdev_t.h>
#include <linux/gfp.h>
#include <linux/bio.h>
#include <linux/fs.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/highmem.h>
#include <linux/prefetch.h>
#include <linux/mpage.h>
#include <linux/mm_inline.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/cleancache.h>
#include "internal.h"

/*
 * I/O completion handler for multipage BIOs.
 *
 * The mpage code never puts partial pages into a BIO (except for end-of-file).
 * If a page does not map to a contiguous run of blocks then it simply falls
 * back to block_read_full_folio().
 *
 * Why is this? If a page's completion depends on a number of different BIOs
 * which can complete in any order (or at the same time) then determining the
 * status of that page is hard. See end_buffer_async_read() for the details.
 * There is no point in duplicating all that complexity.
 */
static void mpage_read_end_io(struct bio *bio)
{
	struct folio_iter fi;
	int err = blk_status_to_errno(bio->bi_status);

	bio_for_each_folio_all(fi, bio) {
		if (err)
			folio_set_error(fi.folio);
		else
			folio_mark_uptodate(fi.folio);
		folio_unlock(fi.folio);
	}

	bio_put(bio);
}

static void mpage_write_end_io(struct bio *bio)
{
	struct folio_iter fi;
	int err = blk_status_to_errno(bio->bi_status);

	bio_for_each_folio_all(fi, bio) {
		if (err) {
			folio_set_error(fi.folio);
			mapping_set_error(fi.folio->mapping, err);
		}
		folio_end_writeback(fi.folio);
	}

	bio_put(bio);
}

static struct bio *mpage_bio_submit_read(struct bio *bio)
{
	bio->bi_end_io = mpage_read_end_io;
	guard_bio_eod(bio);
	submit_bio(bio);
	return NULL;
}

static struct bio *mpage_bio_submit_write(struct bio *bio)
{
	bio->bi_end_io = mpage_write_end_io;
	guard_bio_eod(bio);
	submit_bio(bio);
	return NULL;
}

/*
 * support function for mpage_readahead. The fs supplied get_block might
 * return an up to date buffer. This is used to map that buffer into
 * the page, which allows read_folio to avoid triggering a duplicate call
 * to get_block.
 *
 * The idea is to avoid adding buffers to pages that don't already have
 * them. So when the buffer is up to date and the page size == block size,
 * this marks the page up to date instead of adding new buffers.
 */
static void map_buffer_to_folio(struct folio *folio, struct buffer_head *bh,
		int page_block)
{
	struct inode *inode = folio->mapping->host;
	struct buffer_head *page_bh, *head;
	int block = 0;

	head = folio_buffers(folio);
	if (!head) {
		/*
		 * don't make any buffers if there is only one buffer on
		 * the folio and the folio just needs to be set up to date
		 */
		if (inode->i_blkbits == PAGE_SHIFT &&
		    buffer_uptodate(bh)) {
			folio_mark_uptodate(folio);
			return;
		}
		create_empty_buffers(&folio->page, i_blocksize(inode), 0);
		head = folio_buffers(folio);
	}

	page_bh = head;
	do {
		if (block == page_block) {
			page_bh->b_state = bh->b_state;
			page_bh->b_bdev = bh->b_bdev;
			page_bh->b_blocknr = bh->b_blocknr;
			break;
		}
		page_bh = page_bh->b_this_page;
		block++;
	} while (page_bh != head);
}

struct mpage_readpage_args {
	struct bio *bio;
	struct folio *folio;
	unsigned int nr_pages;
	bool is_readahead;
	sector_t last_block_in_bio;
	struct buffer_head map_bh;
	unsigned long first_logical_block;
	get_block_t *get_block;
};

/*
 * This is the worker routine which does all the work of mapping the disk
 * blocks and constructing the largest possible BIOs, submitting them for I/O
 * when the blocks are not contiguous on the disk.
 *
 * We pass a buffer_head back and forth and use its buffer_mapped() flag to
 * represent the validity of its disk mapping and to decide when to do the next
 * get_block() call.
 */
static struct bio *do_mpage_readpage(struct mpage_readpage_args *args)
{
	struct folio *folio = args->folio;
	struct inode *inode = folio->mapping->host;
	const unsigned blkbits = inode->i_blkbits;
	const unsigned blocks_per_page = PAGE_SIZE >> blkbits;
	const unsigned blocksize = 1 << blkbits;
	struct buffer_head *map_bh = &args->map_bh;
	sector_t block_in_file;
	sector_t last_block;
	sector_t last_block_in_file;
	sector_t blocks[MAX_BUF_PER_PAGE];
	unsigned page_block;
	unsigned first_hole = blocks_per_page;
	struct block_device *bdev = NULL;
	int length;
	int fully_mapped = 1;
	blk_opf_t opf = REQ_OP_READ;
	unsigned nblocks;
	unsigned relative_block;
	gfp_t gfp = mapping_gfp_constraint(folio->mapping, GFP_KERNEL);

	/* MAX_BUF_PER_PAGE, for example */
	VM_BUG_ON_FOLIO(folio_test_large(folio), folio);

	if (args->is_readahead) {
		opf |= REQ_RAHEAD;
		gfp |= __GFP_NORETRY | __GFP_NOWARN;
	}

	if (folio_buffers(folio))
		goto confused;

	block_in_file = (sector_t)folio->index << (PAGE_SHIFT - blkbits);
	last_block = block_in_file + args->nr_pages * blocks_per_page;
	last_block_in_file = (i_size_read(inode) + blocksize - 1) >> blkbits;
	if (last_block > last_block_in_file)
		last_block = last_block_in_file;
	page_block = 0;

	/*
	 * Map blocks using the result from the previous get_blocks call first.
	 */
	nblocks = map_bh->b_size >> blkbits;
	if (buffer_mapped(map_bh) &&
			block_in_file > args->first_logical_block &&
			block_in_file < (args->first_logical_block + nblocks)) {
		unsigned map_offset = block_in_file - args->first_logical_block;
		unsigned last = nblocks - map_offset;

		for (relative_block = 0; ; relative_block++) {
			if (relative_block == last) {
				clear_buffer_mapped(map_bh);
				break;
			}
			if (page_block == blocks_per_page)
				break;
			blocks[page_block] = map_bh->b_blocknr + map_offset +
						relative_block;
			page_block++;
			block_in_file++;
		}
		bdev = map_bh->b_bdev;
	}

	/*
	 * Then do more get_blocks calls until we are done with this folio.
	 */
	map_bh->b_folio = folio;
	while (page_block < blocks_per_page) {
		map_bh->b_state = 0;
		map_bh->b_size = 0;

		if (block_in_file < last_block) {
			map_bh->b_size = (last_block-block_in_file) << blkbits;
			if (args->get_block(inode, block_in_file, map_bh, 0))
				goto confused;
			args->first_logical_block = block_in_file;
		}

		if (!buffer_mapped(map_bh)) {
			fully_mapped = 0;
			if (first_hole == blocks_per_page)
				first_hole = page_block;
			page_block++;
			block_in_file++;
			continue;
		}

		/* some filesystems will copy data into the page during
		 * the get_block call, in which case we don't want to
		 * read it again.  map_buffer_to_folio copies the data
		 * we just collected from get_block into the folio's buffers
		 * so read_folio doesn't have to repeat the get_block call
		 */
		if (buffer_uptodate(map_bh)) {
			map_buffer_to_folio(folio, map_bh, page_block);
			goto confused;
		}

		if (first_hole != blocks_per_page)
			goto confused;		/* hole -> non-hole */

		/* Contiguous blocks? */
		if (page_block && blocks[page_block-1] != map_bh->b_blocknr-1)
			goto confused;
		nblocks = map_bh->b_size >> blkbits;
		for (relative_block = 0; ; relative_block++) {
			if (relative_block == nblocks) {
				clear_buffer_mapped(map_bh);
				break;
			} else if (page_block == blocks_per_page)
				break;
			blocks[page_block] = map_bh->b_blocknr+relative_block;
			page_block++;
			block_in_file++;
		}
		bdev = map_bh->b_bdev;
	}

	if (first_hole != blocks_per_page) {
		folio_zero_segment(folio, first_hole << blkbits, PAGE_SIZE);
		if (first_hole == 0) {
			folio_mark_uptodate(folio);
			folio_unlock(folio);
			goto out;
		}
	} else if (fully_mapped) {
		folio_set_mappedtodisk(folio);
	}

	if (fully_mapped && blocks_per_page == 1 && !folio_test_uptodate(folio) &&
	    cleancache_get_page(&folio->page) == 0) {
		folio_mark_uptodate(folio);
		goto confused;
	}

	/*
	 * This folio will go to BIO.  Do we need to send this BIO off first?
	 */
	if (args->bio && (args->last_block_in_bio != blocks[0] - 1))
		args->bio = mpage_bio_submit_read(args->bio);

alloc_new:
	if (args->bio == NULL) {
		args->bio = bio_alloc(bdev, bio_max_segs(args->nr_pages), opf,
				      gfp);
		if (args->bio == NULL)
			goto confused;
		args->bio->bi_iter.bi_sector = blocks[0] << (blkbits - 9);
	}

	length = first_hole << blkbits;
	if (!bio_add_folio(args->bio, folio, length, 0)) {
		args->bio = mpage_bio_submit_read(args->bio);
		goto alloc_new;
	}

	relative_block = block_in_file - args->first_logical_block;
	nblocks = map_bh->b_size >> blkbits;
	if ((buffer_boundary(map_bh) && relative_block == nblocks) ||
	    (first_hole != blocks_per_page))
		args->bio = mpage_bio_submit_read(args->bio);
	else
		args->last_block_in_bio = blocks[blocks_per_page - 1];
out:
	return args->bio;

confused:
	if (args->bio)
		args->bio = mpage_bio_submit_read(args->bio);
	if (!folio_test_uptodate(folio))
		block_read_full_folio(folio, args->get_block);
	else
		folio_unlock(folio);
	goto out;
}

/**
 * mpage_readahead - start reads against pages
 * @rac: Describes which pages to read.
 * @get_block: The filesystem's block mapper function.
 *
 * This function walks the pages and the blocks within each page, building and
 * emitting large BIOs.
 *
 * If anything unusual happens, such as:
 *
 * - encountering a page which has buffers
 * - encountering a page which has a non-hole after a hole
 * - encountering a page with non-contiguous blocks
 *
 * then this code just gives up and calls the buffer_head-based read function.
 * It does handle a page which has holes at the end - that is a common case:
 * the end-of-file on blocksize < PAGE_SIZE setups.
 *
 * BH_Boundary explanation:
 *
 * There is a problem.  The mpage read code assembles several pages, gets all
 * their disk mappings, and then submits them all.  That's fine, but obtaining
 * the disk mappings may require I/O.  Reads of indirect blocks, for example.
 *
 * So an mpage read of the first 16 blocks of an ext2 file will cause I/O to be
 * submitted in the following order:
 *
 *	12 0 1 2 3 4 5 6 7 8 9 10 11 13 14 15 16
 *
 * because the indirect block has to be read to get the mappings of blocks
 * 13,14,15,16.  Obviously, this impacts performance.
 *
| 370 | * So what we do it to allow the filesystem's get_block() function to set |
 * BH_Boundary when it maps block 11.  BH_Boundary says: mapping of the block
 * after this one will require I/O against a block which is probably close to
 * this one.  So you should push what I/O you have currently accumulated.
 *
 * This all causes the disk requests to be issued in the correct order.
 */
void mpage_readahead(struct readahead_control *rac, get_block_t get_block)
{
	struct folio *folio;
	struct mpage_readpage_args args = {
		.get_block = get_block,
		.is_readahead = true,
	};

	while ((folio = readahead_folio(rac))) {
		prefetchw(&folio->flags);
		args.folio = folio;
		args.nr_pages = readahead_count(rac);
		args.bio = do_mpage_readpage(&args);
	}
	if (args.bio)
		mpage_bio_submit_read(args.bio);
}
EXPORT_SYMBOL_NS(mpage_readahead, ANDROID_GKI_VFS_EXPORT_ONLY);
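
/*
 * Illustrative sketch only (not part of this file): a filesystem normally
 * wires mpage_readahead() into its address_space_operations together with
 * its own get_block routine.  The foofs_* names below are hypothetical
 * placeholders, roughly:
 *
 *	static void foofs_readahead(struct readahead_control *rac)
 *	{
 *		mpage_readahead(rac, foofs_get_block);
 *	}
 *
 *	static const struct address_space_operations foofs_aops = {
 *		.readahead	= foofs_readahead,
 *		...
 *	};
 */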

/*
 * This isn't called much at all
 */
int mpage_read_folio(struct folio *folio, get_block_t get_block)
{
	struct mpage_readpage_args args = {
		.folio = folio,
		.nr_pages = 1,
		.get_block = get_block,
	};

	args.bio = do_mpage_readpage(&args);
	if (args.bio)
		mpage_bio_submit_read(args.bio);
	return 0;
}
EXPORT_SYMBOL(mpage_read_folio);
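
/*
 * Illustrative sketch only: the matching ->read_folio hook for the
 * hypothetical foofs above would simply forward to mpage_read_folio():
 *
 *	static int foofs_read_folio(struct file *file, struct folio *folio)
 *	{
 *		return mpage_read_folio(folio, foofs_get_block);
 *	}
 */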

/*
 * Writing is not so simple.
 *
 * If the page has buffers then they will be used for obtaining the disk
 * mapping.  We only support pages which are fully mapped-and-dirty, with a
 * special case for pages which are unmapped at the end: end-of-file.
 *
 * If the page has no buffers (preferred) then the page is mapped here.
 *
 * If all blocks are found to be contiguous then the page can go into the
 * BIO.  Otherwise fall back to the mapping's writepage().
 *
 * FIXME: This code wants an estimate of how many pages are still to be
 * written, so it can intelligently allocate a suitably-sized BIO.  For now,
 * just allocate full-size (16-page) BIOs.
 */

struct mpage_data {
	struct bio *bio;
	sector_t last_block_in_bio;
	get_block_t *get_block;
};

/*
 * We have our BIO, so we can now mark the buffers clean.  Make
 * sure to only clean buffers which we know we'll be writing.
 */
static void clean_buffers(struct page *page, unsigned first_unmapped)
{
	unsigned buffer_counter = 0;
	struct buffer_head *bh, *head;
	if (!page_has_buffers(page))
		return;
	head = page_buffers(page);
	bh = head;

	do {
		if (buffer_counter++ == first_unmapped)
			break;
		clear_buffer_dirty(bh);
		bh = bh->b_this_page;
	} while (bh != head);

	/*
	 * we cannot drop the bh if the page is not uptodate or a concurrent
	 * read_folio would fail to serialize with the bh and it would read from
	 * disk before we reach the platter.
	 */
	if (buffer_heads_over_limit && PageUptodate(page))
		try_to_free_buffers(page_folio(page));
}

/*
 * For situations where we want to clean all buffers attached to a page.
 * We don't need to calculate how many buffers are attached to the page,
 * we just need to specify a number larger than the maximum number of buffers.
 */
void clean_page_buffers(struct page *page)
{
	clean_buffers(page, ~0U);
}

static int __mpage_writepage(struct folio *folio, struct writeback_control *wbc,
		      void *data)
{
	struct mpage_data *mpd = data;
	struct bio *bio = mpd->bio;
	struct address_space *mapping = folio->mapping;
	struct inode *inode = mapping->host;
	const unsigned blkbits = inode->i_blkbits;
	const unsigned blocks_per_page = PAGE_SIZE >> blkbits;
	sector_t last_block;
	sector_t block_in_file;
	sector_t blocks[MAX_BUF_PER_PAGE];
	unsigned page_block;
	unsigned first_unmapped = blocks_per_page;
	struct block_device *bdev = NULL;
	int boundary = 0;
	sector_t boundary_block = 0;
	struct block_device *boundary_bdev = NULL;
	size_t length;
	struct buffer_head map_bh;
	loff_t i_size = i_size_read(inode);
	int ret = 0;
	struct buffer_head *head = folio_buffers(folio);

	if (head) {
		struct buffer_head *bh = head;

		/* If they're all mapped and dirty, do it */
		page_block = 0;
		do {
			BUG_ON(buffer_locked(bh));
			if (!buffer_mapped(bh)) {
				/*
				 * unmapped dirty buffers are created by
				 * block_dirty_folio -> mmapped data
				 */
				if (buffer_dirty(bh))
					goto confused;
				if (first_unmapped == blocks_per_page)
					first_unmapped = page_block;
				continue;
			}

			if (first_unmapped != blocks_per_page)
				goto confused;	/* hole -> non-hole */

			if (!buffer_dirty(bh) || !buffer_uptodate(bh))
				goto confused;
			if (page_block) {
				if (bh->b_blocknr != blocks[page_block-1] + 1)
					goto confused;
			}
			blocks[page_block++] = bh->b_blocknr;
			boundary = buffer_boundary(bh);
			if (boundary) {
				boundary_block = bh->b_blocknr;
				boundary_bdev = bh->b_bdev;
			}
			bdev = bh->b_bdev;
		} while ((bh = bh->b_this_page) != head);

		if (first_unmapped)
			goto page_is_mapped;

		/*
		 * Page has buffers, but they are all unmapped. The page was
		 * created by pagein or read over a hole which was handled by
		 * block_read_full_folio().  If this address_space is also
		 * using mpage_readahead then this can rarely happen.
		 */
		goto confused;
	}

	/*
	 * The page has no buffers: map it to disk
	 */
	BUG_ON(!folio_test_uptodate(folio));
	block_in_file = (sector_t)folio->index << (PAGE_SHIFT - blkbits);
	/*
	 * Whole page beyond EOF? Skip allocating blocks to avoid leaking
	 * space.
	 */
	if (block_in_file >= (i_size + (1 << blkbits) - 1) >> blkbits)
		goto page_is_mapped;
	last_block = (i_size - 1) >> blkbits;
	map_bh.b_folio = folio;
	for (page_block = 0; page_block < blocks_per_page; ) {

		map_bh.b_state = 0;
		map_bh.b_size = 1 << blkbits;
		if (mpd->get_block(inode, block_in_file, &map_bh, 1))
			goto confused;
		if (!buffer_mapped(&map_bh))
			goto confused;
		if (buffer_new(&map_bh))
			clean_bdev_bh_alias(&map_bh);
		if (buffer_boundary(&map_bh)) {
			boundary_block = map_bh.b_blocknr;
			boundary_bdev = map_bh.b_bdev;
		}
		if (page_block) {
			if (map_bh.b_blocknr != blocks[page_block-1] + 1)
				goto confused;
		}
		blocks[page_block++] = map_bh.b_blocknr;
		boundary = buffer_boundary(&map_bh);
		bdev = map_bh.b_bdev;
		if (block_in_file == last_block)
			break;
		block_in_file++;
	}
	BUG_ON(page_block == 0);

	first_unmapped = page_block;

page_is_mapped:
	/* Don't bother writing beyond EOF, truncate will discard the folio */
	if (folio_pos(folio) >= i_size)
		goto confused;
	length = folio_size(folio);
	if (folio_pos(folio) + length > i_size) {
		/*
		 * The page straddles i_size.  It must be zeroed out on each
		 * and every writepage invocation because it may be mmapped.
		 * "A file is mapped in multiples of the page size.  For a file
		 * that is not a multiple of the page size, the remaining memory
		 * is zeroed when mapped, and writes to that region are not
		 * written out to the file."
		 */
		length = i_size - folio_pos(folio);
		folio_zero_segment(folio, length, folio_size(folio));
	}

	/*
	 * This page will go to BIO.  Do we need to send this BIO off first?
	 */
	if (bio && mpd->last_block_in_bio != blocks[0] - 1)
		bio = mpage_bio_submit_write(bio);

alloc_new:
	if (bio == NULL) {
		bio = bio_alloc(bdev, BIO_MAX_VECS,
				REQ_OP_WRITE | wbc_to_write_flags(wbc),
				GFP_NOFS);
		bio->bi_iter.bi_sector = blocks[0] << (blkbits - 9);
		wbc_init_bio(wbc, bio);
		bio->bi_write_hint = inode->i_write_hint;
	}

	/*
	 * Must try to add the page before marking the buffer clean or
	 * the confused fail path above (OOM) will be very confused when
	 * it finds all bh marked clean (i.e. it will not write anything)
	 */
	wbc_account_cgroup_owner(wbc, &folio->page, folio_size(folio));
	length = first_unmapped << blkbits;
	if (!bio_add_folio(bio, folio, length, 0)) {
		bio = mpage_bio_submit_write(bio);
		goto alloc_new;
	}

	clean_buffers(&folio->page, first_unmapped);

	BUG_ON(folio_test_writeback(folio));
	folio_start_writeback(folio);
	folio_unlock(folio);
	if (boundary || (first_unmapped != blocks_per_page)) {
		bio = mpage_bio_submit_write(bio);
		if (boundary_block) {
			write_boundary_block(boundary_bdev,
					boundary_block, 1 << blkbits);
		}
	} else {
		mpd->last_block_in_bio = blocks[blocks_per_page - 1];
	}
	goto out;

confused:
	if (bio)
		bio = mpage_bio_submit_write(bio);

	/*
	 * The caller has a ref on the inode, so *mapping is stable
	 */
	ret = block_write_full_page(&folio->page, mpd->get_block, wbc);
	mapping_set_error(mapping, ret);
out:
	mpd->bio = bio;
	return ret;
}

/**
 * mpage_writepages - walk the list of dirty pages of the given address space & writepage() all of them
 * @mapping: address space structure to write
 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
 * @get_block: the filesystem's block mapper function.
 *
 * This is a library function, which implements the writepages()
 * address_space_operation.
 */
int
mpage_writepages(struct address_space *mapping,
		struct writeback_control *wbc, get_block_t get_block)
{
	struct mpage_data mpd = {
		.get_block = get_block,
	};
	struct blk_plug plug;
	int ret;

	blk_start_plug(&plug);
	ret = write_cache_pages(mapping, wbc, __mpage_writepage, &mpd);
	if (mpd.bio)
		mpage_bio_submit_write(mpd.bio);
	blk_finish_plug(&plug);
	return ret;
}
EXPORT_SYMBOL(mpage_writepages);
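
/*
 * Illustrative sketch only: a filesystem built on these helpers would
 * typically hook mpage_writepages() up alongside the read-side helpers
 * shown earlier, e.g. for the hypothetical foofs:
 *
 *	static int foofs_writepages(struct address_space *mapping,
 *				    struct writeback_control *wbc)
 *	{
 *		return mpage_writepages(mapping, wbc, foofs_get_block);
 *	}
 *
 * with .writepages = foofs_writepages in its address_space_operations.
 */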