
Commit 8b45a4f

mcgrof authored and brauner committed
fs/mpage: use blocks_per_folio instead of blocks_per_page
Convert mpage to folios and adjust the accounting to the number of blocks
within a folio instead of within a single page. Also adjust the number of
pages we process to the size of the folio, so that we always read a full
folio.

Note that the page cache code already ensures do_mpage_readpage() is handed
folios that respect the address space min order, so as long as folio_size()
is used for the accounting, mpage can now also process block sizes larger
than the page size.

Originally-by: Hannes Reinecke <[email protected]>
Signed-off-by: Luis Chamberlain <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Reviewed-by: Hannes Reinecke <[email protected]>
Signed-off-by: Christian Brauner <[email protected]>
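For context (not part of the commit), a minimal standalone sketch of the accounting change follows. The folio size, block size, and PAGE_SIZE below are illustrative values, and plain variables stand in for the kernel's folio_size() and inode->i_blkbits:

/* Illustrative only: per-page vs per-folio block accounting. */
#include <stdio.h>

#define PAGE_SIZE	4096UL

int main(void)
{
	unsigned long folio_bytes = 16384;	/* e.g. an order-2 (4-page) folio */
	unsigned blkbits = 12;			/* 4 KiB filesystem blocks */

	/* old: bounds were always sized against a single page */
	unsigned blocks_per_page  = PAGE_SIZE   >> blkbits;
	/* new: bounds are sized against the folio actually being read/written */
	unsigned blocks_per_folio = folio_bytes >> blkbits;

	printf("blocks_per_page=%u blocks_per_folio=%u\n",
	       blocks_per_page, blocks_per_folio);	/* prints 1 vs 4 */
	return 0;
}

With a single-page folio the two values are identical, so the conversion only changes behaviour for large folios, where the read/write bounds now cover the whole folio.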
1 parent 86c60ef commit 8b45a4f

File tree: 1 file changed, +21 −21 lines changed


fs/mpage.c

Lines changed: 21 additions & 21 deletions
@@ -107,7 +107,7 @@ static void map_buffer_to_folio(struct folio *folio, struct buffer_head *bh,
 	 * don't make any buffers if there is only one buffer on
 	 * the folio and the folio just needs to be set up to date
 	 */
-	if (inode->i_blkbits == PAGE_SHIFT &&
+	if (inode->i_blkbits == folio_shift(folio) &&
 	    buffer_uptodate(bh)) {
 		folio_mark_uptodate(folio);
 		return;
@@ -153,15 +153,15 @@ static struct bio *do_mpage_readpage(struct mpage_readpage_args *args)
 	struct folio *folio = args->folio;
 	struct inode *inode = folio->mapping->host;
 	const unsigned blkbits = inode->i_blkbits;
-	const unsigned blocks_per_page = PAGE_SIZE >> blkbits;
+	const unsigned blocks_per_folio = folio_size(folio) >> blkbits;
 	const unsigned blocksize = 1 << blkbits;
 	struct buffer_head *map_bh = &args->map_bh;
 	sector_t block_in_file;
 	sector_t last_block;
 	sector_t last_block_in_file;
 	sector_t first_block;
 	unsigned page_block;
-	unsigned first_hole = blocks_per_page;
+	unsigned first_hole = blocks_per_folio;
 	struct block_device *bdev = NULL;
 	int length;
 	int fully_mapped = 1;
@@ -182,7 +182,7 @@ static struct bio *do_mpage_readpage(struct mpage_readpage_args *args)
 		goto confused;
 
 	block_in_file = folio_pos(folio) >> blkbits;
-	last_block = block_in_file + args->nr_pages * blocks_per_page;
+	last_block = block_in_file + ((args->nr_pages * PAGE_SIZE) >> blkbits);
 	last_block_in_file = (i_size_read(inode) + blocksize - 1) >> blkbits;
 	if (last_block > last_block_in_file)
 		last_block = last_block_in_file;
@@ -204,7 +204,7 @@ static struct bio *do_mpage_readpage(struct mpage_readpage_args *args)
 				clear_buffer_mapped(map_bh);
 				break;
 			}
-			if (page_block == blocks_per_page)
+			if (page_block == blocks_per_folio)
 				break;
 			page_block++;
 			block_in_file++;
@@ -216,7 +216,7 @@ static struct bio *do_mpage_readpage(struct mpage_readpage_args *args)
 	 * Then do more get_blocks calls until we are done with this folio.
 	 */
 	map_bh->b_folio = folio;
-	while (page_block < blocks_per_page) {
+	while (page_block < blocks_per_folio) {
 		map_bh->b_state = 0;
 		map_bh->b_size = 0;
 
@@ -229,7 +229,7 @@ static struct bio *do_mpage_readpage(struct mpage_readpage_args *args)
 
 		if (!buffer_mapped(map_bh)) {
 			fully_mapped = 0;
-			if (first_hole == blocks_per_page)
+			if (first_hole == blocks_per_folio)
 				first_hole = page_block;
 			page_block++;
 			block_in_file++;
@@ -247,7 +247,7 @@ static struct bio *do_mpage_readpage(struct mpage_readpage_args *args)
 			goto confused;
 		}
 
-		if (first_hole != blocks_per_page)
+		if (first_hole != blocks_per_folio)
 			goto confused;	/* hole -> non-hole */
 
 		/* Contiguous blocks? */
@@ -260,16 +260,16 @@ static struct bio *do_mpage_readpage(struct mpage_readpage_args *args)
 			if (relative_block == nblocks) {
 				clear_buffer_mapped(map_bh);
 				break;
-			} else if (page_block == blocks_per_page)
+			} else if (page_block == blocks_per_folio)
 				break;
 			page_block++;
 			block_in_file++;
 		}
 		bdev = map_bh->b_bdev;
 	}
 
-	if (first_hole != blocks_per_page) {
-		folio_zero_segment(folio, first_hole << blkbits, PAGE_SIZE);
+	if (first_hole != blocks_per_folio) {
+		folio_zero_segment(folio, first_hole << blkbits, folio_size(folio));
 		if (first_hole == 0) {
 			folio_mark_uptodate(folio);
 			folio_unlock(folio);
@@ -303,10 +303,10 @@ static struct bio *do_mpage_readpage(struct mpage_readpage_args *args)
 	relative_block = block_in_file - args->first_logical_block;
 	nblocks = map_bh->b_size >> blkbits;
 	if ((buffer_boundary(map_bh) && relative_block == nblocks) ||
-	    (first_hole != blocks_per_page))
+	    (first_hole != blocks_per_folio))
 		args->bio = mpage_bio_submit_read(args->bio);
 	else
-		args->last_block_in_bio = first_block + blocks_per_page - 1;
+		args->last_block_in_bio = first_block + blocks_per_folio - 1;
 out:
 	return args->bio;
 
@@ -385,7 +385,7 @@ int mpage_read_folio(struct folio *folio, get_block_t get_block)
 {
 	struct mpage_readpage_args args = {
 		.folio = folio,
-		.nr_pages = 1,
+		.nr_pages = folio_nr_pages(folio),
 		.get_block = get_block,
 	};
 
@@ -456,12 +456,12 @@ static int __mpage_writepage(struct folio *folio, struct writeback_control *wbc,
 	struct address_space *mapping = folio->mapping;
 	struct inode *inode = mapping->host;
 	const unsigned blkbits = inode->i_blkbits;
-	const unsigned blocks_per_page = PAGE_SIZE >> blkbits;
+	const unsigned blocks_per_folio = folio_size(folio) >> blkbits;
 	sector_t last_block;
 	sector_t block_in_file;
 	sector_t first_block;
 	unsigned page_block;
-	unsigned first_unmapped = blocks_per_page;
+	unsigned first_unmapped = blocks_per_folio;
 	struct block_device *bdev = NULL;
 	int boundary = 0;
 	sector_t boundary_block = 0;
@@ -486,12 +486,12 @@ static int __mpage_writepage(struct folio *folio, struct writeback_control *wbc,
 				 */
 				if (buffer_dirty(bh))
 					goto confused;
-				if (first_unmapped == blocks_per_page)
+				if (first_unmapped == blocks_per_folio)
 					first_unmapped = page_block;
 				continue;
 			}
 
-			if (first_unmapped != blocks_per_page)
+			if (first_unmapped != blocks_per_folio)
 				goto confused;	/* hole -> non-hole */
 
 			if (!buffer_dirty(bh) || !buffer_uptodate(bh))
@@ -536,7 +536,7 @@ static int __mpage_writepage(struct folio *folio, struct writeback_control *wbc,
 		goto page_is_mapped;
 	last_block = (i_size - 1) >> blkbits;
 	map_bh.b_folio = folio;
-	for (page_block = 0; page_block < blocks_per_page; ) {
+	for (page_block = 0; page_block < blocks_per_folio; ) {
 
 		map_bh.b_state = 0;
 		map_bh.b_size = 1 << blkbits;
@@ -618,14 +618,14 @@ static int __mpage_writepage(struct folio *folio, struct writeback_control *wbc,
 	BUG_ON(folio_test_writeback(folio));
 	folio_start_writeback(folio);
 	folio_unlock(folio);
-	if (boundary || (first_unmapped != blocks_per_page)) {
+	if (boundary || (first_unmapped != blocks_per_folio)) {
 		bio = mpage_bio_submit_write(bio);
 		if (boundary_block) {
 			write_boundary_block(boundary_bdev,
 					boundary_block, 1 << blkbits);
 		}
 	} else {
-		mpd->last_block_in_bio = first_block + blocks_per_page - 1;
+		mpd->last_block_in_bio = first_block + blocks_per_folio - 1;
 	}
 	goto out;
 