Skip to content

Commit b72e591

Browse files
Matthew Wilcox authored and Christian Brauner committed
fs/buffer: remove batching from async read
block_read_full_folio() currently puts all !uptodate buffers into an array allocated on the stack, then iterates over it twice, first locking the buffers and then submitting them for read. We want to remove this array because it occupies too much stack space on configurations with a larger PAGE_SIZE (eg 512 bytes with 8 byte pointers and a 64KiB PAGE_SIZE). We cannot simply submit buffer heads as we find them as the completion handler needs to be able to tell when all reads are finished, so it can end the folio read. So we keep one buffer in reserve (using the 'prev' variable) until the end of the function. Reviewed-by: Hannes Reinecke <[email protected]> Signed-off-by: "Matthew Wilcox (Oracle)" <[email protected]> Signed-off-by: Luis Chamberlain <[email protected]> Link: https://lore.kernel.org/r/[email protected] Signed-off-by: Christian Brauner <[email protected]>
1 parent 753aade commit b72e591

File tree

1 file changed

+21
-30
lines changed

1 file changed

+21
-30
lines changed

fs/buffer.c

Lines changed: 21 additions & 30 deletions
Original file line numberDiff line numberDiff line change
@@ -2361,9 +2361,8 @@ int block_read_full_folio(struct folio *folio, get_block_t *get_block)
23612361
{
23622362
struct inode *inode = folio->mapping->host;
23632363
sector_t iblock, lblock;
2364-
struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
2364+
struct buffer_head *bh, *head, *prev = NULL;
23652365
size_t blocksize;
2366-
int nr, i;
23672366
int fully_mapped = 1;
23682367
bool page_error = false;
23692368
loff_t limit = i_size_read(inode);
@@ -2380,7 +2379,6 @@ int block_read_full_folio(struct folio *folio, get_block_t *get_block)
23802379
iblock = div_u64(folio_pos(folio), blocksize);
23812380
lblock = div_u64(limit + blocksize - 1, blocksize);
23822381
bh = head;
2383-
nr = 0;
23842382

23852383
do {
23862384
if (buffer_uptodate(bh))
@@ -2410,40 +2408,33 @@ int block_read_full_folio(struct folio *folio, get_block_t *get_block)
24102408
if (buffer_uptodate(bh))
24112409
continue;
24122410
}
2413-
arr[nr++] = bh;
2411+
2412+
lock_buffer(bh);
2413+
if (buffer_uptodate(bh)) {
2414+
unlock_buffer(bh);
2415+
continue;
2416+
}
2417+
2418+
mark_buffer_async_read(bh);
2419+
if (prev)
2420+
submit_bh(REQ_OP_READ, prev);
2421+
prev = bh;
24142422
} while (iblock++, (bh = bh->b_this_page) != head);
24152423

24162424
if (fully_mapped)
24172425
folio_set_mappedtodisk(folio);
24182426

2419-
if (!nr) {
2420-
/*
2421-
* All buffers are uptodate or get_block() returned an
2422-
* error when trying to map them - we can finish the read.
2423-
*/
2424-
folio_end_read(folio, !page_error);
2425-
return 0;
2426-
}
2427-
2428-
/* Stage two: lock the buffers */
2429-
for (i = 0; i < nr; i++) {
2430-
bh = arr[i];
2431-
lock_buffer(bh);
2432-
mark_buffer_async_read(bh);
2433-
}
2434-
24352427
/*
2436-
* Stage 3: start the IO. Check for uptodateness
2437-
* inside the buffer lock in case another process reading
2438-
* the underlying blockdev brought it uptodate (the sct fix).
2428+
* All buffers are uptodate or get_block() returned an error
2429+
* when trying to map them - we must finish the read because
2430+
* end_buffer_async_read() will never be called on any buffer
2431+
* in this folio.
24392432
*/
2440-
for (i = 0; i < nr; i++) {
2441-
bh = arr[i];
2442-
if (buffer_uptodate(bh))
2443-
end_buffer_async_read(bh, 1);
2444-
else
2445-
submit_bh(REQ_OP_READ, bh);
2446-
}
2433+
if (prev)
2434+
submit_bh(REQ_OP_READ, prev);
2435+
else
2436+
folio_end_read(folio, !page_error);
2437+
24472438
return 0;
24482439
}
24492440
EXPORT_SYMBOL(block_read_full_folio);

0 commit comments

Comments
 (0)