Skip to content

Commit bc6123a

Browse files
author
Matthew Wilcox (Oracle)
committed
iomap: Convert iomap_write_begin() and iomap_write_end() to folios
These functions still only work in PAGE_SIZE chunks, but there are fewer conversions from tail to head pages as a result of this patch.

Signed-off-by: Matthew Wilcox (Oracle) <[email protected]>
Reviewed-by: Christoph Hellwig <[email protected]>
Reviewed-by: Darrick J. Wong <[email protected]>
1 parent a25def1 commit bc6123a

File tree

1 file changed

+33
-38
lines changed

1 file changed

+33
-38
lines changed

fs/iomap/buffered-io.c

Lines changed: 33 additions & 38 deletions
Original file line numberDiff line numberDiff line change
@@ -550,9 +550,8 @@ static int iomap_read_folio_sync(loff_t block_start, struct folio *folio,
550550
}
551551

552552
static int __iomap_write_begin(const struct iomap_iter *iter, loff_t pos,
553-
unsigned len, struct page *page)
553+
size_t len, struct folio *folio)
554554
{
555-
struct folio *folio = page_folio(page);
556555
const struct iomap *srcmap = iomap_iter_srcmap(iter);
557556
struct iomap_page *iop = iomap_page_create(iter->inode, folio);
558557
loff_t block_size = i_blocksize(iter->inode);
@@ -593,23 +592,21 @@ static int __iomap_write_begin(const struct iomap_iter *iter, loff_t pos,
593592
}
594593

595594
static int iomap_write_begin_inline(const struct iomap_iter *iter,
596-
struct page *page)
595+
struct folio *folio)
597596
{
598-
struct folio *folio = page_folio(page);
599-
600597
/* needs more work for the tailpacking case; disable for now */
601598
if (WARN_ON_ONCE(iomap_iter_srcmap(iter)->offset != 0))
602599
return -EIO;
603600
return iomap_read_inline_data(iter, folio);
604601
}
605602

606603
static int iomap_write_begin(const struct iomap_iter *iter, loff_t pos,
607-
unsigned len, struct page **pagep)
604+
size_t len, struct folio **foliop)
608605
{
609606
const struct iomap_page_ops *page_ops = iter->iomap.page_ops;
610607
const struct iomap *srcmap = iomap_iter_srcmap(iter);
611-
struct page *page;
612608
struct folio *folio;
609+
unsigned fgp = FGP_LOCK | FGP_WRITE | FGP_CREAT | FGP_STABLE | FGP_NOFS;
613610
int status = 0;
614611

615612
BUG_ON(pos + len > iter->iomap.offset + iter->iomap.length);
@@ -628,32 +625,31 @@ static int iomap_write_begin(const struct iomap_iter *iter, loff_t pos,
628625
return status;
629626
}
630627

631-
page = grab_cache_page_write_begin(iter->inode->i_mapping,
632-
pos >> PAGE_SHIFT, AOP_FLAG_NOFS);
633-
if (!page) {
628+
folio = __filemap_get_folio(iter->inode->i_mapping, pos >> PAGE_SHIFT,
629+
fgp, mapping_gfp_mask(iter->inode->i_mapping));
630+
if (!folio) {
634631
status = -ENOMEM;
635632
goto out_no_page;
636633
}
637-
folio = page_folio(page);
638634
if (pos + len > folio_pos(folio) + folio_size(folio))
639635
len = folio_pos(folio) + folio_size(folio) - pos;
640636

641637
if (srcmap->type == IOMAP_INLINE)
642-
status = iomap_write_begin_inline(iter, page);
638+
status = iomap_write_begin_inline(iter, folio);
643639
else if (srcmap->flags & IOMAP_F_BUFFER_HEAD)
644640
status = __block_write_begin_int(folio, pos, len, NULL, srcmap);
645641
else
646-
status = __iomap_write_begin(iter, pos, len, page);
642+
status = __iomap_write_begin(iter, pos, len, folio);
647643

648644
if (unlikely(status))
649645
goto out_unlock;
650646

651-
*pagep = page;
647+
*foliop = folio;
652648
return 0;
653649

654650
out_unlock:
655-
unlock_page(page);
656-
put_page(page);
651+
folio_unlock(folio);
652+
folio_put(folio);
657653
iomap_write_failed(iter->inode, pos, len);
658654

659655
out_no_page:
@@ -663,11 +659,10 @@ static int iomap_write_begin(const struct iomap_iter *iter, loff_t pos,
663659
}
664660

665661
static size_t __iomap_write_end(struct inode *inode, loff_t pos, size_t len,
666-
size_t copied, struct page *page)
662+
size_t copied, struct folio *folio)
667663
{
668-
struct folio *folio = page_folio(page);
669664
struct iomap_page *iop = to_iomap_page(folio);
670-
flush_dcache_page(page);
665+
flush_dcache_folio(folio);
671666

672667
/*
673668
* The blocks that were entirely written will now be uptodate, so we
@@ -680,10 +675,10 @@ static size_t __iomap_write_end(struct inode *inode, loff_t pos, size_t len,
680675
* non-uptodate page as a zero-length write, and force the caller to
681676
* redo the whole thing.
682677
*/
683-
if (unlikely(copied < len && !PageUptodate(page)))
678+
if (unlikely(copied < len && !folio_test_uptodate(folio)))
684679
return 0;
685680
iomap_set_range_uptodate(folio, iop, offset_in_folio(folio, pos), len);
686-
__set_page_dirty_nobuffers(page);
681+
filemap_dirty_folio(inode->i_mapping, folio);
687682
return copied;
688683
}
689684

@@ -707,20 +702,20 @@ static size_t iomap_write_end_inline(const struct iomap_iter *iter,
707702

708703
/* Returns the number of bytes copied. May be 0. Cannot be an errno. */
709704
static size_t iomap_write_end(struct iomap_iter *iter, loff_t pos, size_t len,
710-
size_t copied, struct page *page)
705+
size_t copied, struct folio *folio)
711706
{
712707
const struct iomap_page_ops *page_ops = iter->iomap.page_ops;
713708
const struct iomap *srcmap = iomap_iter_srcmap(iter);
714709
loff_t old_size = iter->inode->i_size;
715710
size_t ret;
716711

717712
if (srcmap->type == IOMAP_INLINE) {
718-
ret = iomap_write_end_inline(iter, page, pos, copied);
713+
ret = iomap_write_end_inline(iter, &folio->page, pos, copied);
719714
} else if (srcmap->flags & IOMAP_F_BUFFER_HEAD) {
720715
ret = block_write_end(NULL, iter->inode->i_mapping, pos, len,
721-
copied, page, NULL);
716+
copied, &folio->page, NULL);
722717
} else {
723-
ret = __iomap_write_end(iter->inode, pos, len, copied, page);
718+
ret = __iomap_write_end(iter->inode, pos, len, copied, folio);
724719
}
725720

726721
/*
@@ -732,13 +727,13 @@ static size_t iomap_write_end(struct iomap_iter *iter, loff_t pos, size_t len,
732727
i_size_write(iter->inode, pos + ret);
733728
iter->iomap.flags |= IOMAP_F_SIZE_CHANGED;
734729
}
735-
unlock_page(page);
730+
folio_unlock(folio);
736731

737732
if (old_size < pos)
738733
pagecache_isize_extended(iter->inode, old_size, pos);
739734
if (page_ops && page_ops->page_done)
740-
page_ops->page_done(iter->inode, pos, ret, page);
741-
put_page(page);
735+
page_ops->page_done(iter->inode, pos, ret, &folio->page);
736+
folio_put(folio);
742737

743738
if (ret < len)
744739
iomap_write_failed(iter->inode, pos, len);
@@ -753,6 +748,7 @@ static loff_t iomap_write_iter(struct iomap_iter *iter, struct iov_iter *i)
753748
long status = 0;
754749

755750
do {
751+
struct folio *folio;
756752
struct page *page;
757753
unsigned long offset; /* Offset into pagecache page */
758754
unsigned long bytes; /* Bytes to write to page */
@@ -776,16 +772,17 @@ static loff_t iomap_write_iter(struct iomap_iter *iter, struct iov_iter *i)
776772
break;
777773
}
778774

779-
status = iomap_write_begin(iter, pos, bytes, &page);
775+
status = iomap_write_begin(iter, pos, bytes, &folio);
780776
if (unlikely(status))
781777
break;
782778

779+
page = folio_file_page(folio, pos >> PAGE_SHIFT);
783780
if (mapping_writably_mapped(iter->inode->i_mapping))
784781
flush_dcache_page(page);
785782

786783
copied = copy_page_from_iter_atomic(page, offset, bytes, i);
787784

788-
status = iomap_write_end(iter, pos, bytes, copied, page);
785+
status = iomap_write_end(iter, pos, bytes, copied, folio);
789786

790787
if (unlikely(copied != status))
791788
iov_iter_revert(i, copied - status);
@@ -851,13 +848,13 @@ static loff_t iomap_unshare_iter(struct iomap_iter *iter)
851848
do {
852849
unsigned long offset = offset_in_page(pos);
853850
unsigned long bytes = min_t(loff_t, PAGE_SIZE - offset, length);
854-
struct page *page;
851+
struct folio *folio;
855852

856-
status = iomap_write_begin(iter, pos, bytes, &page);
853+
status = iomap_write_begin(iter, pos, bytes, &folio);
857854
if (unlikely(status))
858855
return status;
859856

860-
status = iomap_write_end(iter, pos, bytes, bytes, page);
857+
status = iomap_write_end(iter, pos, bytes, bytes, folio);
861858
if (WARN_ON_ONCE(status == 0))
862859
return -EIO;
863860

@@ -894,15 +891,13 @@ EXPORT_SYMBOL_GPL(iomap_file_unshare);
894891
static s64 __iomap_zero_iter(struct iomap_iter *iter, loff_t pos, u64 length)
895892
{
896893
struct folio *folio;
897-
struct page *page;
898894
int status;
899895
size_t offset;
900-
unsigned bytes = min_t(u64, UINT_MAX, length);
896+
size_t bytes = min_t(u64, SIZE_MAX, length);
901897

902-
status = iomap_write_begin(iter, pos, bytes, &page);
898+
status = iomap_write_begin(iter, pos, bytes, &folio);
903899
if (status)
904900
return status;
905-
folio = page_folio(page);
906901

907902
offset = offset_in_folio(folio, pos);
908903
if (bytes > folio_size(folio) - offset)
@@ -911,7 +906,7 @@ static s64 __iomap_zero_iter(struct iomap_iter *iter, loff_t pos, u64 length)
911906
folio_zero_range(folio, offset, bytes);
912907
folio_mark_accessed(folio);
913908

914-
return iomap_write_end(iter, pos, bytes, bytes, page);
909+
return iomap_write_end(iter, pos, bytes, bytes, folio);
915910
}
916911

917912
static loff_t iomap_zero_iter(struct iomap_iter *iter, bool *did_zero)

0 commit comments

Comments (0)