Commit 4f8fcf4

Hugh Dickins authored and akpm00 committed
mm/swap: swap_vma_readahead() do the pte_offset_map()
swap_vma_readahead() has been proceeding in an unconventional way, its
preliminary swap_ra_info() doing the pte_offset_map() and pte_unmap(),
then relying on that pte pointer even after the pte_unmap() - in its
CONFIG_64BIT case (I think !CONFIG_HIGHPTE was intended; whereas 32-bit
copied ptes to stack while they were mapped, but had to limit how many).

Though it would be difficult to construct a failing testcase, accessing
page table after pte_unmap() will become bad practice, even on 64-bit: an
rcu_read_unlock() in pte_unmap() will allow page table to be freed.

Move relevant definitions from include/linux/swap.h to mm/swap_state.c;
nothing else used them.  Delete the CONFIG_64BIT distinction and buffer,
delete all reference to ptes from swap_ra_info(), use pte_offset_map()
repeatedly in swap_vma_readahead(), breaking from the loop if it fails.

(Will the repeated "map" and "unmap" show up as a slowdown anywhere?  If
so, maybe modify __read_swap_cache_async() to do the pte_unmap() only
when it does not find the page already in the swapcache.)

Use ptep_get_lockless(), mainly for its READ_ONCE().  Correctly advance
the address passed down to each call of __read_swap_cache_async().

Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Hugh Dickins <[email protected]>
Reviewed-by: "Huang, Ying" <[email protected]>
Cc: Alistair Popple <[email protected]>
Cc: Anshuman Khandual <[email protected]>
Cc: Axel Rasmussen <[email protected]>
Cc: Christophe Leroy <[email protected]>
Cc: Christoph Hellwig <[email protected]>
Cc: David Hildenbrand <[email protected]>
Cc: Ira Weiny <[email protected]>
Cc: Jason Gunthorpe <[email protected]>
Cc: Kirill A. Shutemov <[email protected]>
Cc: Lorenzo Stoakes <[email protected]>
Cc: Matthew Wilcox <[email protected]>
Cc: Mel Gorman <[email protected]>
Cc: Miaohe Lin <[email protected]>
Cc: Mike Kravetz <[email protected]>
Cc: Mike Rapoport (IBM) <[email protected]>
Cc: Minchan Kim <[email protected]>
Cc: Naoya Horiguchi <[email protected]>
Cc: Pavel Tatashin <[email protected]>
Cc: Peter Xu <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Qi Zheng <[email protected]>
Cc: Ralph Campbell <[email protected]>
Cc: Ryan Roberts <[email protected]>
Cc: SeongJae Park <[email protected]>
Cc: Song Liu <[email protected]>
Cc: Steven Price <[email protected]>
Cc: Suren Baghdasaryan <[email protected]>
Cc: Thomas Hellström <[email protected]>
Cc: Will Deacon <[email protected]>
Cc: Yang Shi <[email protected]>
Cc: Yu Zhao <[email protected]>
Cc: Zack Rusin <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
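For orientation before reading the hunks: a distilled sketch, not the complete function, of the loop shape the patch gives swap_vma_readahead(), with explanatory comments added; the "..." elisions stand for the swap-entry checks and readahead bookkeeping shown in full in the mm/swap_state.c diff below.

	pte_t *pte = NULL;
	unsigned long addr = vmf->address - (ra_info.offset * PAGE_SIZE);

	for (i = 0; i < ra_info.nr_pte; i++, addr += PAGE_SIZE) {
		/*
		 * pte is NULL whenever no page table is currently mapped:
		 * "!pte++" advances a still-mapped pte to the next entry,
		 * and when pte was NULL it falls into the remapping branch.
		 */
		if (!pte++) {
			pte = pte_offset_map(vmf->pmd, addr);
			if (!pte)
				break;	/* page table may have been freed */
		}
		pentry = ptep_get_lockless(pte);  /* READ_ONCE() semantics */
		...				  /* skip non-swap entries */
		pte_unmap(pte);		/* unmap before possibly blocking */
		pte = NULL;
		page = __read_swap_cache_async(entry, gfp_mask, vma,
					       addr, &page_allocated);
		...
	}
	if (pte)
		pte_unmap(pte);	/* loop may exit with the table still mapped */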
1 parent feda5c3 commit 4f8fcf4

2 files changed: +24 −40 lines

include/linux/swap.h

Lines changed: 0 additions & 19 deletions
@@ -337,25 +337,6 @@ struct swap_info_struct {
 	 */
 };
 
-#ifdef CONFIG_64BIT
-#define SWAP_RA_ORDER_CEILING	5
-#else
-/* Avoid stack overflow, because we need to save part of page table */
-#define SWAP_RA_ORDER_CEILING	3
-#define SWAP_RA_PTE_CACHE_SIZE	(1 << SWAP_RA_ORDER_CEILING)
-#endif
-
-struct vma_swap_readahead {
-	unsigned short win;
-	unsigned short offset;
-	unsigned short nr_pte;
-#ifdef CONFIG_64BIT
-	pte_t *ptes;
-#else
-	pte_t ptes[SWAP_RA_PTE_CACHE_SIZE];
-#endif
-};
-
 static inline swp_entry_t folio_swap_entry(struct folio *folio)
 {
 	swp_entry_t entry = { .val = page_private(&folio->page) };

mm/swap_state.c

Lines changed: 24 additions & 21 deletions
Original file line numberDiff line numberDiff line change
@@ -709,18 +709,22 @@ void exit_swap_address_space(unsigned int type)
 	swapper_spaces[type] = NULL;
 }
 
+#define SWAP_RA_ORDER_CEILING	5
+
+struct vma_swap_readahead {
+	unsigned short win;
+	unsigned short offset;
+	unsigned short nr_pte;
+};
+
 static void swap_ra_info(struct vm_fault *vmf,
 			 struct vma_swap_readahead *ra_info)
 {
 	struct vm_area_struct *vma = vmf->vma;
 	unsigned long ra_val;
 	unsigned long faddr, pfn, fpfn, lpfn, rpfn;
 	unsigned long start, end;
-	pte_t *pte, *orig_pte;
 	unsigned int max_win, hits, prev_win, win;
-#ifndef CONFIG_64BIT
-	pte_t *tpte;
-#endif
 
 	max_win = 1 << min_t(unsigned int, READ_ONCE(page_cluster),
 			     SWAP_RA_ORDER_CEILING);
@@ -739,12 +743,9 @@ static void swap_ra_info(struct vm_fault *vmf,
 					       max_win, prev_win);
 	atomic_long_set(&vma->swap_readahead_info,
 			SWAP_RA_VAL(faddr, win, 0));
-
 	if (win == 1)
 		return;
 
-	/* Copy the PTEs because the page table may be unmapped */
-	orig_pte = pte = pte_offset_map(vmf->pmd, faddr);
 	if (fpfn == pfn + 1) {
 		lpfn = fpfn;
 		rpfn = fpfn + win;
@@ -764,15 +765,6 @@ static void swap_ra_info(struct vm_fault *vmf,
 
 	ra_info->nr_pte = end - start;
 	ra_info->offset = fpfn - start;
-	pte -= ra_info->offset;
-#ifdef CONFIG_64BIT
-	ra_info->ptes = pte;
-#else
-	tpte = ra_info->ptes;
-	for (pfn = start; pfn != end; pfn++)
-		*tpte++ = *pte++;
-#endif
-	pte_unmap(orig_pte);
 }
 
 /**
@@ -796,7 +788,8 @@ static struct page *swap_vma_readahead(swp_entry_t fentry, gfp_t gfp_mask,
 	struct swap_iocb *splug = NULL;
 	struct vm_area_struct *vma = vmf->vma;
 	struct page *page;
-	pte_t *pte, pentry;
+	pte_t *pte = NULL, pentry;
+	unsigned long addr;
 	swp_entry_t entry;
 	unsigned int i;
 	bool page_allocated;
@@ -808,17 +801,25 @@ static struct page *swap_vma_readahead(swp_entry_t fentry, gfp_t gfp_mask,
 	if (ra_info.win == 1)
 		goto skip;
 
+	addr = vmf->address - (ra_info.offset * PAGE_SIZE);
+
 	blk_start_plug(&plug);
-	for (i = 0, pte = ra_info.ptes; i < ra_info.nr_pte;
-	     i++, pte++) {
-		pentry = *pte;
+	for (i = 0; i < ra_info.nr_pte; i++, addr += PAGE_SIZE) {
+		if (!pte++) {
+			pte = pte_offset_map(vmf->pmd, addr);
+			if (!pte)
+				break;
+		}
+		pentry = ptep_get_lockless(pte);
 		if (!is_swap_pte(pentry))
 			continue;
 		entry = pte_to_swp_entry(pentry);
 		if (unlikely(non_swap_entry(entry)))
 			continue;
+		pte_unmap(pte);
+		pte = NULL;
 		page = __read_swap_cache_async(entry, gfp_mask, vma,
-					       vmf->address, &page_allocated);
+					       addr, &page_allocated);
 		if (!page)
 			continue;
 		if (page_allocated) {
@@ -830,6 +831,8 @@ static struct page *swap_vma_readahead(swp_entry_t fentry, gfp_t gfp_mask,
 		}
 		put_page(page);
 	}
+	if (pte)
+		pte_unmap(pte);
 	blk_finish_plug(&plug);
 	swap_read_unplug(splug);
 	lru_add_drain();
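On the commit message's open question (whether the repeated map/unmap shows up as a slowdown): purely as a hypothetical sketch of the alternative it floats, and not the kernel's actual API, the change might look like a helper that unmaps only when the page is not already in the swapcache. The helper name and the extra ptep parameter are invented here for illustration.

/*
 * HYPOTHETICAL sketch of the optimization floated above -- not real
 * kernel API.  Idea: keep the page table mapped across swapcache hits,
 * and only pte_unmap() before the blocking allocate-and-read slow path.
 */
static struct page *swap_cache_async_or_unmap(swp_entry_t entry,
		gfp_t gfp_mask, struct vm_area_struct *vma,
		unsigned long addr, bool *page_allocated,
		pte_t **ptep)				/* invented argument */
{
	/* fast path: page already in swapcache, leave *ptep mapped */
	struct page *page = find_get_page(swap_address_space(entry),
					  swp_offset(entry));
	if (page) {
		*page_allocated = false;
		return page;
	}

	/* slow path: drop the pte mapping before allocation and I/O */
	pte_unmap(*ptep);
	*ptep = NULL;
	return __read_swap_cache_async(entry, gfp_mask, vma, addr,
				       page_allocated);
}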
