
Commit 8e2df19

osalvadorvilardaga authored and torvalds committed
x86/vmemmap: drop handling of 4K unaligned vmemmap range
Patch series "Cleanup and fixups for vmemmap handling", v6.

This series contains cleanups to remove dead code that handles unaligned cases for 4K and 1GB pages (patch#1 and patch#2) when removing the vmemmap range, and a fix (patch#3) to handle the case when two vmemmap ranges intersect the same PMD.

This patch (of 4):

remove_pte_table() is prepared to handle the case where either the start or the end of the range is not PAGE aligned. This cannot actually happen: __populate_section_memmap enforces the range to be PMD aligned, so as long as the size of the struct page remains a multiple of 8, the vmemmap range will be aligned to PAGE_SIZE.

Drop the dead code and place a VM_BUG_ON in vmemmap_{populate,free} to catch nasty cases. Note that the VM_BUG_ON is placed there because vmemmap_{populate,free} is the gateway for all of the page-table removal and freeing logic.

Link: https://lkml.kernel.org/r/[email protected]
Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Oscar Salvador <[email protected]>
Suggested-by: David Hildenbrand <[email protected]>
Reviewed-by: David Hildenbrand <[email protected]>
Acked-by: Dave Hansen <[email protected]>
Cc: Andy Lutomirski <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: Borislav Petkov <[email protected]>
Cc: "H. Peter Anvin" <[email protected]>
Cc: Michal Hocko <[email protected]>
Cc: Zi Yan <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
1 parent 0c1dcb0 commit 8e2df19
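For intuition, the "multiple of 8" remark in the message boils down to simple arithmetic: a 2 MiB (PMD-sized) chunk of memory contains 512 4K pages, so its slice of the vmemmap spans 512 * sizeof(struct page) bytes, and 512 * 8 == 4096 == PAGE_SIZE. A minimal userspace sketch of that check (illustrative only, not kernel code; the 4 KiB page, 2 MiB PMD, and 64-byte struct page values are assumed x86_64 defaults):

/*
 * Illustrative userspace check, not kernel code. The memmap for a
 * PMD-sized chunk of memory is a whole number of 4K pages whenever
 * sizeof(struct page) is a multiple of 8, since 512 * 8 == 4096.
 */
#include <assert.h>
#include <stdio.h>

int main(void)
{
	unsigned long page_size = 4096;        /* PAGE_SIZE on x86_64 */
	unsigned long pmd_size = 2UL << 20;    /* 2 MiB large page */
	unsigned long page_struct_size = 64;   /* a multiple of 8 */

	/* Pages contained in one PMD-sized stretch of memory ... */
	unsigned long pages_per_pmd = pmd_size / page_size;    /* 512 */

	/* ... and the bytes of memmap needed to describe them. */
	unsigned long memmap_span = pages_per_pmd * page_struct_size;

	assert(memmap_span % page_size == 0);  /* 32768 % 4096 == 0 */
	printf("memmap span: %lu bytes = %lu full pages\n",
	       memmap_span, memmap_span / page_size);
	return 0;
}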

File tree

1 file changed: +13 −35 lines


arch/x86/mm/init_64.c

Lines changed: 13 additions & 35 deletions
@@ -962,7 +962,6 @@ remove_pte_table(pte_t *pte_start, unsigned long addr, unsigned long end,
 {
 	unsigned long next, pages = 0;
 	pte_t *pte;
-	void *page_addr;
 	phys_addr_t phys_addr;
 
 	pte = pte_start + pte_index(addr);
@@ -983,42 +982,15 @@ remove_pte_table(pte_t *pte_start, unsigned long addr, unsigned long end,
 		if (phys_addr < (phys_addr_t)0x40000000)
 			return;
 
-		if (PAGE_ALIGNED(addr) && PAGE_ALIGNED(next)) {
-			/*
-			 * Do not free direct mapping pages since they were
-			 * freed when offlining, or simply not in use.
-			 */
-			if (!direct)
-				free_pagetable(pte_page(*pte), 0);
-
-			spin_lock(&init_mm.page_table_lock);
-			pte_clear(&init_mm, addr, pte);
-			spin_unlock(&init_mm.page_table_lock);
+		if (!direct)
+			free_pagetable(pte_page(*pte), 0);
 
-			/* For non-direct mapping, pages means nothing. */
-			pages++;
-		} else {
-			/*
-			 * If we are here, we are freeing vmemmap pages since
-			 * direct mapped memory ranges to be freed are aligned.
-			 *
-			 * If we are not removing the whole page, it means
-			 * other page structs in this page are being used and
-			 * we cannot remove them. So fill the unused page_structs
-			 * with 0xFD, and remove the page when it is wholly
-			 * filled with 0xFD.
-			 */
-			memset((void *)addr, PAGE_INUSE, next - addr);
-
-			page_addr = page_address(pte_page(*pte));
-			if (!memchr_inv(page_addr, PAGE_INUSE, PAGE_SIZE)) {
-				free_pagetable(pte_page(*pte), 0);
+		spin_lock(&init_mm.page_table_lock);
+		pte_clear(&init_mm, addr, pte);
+		spin_unlock(&init_mm.page_table_lock);
 
-				spin_lock(&init_mm.page_table_lock);
-				pte_clear(&init_mm, addr, pte);
-				spin_unlock(&init_mm.page_table_lock);
-			}
-		}
+		/* For non-direct mapping, pages means nothing. */
+		pages++;
 	}
 
 	/* Call free_pte_table() in remove_pmd_table(). */
@@ -1197,6 +1169,9 @@ remove_pagetable(unsigned long start, unsigned long end, bool direct,
 void __ref vmemmap_free(unsigned long start, unsigned long end,
 		struct vmem_altmap *altmap)
 {
+	VM_BUG_ON(!IS_ALIGNED(start, PAGE_SIZE));
+	VM_BUG_ON(!IS_ALIGNED(end, PAGE_SIZE));
+
 	remove_pagetable(start, end, false, altmap);
 }
 
@@ -1556,6 +1531,9 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
 {
 	int err;
 
+	VM_BUG_ON(!IS_ALIGNED(start, PAGE_SIZE));
+	VM_BUG_ON(!IS_ALIGNED(end, PAGE_SIZE));
+
 	if (end - start < PAGES_PER_SECTION * sizeof(struct page))
 		err = vmemmap_populate_basepages(start, end, node, NULL);
 	else if (boot_cpu_has(X86_FEATURE_PSE))
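One note on the checks added above: VM_BUG_ON() only fires on kernels built with CONFIG_DEBUG_VM, so the new alignment assertions generate no code on production builds. Roughly, paraphrasing include/linux/mmdebug.h (see the kernel source for the authoritative definition):

#ifdef CONFIG_DEBUG_VM
#define VM_BUG_ON(cond) BUG_ON(cond)
#else
#define VM_BUG_ON(cond) BUILD_BUG_ON_INVALID(cond)	/* type-checks only, emits no code */
#endif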
