
Commit 69ccfe7

osalvadorvilardaga authored and torvalds committed
x86/vmemmap: drop handling of 1GB vmemmap ranges
There is no code to allocate 1GB pages when mapping the vmemmap range, as this might waste some memory and would require more complexity, which is not really worth it. Drop the dead code for both the aligned and unaligned cases and leave only the direct-map handling.

Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Oscar Salvador <[email protected]>
Suggested-by: David Hildenbrand <[email protected]>
Reviewed-by: David Hildenbrand <[email protected]>
Acked-by: Dave Hansen <[email protected]>
Cc: Andy Lutomirski <[email protected]>
Cc: Borislav Petkov <[email protected]>
Cc: "H. Peter Anvin" <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: Michal Hocko <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: Zi Yan <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
1 parent 8e2df19 commit 69ccfe7

File tree

1 file changed: +7 -28

arch/x86/mm/init_64.c

Lines changed: 7 additions & 28 deletions
@@ -1062,7 +1062,6 @@ remove_pud_table(pud_t *pud_start, unsigned long addr, unsigned long end,
 	unsigned long next, pages = 0;
 	pmd_t *pmd_base;
 	pud_t *pud;
-	void *page_addr;
 
 	pud = pud_start + pud_index(addr);
 	for (; addr < end; addr = next, pud++) {
@@ -1071,33 +1070,13 @@ remove_pud_table(pud_t *pud_start, unsigned long addr, unsigned long end,
 		if (!pud_present(*pud))
 			continue;
 
-		if (pud_large(*pud)) {
-			if (IS_ALIGNED(addr, PUD_SIZE) &&
-			    IS_ALIGNED(next, PUD_SIZE)) {
-				if (!direct)
-					free_pagetable(pud_page(*pud),
-						       get_order(PUD_SIZE));
-
-				spin_lock(&init_mm.page_table_lock);
-				pud_clear(pud);
-				spin_unlock(&init_mm.page_table_lock);
-				pages++;
-			} else {
-				/* If here, we are freeing vmemmap pages. */
-				memset((void *)addr, PAGE_INUSE, next - addr);
-
-				page_addr = page_address(pud_page(*pud));
-				if (!memchr_inv(page_addr, PAGE_INUSE,
-						PUD_SIZE)) {
-					free_pagetable(pud_page(*pud),
-						       get_order(PUD_SIZE));
-
-					spin_lock(&init_mm.page_table_lock);
-					pud_clear(pud);
-					spin_unlock(&init_mm.page_table_lock);
-				}
-			}
-
+		if (pud_large(*pud) &&
+		    IS_ALIGNED(addr, PUD_SIZE) &&
+		    IS_ALIGNED(next, PUD_SIZE)) {
+			spin_lock(&init_mm.page_table_lock);
+			pud_clear(pud);
+			spin_unlock(&init_mm.page_table_lock);
+			pages++;
 			continue;
 		}
 