Commit fae7d83

Matthew Wilcox (Oracle) authored and akpm00 committed
mm: add __dump_folio()
Turn __dump_page() into a wrapper around __dump_folio(). Snapshot the page
& folio into a stack variable so we don't hit BUG_ON() if an allocation is
freed under us and what was a folio pointer becomes a pointer to a tail page.

[[email protected]: fix build issue]
Link: https://lkml.kernel.org/r/[email protected]
[[email protected]: fix __dump_folio]
Link: https://lkml.kernel.org/r/[email protected]
[[email protected]: fix pointer confusion]
Link: https://lkml.kernel.org/r/[email protected]
[[email protected]: s/printk/pr_warn/]
Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Matthew Wilcox (Oracle) <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
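The pattern the commit describes is: copy the struct page (and, when the copy still looks like part of a folio, the first two struct pages of that folio) into stack variables, sanity-check the copy, retry a few times if a racing free made the snapshot inconsistent, and dump only the copy. A minimal userspace sketch of that snapshot-and-retry idea follows; the record/group layout is hypothetical and only stands in for page/folio, it is not the kernel code itself:

#include <stdio.h>
#include <string.h>

/* Hypothetical stand-in for struct page: one member of a variable-sized group. */
struct record {
	struct record *head;	/* first record of the group */
	unsigned long nr;	/* group size, valid only in the head */
	unsigned long flags;
};

/* Dump from stack snapshots so a concurrent free/regroup cannot crash us. */
static void dump_record(const struct record *live)
{
	struct record tail, head;
	int loops = 5;

again:
	memcpy(&tail, live, sizeof(tail));	/* snapshot the record itself */
	memcpy(&head, tail.head, sizeof(head));	/* snapshot its group head */
	if ((unsigned long)(live - tail.head) >= head.nr) {
		if (loops-- > 0)
			goto again;		/* raced with a change; retry */
		printf("record does not match group\n");
		return;
	}
	printf("flags:%#lx in a group of %lu\n", head.flags, head.nr);
}

int main(void)
{
	struct record group[2] = {
		{ .head = group, .nr = 2, .flags = 0x5 },
		{ .head = group, .nr = 2, .flags = 0x0 },
	};

	dump_record(&group[1]);
	return 0;
}

The kernel version below does the same thing with page_folio(), folio_page_idx() and MAX_FOLIO_NR_PAGES instead of the illustrative head/nr fields.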
1 parent 7da8988 commit fae7d83

3 files changed: +83 -55 lines

include/linux/mm.h

Lines changed: 7 additions & 0 deletions
@@ -2066,6 +2066,13 @@ static inline long folio_nr_pages(struct folio *folio)
 #endif
 }
 
+/* Only hugetlbfs can allocate folios larger than MAX_ORDER */
+#ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE
+#define MAX_FOLIO_NR_PAGES	(1UL << PUD_ORDER)
+#else
+#define MAX_FOLIO_NR_PAGES	MAX_ORDER_NR_PAGES
+#endif
+
 /*
  * compound_nr() returns the number of pages in this potentially compound
  * page.  compound_nr() can be called on a tail page, and is defined to

include/linux/mmzone.h

Lines changed: 3 additions & 0 deletions
@@ -76,9 +76,12 @@ extern const char * const migratetype_names[MIGRATE_TYPES];
 #ifdef CONFIG_CMA
 #  define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA)
 #  define is_migrate_cma_page(_page) (get_pageblock_migratetype(_page) == MIGRATE_CMA)
+#  define is_migrate_cma_folio(folio, pfn)	(MIGRATE_CMA ==		\
+	get_pfnblock_flags_mask(&folio->page, pfn, MIGRATETYPE_MASK))
 #else
 #  define is_migrate_cma(migratetype) false
 #  define is_migrate_cma_page(_page) false
+#  define is_migrate_cma_folio(folio, pfn) false
 #endif
 
 static inline bool is_migrate_movable(int mt)

mm/debug.c

Lines changed: 73 additions & 55 deletions
@@ -51,84 +51,102 @@ const struct trace_print_flags vmaflag_names[] = {
 	{0, NULL}
 };
 
-static void __dump_page(struct page *page)
+static void __dump_folio(struct folio *folio, struct page *page,
+		unsigned long pfn, unsigned long idx)
 {
-	struct folio *folio = page_folio(page);
-	struct page *head = &folio->page;
-	struct address_space *mapping;
-	bool compound = PageCompound(page);
-	/*
-	 * Accessing the pageblock without the zone lock. It could change to
-	 * "isolate" again in the meantime, but since we are just dumping the
-	 * state for debugging, it should be fine to accept a bit of
-	 * inaccuracy here due to racing.
-	 */
-	bool page_cma = is_migrate_cma_page(page);
-	int mapcount;
+	struct address_space *mapping = folio_mapping(folio);
+	int mapcount = 0;
 	char *type = "";
 
-	if (page < head || (page >= head + MAX_ORDER_NR_PAGES)) {
-		/*
-		 * Corrupt page, so we cannot call page_mapping. Instead, do a
-		 * safe subset of the steps that page_mapping() does. Caution:
-		 * this will be misleading for tail pages, PageSwapCache pages,
-		 * and potentially other situations. (See the page_mapping()
-		 * implementation for what's missing here.)
-		 */
-		unsigned long tmp = (unsigned long)page->mapping;
-
-		if (tmp & PAGE_MAPPING_ANON)
-			mapping = NULL;
-		else
-			mapping = (void *)(tmp & ~PAGE_MAPPING_FLAGS);
-		head = page;
-		folio = (struct folio *)page;
-		compound = false;
-	} else {
-		mapping = page_mapping(page);
-	}
-
 	/*
-	 * Avoid VM_BUG_ON() in page_mapcount().
-	 * page->_mapcount space in struct page is used by sl[aou]b pages to
-	 * encode own info.
+	 * page->_mapcount space in struct page is used by slab pages to
+	 * encode own info, and we must avoid calling page_folio() again.
 	 */
-	mapcount = PageSlab(head) ? 0 : page_mapcount(page);
-
-	pr_warn("page:%p refcount:%d mapcount:%d mapping:%p index:%#lx pfn:%#lx\n",
-			page, page_ref_count(head), mapcount, mapping,
-			page_to_pgoff(page), page_to_pfn(page));
-	if (compound) {
-		pr_warn("head:%p order:%u entire_mapcount:%d nr_pages_mapped:%d pincount:%d\n",
-				head, compound_order(head),
+	if (!folio_test_slab(folio)) {
+		mapcount = atomic_read(&page->_mapcount) + 1;
+		if (folio_test_large(folio))
+			mapcount += folio_entire_mapcount(folio);
+	}
+
+	pr_warn("page: refcount:%d mapcount:%d mapping:%p index:%#lx pfn:%#lx\n",
+			folio_ref_count(folio), mapcount, mapping,
+			folio->index + idx, pfn);
+	if (folio_test_large(folio)) {
+		pr_warn("head: order:%u entire_mapcount:%d nr_pages_mapped:%d pincount:%d\n",
+				folio_order(folio),
 				folio_entire_mapcount(folio),
 				folio_nr_pages_mapped(folio),
 				atomic_read(&folio->_pincount));
 	}
 
 #ifdef CONFIG_MEMCG
-	if (head->memcg_data)
-		pr_warn("memcg:%lx\n", head->memcg_data);
+	if (folio->memcg_data)
+		pr_warn("memcg:%lx\n", folio->memcg_data);
 #endif
-	if (PageKsm(page))
+	if (folio_test_ksm(folio))
 		type = "ksm ";
-	else if (PageAnon(page))
+	else if (folio_test_anon(folio))
 		type = "anon ";
 	else if (mapping)
 		dump_mapping(mapping);
 	BUILD_BUG_ON(ARRAY_SIZE(pageflag_names) != __NR_PAGEFLAGS + 1);
 
-	pr_warn("%sflags: %pGp%s\n", type, &head->flags,
-		page_cma ? " CMA" : "");
-	pr_warn("page_type: %pGt\n", &head->page_type);
+	/*
+	 * Accessing the pageblock without the zone lock. It could change to
+	 * "isolate" again in the meantime, but since we are just dumping the
+	 * state for debugging, it should be fine to accept a bit of
+	 * inaccuracy here due to racing.
+	 */
+	pr_warn("%sflags: %pGp%s\n", type, &folio->flags,
+		is_migrate_cma_folio(folio, pfn) ? " CMA" : "");
+	pr_warn("page_type: %pGt\n", &folio->page.page_type);
 
 	print_hex_dump(KERN_WARNING, "raw: ", DUMP_PREFIX_NONE, 32,
 			sizeof(unsigned long), page,
 			sizeof(struct page), false);
-	if (head != page)
+	if (folio_test_large(folio))
 		print_hex_dump(KERN_WARNING, "head: ", DUMP_PREFIX_NONE, 32,
-				sizeof(unsigned long), head,
-				sizeof(struct page), false);
+			sizeof(unsigned long), folio,
+			2 * sizeof(struct page), false);
+}
+
+static void __dump_page(const struct page *page)
+{
+	struct folio *foliop, folio;
+	struct page precise;
+	unsigned long pfn = page_to_pfn(page);
+	unsigned long idx, nr_pages = 1;
+	int loops = 5;
+
+again:
+	memcpy(&precise, page, sizeof(*page));
+	foliop = page_folio(&precise);
+	if (foliop == (struct folio *)&precise) {
+		idx = 0;
+		if (!folio_test_large(foliop))
+			goto dump;
+		foliop = (struct folio *)page;
+	} else {
+		idx = folio_page_idx(foliop, page);
+	}
+
+	if (idx < MAX_FOLIO_NR_PAGES) {
+		memcpy(&folio, foliop, 2 * sizeof(struct page));
+		nr_pages = folio_nr_pages(&folio);
+		foliop = &folio;
+	}
+
+	if (idx > nr_pages) {
+		if (loops-- > 0)
+			goto again;
+		pr_warn("page does not match folio\n");
+		precise.compound_head &= ~1UL;
+		foliop = (struct folio *)&precise;
+		idx = 0;
+	}
+
+dump:
+	__dump_folio(foliop, &precise, pfn, idx);
 }
 
 void dump_page(struct page *page, const char *reason)
