@@ -51,84 +51,102 @@ const struct trace_print_flags vmaflag_names[] = {
 	{0, NULL}
 };
 
-static void __dump_page(struct page *page)
+static void __dump_folio(struct folio *folio, struct page *page,
+		unsigned long pfn, unsigned long idx)
 {
-	struct folio *folio = page_folio(page);
-	struct page *head = &folio->page;
-	struct address_space *mapping;
-	bool compound = PageCompound(page);
-	/*
-	 * Accessing the pageblock without the zone lock. It could change to
-	 * "isolate" again in the meantime, but since we are just dumping the
-	 * state for debugging, it should be fine to accept a bit of
-	 * inaccuracy here due to racing.
-	 */
-	bool page_cma = is_migrate_cma_page(page);
-	int mapcount;
+	struct address_space *mapping = folio_mapping(folio);
+	int mapcount = 0;
 	char *type = "";
 
-	if (page < head || (page >= head + MAX_ORDER_NR_PAGES)) {
-		/*
-		 * Corrupt page, so we cannot call page_mapping. Instead, do a
-		 * safe subset of the steps that page_mapping() does. Caution:
-		 * this will be misleading for tail pages, PageSwapCache pages,
-		 * and potentially other situations. (See the page_mapping()
-		 * implementation for what's missing here.)
-		 */
-		unsigned long tmp = (unsigned long)page->mapping;
-
-		if (tmp & PAGE_MAPPING_ANON)
-			mapping = NULL;
-		else
-			mapping = (void *)(tmp & ~PAGE_MAPPING_FLAGS);
-		head = page;
-		folio = (struct folio *)page;
-		compound = false;
-	} else {
-		mapping = page_mapping(page);
-	}
-
 	/*
-	 * Avoid VM_BUG_ON() in page_mapcount().
-	 * page->_mapcount space in struct page is used by sl[aou]b pages to
-	 * encode own info.
+	 * page->_mapcount space in struct page is used by slab pages to
+	 * encode own info, and we must avoid calling page_folio() again.
 	 */
-	mapcount = PageSlab(head) ? 0 : page_mapcount(page);
-
-	pr_warn("page:%p refcount:%d mapcount:%d mapping:%p index:%#lx pfn:%#lx\n",
-			page, page_ref_count(head), mapcount, mapping,
-			page_to_pgoff(page), page_to_pfn(page));
-	if (compound) {
-		pr_warn("head:%p order:%u entire_mapcount:%d nr_pages_mapped:%d pincount:%d\n",
-				head, compound_order(head),
+	if (!folio_test_slab(folio)) {
+		mapcount = atomic_read(&page->_mapcount) + 1;
+		if (folio_test_large(folio))
+			mapcount += folio_entire_mapcount(folio);
+	}
+
+	pr_warn("page: refcount:%d mapcount:%d mapping:%p index:%#lx pfn:%#lx\n",
+			folio_ref_count(folio), mapcount, mapping,
+			folio->index + idx, pfn);
+	if (folio_test_large(folio)) {
+		pr_warn("head: order:%u entire_mapcount:%d nr_pages_mapped:%d pincount:%d\n",
+				folio_order(folio),
 				folio_entire_mapcount(folio),
 				folio_nr_pages_mapped(folio),
 				atomic_read(&folio->_pincount));
 	}
 
 #ifdef CONFIG_MEMCG
-	if (head->memcg_data)
-		pr_warn("memcg:%lx\n", head->memcg_data);
+	if (folio->memcg_data)
+		pr_warn("memcg:%lx\n", folio->memcg_data);
 #endif
-	if (PageKsm(page))
+	if (folio_test_ksm(folio))
 		type = "ksm ";
-	else if (PageAnon(page))
+	else if (folio_test_anon(folio))
 		type = "anon ";
 	else if (mapping)
 		dump_mapping(mapping);
 	BUILD_BUG_ON(ARRAY_SIZE(pageflag_names) != __NR_PAGEFLAGS + 1);
 
-	pr_warn("%sflags: %pGp%s\n", type, &head->flags,
-		page_cma ? " CMA" : "");
-	pr_warn("page_type: %pGt\n", &head->page_type);
+	/*
+	 * Accessing the pageblock without the zone lock. It could change to
+	 * "isolate" again in the meantime, but since we are just dumping the
+	 * state for debugging, it should be fine to accept a bit of
+	 * inaccuracy here due to racing.
+	 */
+	pr_warn("%sflags: %pGp%s\n", type, &folio->flags,
+		is_migrate_cma_folio(folio, pfn) ? " CMA" : "");
+	pr_warn("page_type: %pGt\n", &folio->page.page_type);
 
 	print_hex_dump(KERN_WARNING, "raw: ", DUMP_PREFIX_NONE, 32,
 			sizeof(unsigned long), page,
 			sizeof(struct page), false);
-	if (head != page)
+	if (folio_test_large(folio))
 		print_hex_dump(KERN_WARNING, "head: ", DUMP_PREFIX_NONE, 32,
-			sizeof(unsigned long), head,
-			sizeof(struct page), false);
+			sizeof(unsigned long), folio,
+			2 * sizeof(struct page), false);
+}
+
+static void __dump_page(const struct page *page)
+{
+	struct folio *foliop, folio;
+	struct page precise;
+	unsigned long pfn = page_to_pfn(page);
+	unsigned long idx, nr_pages = 1;
+	int loops = 5;
+
+again:
+	memcpy(&precise, page, sizeof(*page));
+	foliop = page_folio(&precise);
+	if (foliop == (struct folio *)&precise) {
+		idx = 0;
+		if (!folio_test_large(foliop))
+			goto dump;
+		foliop = (struct folio *)page;
+	} else {
+		idx = folio_page_idx(foliop, page);
+	}
+
+	if (idx < MAX_FOLIO_NR_PAGES) {
+		memcpy(&folio, foliop, 2 * sizeof(struct page));
+		nr_pages = folio_nr_pages(&folio);
+		foliop = &folio;
+	}
+
+	if (idx > nr_pages) {
+		if (loops-- > 0)
+			goto again;
+		pr_warn("page does not match folio\n");
+		precise.compound_head &= ~1UL;
+		foliop = (struct folio *)&precise;
+		idx = 0;
+	}
+
+dump:
+	__dump_folio(foliop, &precise, pfn, idx);
 }
 
 void dump_page(struct page *page, const char *reason)
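
The core of the rewrite is that __dump_page() no longer dereferences the possibly-corrupt, possibly-concurrently-changing struct page directly. It memcpy()s the page into a local copy named precise, works out which folio that snapshot claims to belong to, re-validates the pair, and retries up to five times on a mismatch before clearing the tail bit in compound_head and dumping the page as a standalone one. Below is a minimal userspace C sketch of that snapshot-and-retry pattern; the obj type and the snapshot_ok() consistency rule are invented stand-ins, illustrating the shape of the kernel logic rather than its real layout or checks.

#include <stdio.h>
#include <string.h>

struct obj {
	unsigned long head;	/* rough analogue of page->compound_head */
	unsigned long index;	/* rough analogue of folio_page_idx() */
};

/* Invented consistency rule; the kernel compares idx against folio_nr_pages(). */
static int snapshot_ok(const struct obj *snap)
{
	return snap->index <= snap->head;
}

static void dump_obj(const volatile struct obj *obj)
{
	struct obj snap;	/* local copy, like "struct page precise" */
	int loops = 5;		/* bounded retries, as in __dump_page() */

	do {
		/* Snapshot the racy object, then validate only the copy. */
		memcpy(&snap, (const void *)obj, sizeof(snap));
		if (snapshot_ok(&snap))
			break;
	} while (loops-- > 0);

	if (!snapshot_ok(&snap)) {
		/* Persistent mismatch: degrade to a standalone dump,
		 * as the kernel does by clearing the tail bit. */
		snap.head = 0;
		puts("object does not match its parent; dumping it alone");
	}
	printf("head:%#lx index:%#lx\n", snap.head, snap.index);
}

int main(void)
{
	struct obj o = { .head = 8, .index = 3 };

	dump_obj(&o);
	return 0;
}

The bounded loop is the important design choice: this code runs precisely when the VM is already in trouble, so it must terminate and print something even if the object under it never settles into a consistent state.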
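The external contract is unchanged: dump_page() still takes a possibly-garbage page pointer plus a human-readable reason, so existing call sites keep working. A sketch of a typical caller follows; dump_page(), WARN_ON() and page_ref_count() are the real kernel interfaces, but this particular sanity check is made up for illustration:

	/* Hypothetical call site: report and dump a page whose
	 * refcount has gone negative. */
	if (WARN_ON(page_ref_count(page) < 0))
		dump_page(page, "negative page refcount");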