  * page allocated from page pool. Page splitting enables memory saving and thus
  * avoids TLB/cache miss for data access, but there also is some cost to
  * implement page splitting, mainly some cache line dirtying/bouncing for
- * 'struct page' and atomic operation for page->pp_frag_count.
+ * 'struct page' and atomic operation for page->pp_ref_count.
  *
  * The API keeps track of in-flight pages, in order to let API users know when
  * it is safe to free a page_pool object, the API users must call
@@ -210,69 +210,77 @@ inline enum dma_data_direction page_pool_get_dma_dir(struct page_pool *pool)
         return pool->p.dma_dir;
 }
 
-/* pp_frag_count represents the number of writers who can update the page
- * either by updating skb->data or via DMA mappings for the device.
- * We can't rely on the page refcnt for that as we don't know who might be
- * holding page references and we can't reliably destroy or sync DMA mappings
- * of the fragments.
+/**
+ * page_pool_fragment_page() - split a fresh page into fragments
+ * @page: page to split
+ * @nr: references to set
+ *
+ * pp_ref_count represents the number of outstanding references to the page,
+ * which will be freed using page_pool APIs (rather than page allocator APIs
+ * like put_page()). Such references are usually held by page_pool-aware
+ * objects like skbs marked for page pool recycling.
  *
- * When pp_frag_count reaches 0 we can either recycle the page if the page
- * refcnt is 1 or return it back to the memory allocator and destroy any
- * mappings we have.
+ * This helper allows the caller to take (set) multiple references to a
+ * freshly allocated page. The page must be freshly allocated (have a
+ * pp_ref_count of 1). This is commonly done by drivers and
+ * "fragment allocators" to save atomic operations - either when they know
+ * upfront how many references they will need; or to take MAX references and
+ * return the unused ones with a single atomic dec(), instead of performing
+ * multiple atomic inc() operations.
  */
 static inline void page_pool_fragment_page(struct page *page, long nr)
 {
-        atomic_long_set(&page->pp_frag_count, nr);
+        atomic_long_set(&page->pp_ref_count, nr);
 }
 
-static inline long page_pool_defrag_page(struct page *page, long nr)
+static inline long page_pool_unref_page(struct page *page, long nr)
 {
         long ret;
 
-        /* If nr == pp_frag_count then we have cleared all remaining
+        /* If nr == pp_ref_count then we have cleared all remaining
          * references to the page:
          * 1. 'n == 1': no need to actually overwrite it.
          * 2. 'n != 1': overwrite it with one, which is the rare case
-         *              for pp_frag_count draining.
+         *              for pp_ref_count draining.
          *
          * The main advantage to doing this is that not only do we avoid an atomic
          * update, as an atomic_read is generally a much cheaper operation than
          * an atomic update, especially when dealing with a page that may be
-         * partitioned into only 2 or 3 pieces; but also unify the pp_frag_count
+         * referenced by only 2 or 3 users; but also unify the pp_ref_count
          * handling by ensuring all pages have been partitioned into only 1 piece
          * initially, and only overwrite it when the page is partitioned into
          * more than one piece.
          */
-        if (atomic_long_read(&page->pp_frag_count) == nr) {
+        if (atomic_long_read(&page->pp_ref_count) == nr) {
                 /* As we have ensured nr is always one for constant case using
                  * the BUILD_BUG_ON(), only need to handle the non-constant case
-                 * here for pp_frag_count draining, which is a rare case.
+                 * here for pp_ref_count draining, which is a rare case.
                  */
                 BUILD_BUG_ON(__builtin_constant_p(nr) && nr != 1);
                 if (!__builtin_constant_p(nr))
-                        atomic_long_set(&page->pp_frag_count, 1);
+                        atomic_long_set(&page->pp_ref_count, 1);
 
                 return 0;
         }
 
-        ret = atomic_long_sub_return(nr, &page->pp_frag_count);
+        ret = atomic_long_sub_return(nr, &page->pp_ref_count);
         WARN_ON(ret < 0);
 
-        /* We are the last user here too, reset pp_frag_count back to 1 to
+        /* We are the last user here too, reset pp_ref_count back to 1 to
          * ensure all pages have been partitioned into 1 piece initially,
          * this should be the rare case when the last two fragment users call
-         * page_pool_defrag_page() currently.
+         * page_pool_unref_page() currently.
          */
         if (unlikely(!ret))
-                atomic_long_set(&page->pp_frag_count, 1);
+                atomic_long_set(&page->pp_ref_count, 1);
 
         return ret;
 }
 
-static inline bool page_pool_is_last_frag(struct page *page)
+static inline bool page_pool_is_last_ref(struct page *page)
 {
-        /* If page_pool_defrag_page() returns 0, we were the last user */
-        return page_pool_defrag_page(page, 1) == 0;
+        /* If page_pool_unref_page() returns 0, we were the last user */
+        return page_pool_unref_page(page, 1) == 0;
 }
 
 /**
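The kerneldoc added above describes the intended calling pattern, but the hunk itself only shows the helpers. Below is a minimal, hypothetical driver-side sketch (not part of this commit) of the "take MAX references up front, hand back the unused ones with a single unref" idea; struct mydrv_rx_ring, mydrv_ring_has_room() and mydrv_post_rx_buffer() are made-up names used purely for illustration.

#include <net/page_pool/helpers.h>

struct mydrv_rx_ring;                                                /* hypothetical */
bool mydrv_ring_has_room(struct mydrv_rx_ring *ring);                /* hypothetical */
void mydrv_post_rx_buffer(struct mydrv_rx_ring *ring, struct page *page,
                          unsigned int offset, unsigned int len);    /* hypothetical */

/* Hypothetical sketch, not part of this commit: split one freshly allocated
 * page_pool page into buf_size-sized buffers.  The page is fresh, so
 * pp_ref_count is 1 and page_pool_fragment_page() may overwrite it with a
 * single store (no atomic RMW); unused references are then handed back with
 * one page_pool_unref_page() call.
 */
static struct page *mydrv_fill_rx_slots(struct page_pool *pool,
                                        struct mydrv_rx_ring *ring,
                                        unsigned int buf_size)
{
        unsigned int max_frags = PAGE_SIZE / buf_size;
        struct page *page;
        unsigned int i;

        page = page_pool_dev_alloc_pages(pool);
        if (!page)
                return NULL;

        /* Take the maximum number of references up front. */
        page_pool_fragment_page(page, max_frags);

        for (i = 0; i < max_frags && mydrv_ring_has_room(ring); i++)
                mydrv_post_rx_buffer(ring, page, i * buf_size, buf_size);

        /* Return the references we did not hand out.  If that turns out to be
         * all of them, we hold the last reference and must release the page
         * through the pool ourselves.
         */
        if (i < max_frags && !page_pool_unref_page(page, max_frags - i)) {
                page_pool_put_full_page(pool, page, false);
                return NULL;
        }

        return page;
}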
@@ -297,10 +305,10 @@ static inline void page_pool_put_page(struct page_pool *pool,
          * allow registering MEM_TYPE_PAGE_POOL, but shield linker.
          */
 #ifdef CONFIG_PAGE_POOL
-        if (!page_pool_is_last_frag(page))
+        if (!page_pool_is_last_ref(page))
                 return;
 
-        page_pool_put_defragged_page(pool, page, dma_sync_size, allow_direct);
+        page_pool_put_unrefed_page(pool, page, dma_sync_size, allow_direct);
 #endif
 }
 
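For the release side touched by this second hunk, a correspondingly minimal and hypothetical sketch (again not from this commit): each posted buffer holds one pp_ref_count reference, and page_pool_put_page() uses page_pool_is_last_ref() so that only the caller dropping the final reference actually syncs, unmaps or recycles the page.

#include <net/page_pool/helpers.h>

/* Hypothetical sketch, not part of this commit: drop the single reference
 * held by one completed RX buffer.  All callers except the one releasing
 * the last reference return immediately from page_pool_put_page().
 */
static void mydrv_rx_buffer_done(struct page_pool *pool, struct page *page,
                                 unsigned int buf_size)
{
        /* Sync only the buf_size bytes the device may have written, and
         * allow direct recycling on the assumption that this runs in NAPI
         * (softirq) context.
         */
        page_pool_put_page(pool, page, buf_size, true);
}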