 /**
  * DOC: page_pool allocator
  *
- * The page_pool allocator is optimized for the XDP mode that
- * uses one frame per-page, but it can fallback on the
- * regular page allocator APIs.
+ * The page_pool allocator is optimized for recycling pages or page fragments
+ * used by skb packets and xdp frames.
  *
- * Basic use involves replacing alloc_pages() calls with the
- * page_pool_alloc_pages() call. Drivers should use
- * page_pool_dev_alloc_pages() replacing dev_alloc_pages().
+ * Basic use involves replacing alloc_pages() calls with page_pool_alloc(),
+ * which allocates memory with or without page splitting depending on the
+ * requested memory size.
  *
- * The API keeps track of in-flight pages, in order to let API users know
- * when it is safe to free a page_pool object. Thus, API users
- * must call page_pool_put_page() to free the page, or attach
- * the page to a page_pool-aware object like skbs marked with
+ * If the driver knows that it always requires full pages or its allocations
+ * are always smaller than half a page, it can use one of the more specific
+ * API calls:
+ *
+ * 1. page_pool_alloc_pages(): allocate memory without page splitting when
+ * the driver knows that the memory it needs is always larger than half of
+ * the page allocated from the page pool. There is no cache line dirtying
+ * for 'struct page' when a page is recycled back to the page pool.
+ *
+ * 2. page_pool_alloc_frag(): allocate memory with page splitting when the
+ * driver knows that the memory it needs is always smaller than or equal to
+ * half of the page allocated from the page pool. Page splitting enables
+ * memory saving and thus avoids TLB/cache misses for data access, but
+ * there is also some cost to implement page splitting, mainly some cache
+ * line dirtying/bouncing for 'struct page' and atomic operations on
+ * page->pp_frag_count.
+ *
+ * The API keeps track of in-flight pages in order to let API users know
+ * when it is safe to free a page_pool object. To free the page_pool
+ * object, API users must call page_pool_put_page() or page_pool_free_va(),
+ * or attach the page_pool object to a page_pool-aware object like skbs
+ * marked with
  * skb_mark_for_recycle().
  *
- * API users must call page_pool_put_page() once on a page, as it
- * will either recycle the page, or in case of refcnt > 1, it will
- * release the DMA mapping and in-flight state accounting.
+ * page_pool_put_page() may be called multiple times on the same page if a
+ * page is split into multiple fragments. For the last fragment, it will
+ * either recycle the page, or in case of page->_refcount > 1, it will
+ * release the DMA mapping and in-flight state accounting.
+ *
+ * dma_sync_single_range_for_device() is only called for the last fragment
+ * when the page_pool is created with the PP_FLAG_DMA_SYNC_DEV flag, so it
+ * depends on the last freed fragment to do the sync_for_device operation
+ * for all fragments in the same page when a page is split. The API user
+ * must set up pool->p.max_len and pool->p.offset correctly and ensure that
+ * page_pool_put_page() is called with dma_sync_size being -1 for the
+ * fragment API.
  */
 #ifndef _NET_PAGE_POOL_HELPERS_H
 #define _NET_PAGE_POOL_HELPERS_H
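An illustrative, minimal sketch (not part of this patch) of an rx pool setup
that follows the rules above; the buffer length, pool depth, and function
names are assumptions for the example:

	#include <linux/dma-mapping.h>
	#include <net/page_pool/helpers.h>

	#define MY_RX_BUF_LEN	2048	/* assumed per-buffer size */

	static struct page_pool *my_rx_pool_create(struct device *dev)
	{
		struct page_pool_params pp_params = {
			/* pool maps pages for DMA and syncs them for device */
			.flags		= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
			.order		= 0,
			.pool_size	= 256,
			.nid		= NUMA_NO_NODE,
			.dev		= dev,
			.dma_dir	= DMA_FROM_DEVICE,
			/* required by PP_FLAG_DMA_SYNC_DEV: the last freed
			 * fragment syncs [offset, offset + max_len) of the page
			 */
			.max_len	= MY_RX_BUF_LEN,
			.offset		= 0,
		};

		return page_pool_create(&pp_params);
	}

Freeing then pairs with page_pool_put_page(pool, page, -1, allow_direct), so
that the last freed fragment performs the sync_for_device for the whole page.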
@@ -73,6 +96,17 @@ static inline struct page *page_pool_dev_alloc_pages(struct page_pool *pool)
 	return page_pool_alloc_pages(pool, gfp);
 }
 
+/**
+ * page_pool_dev_alloc_frag() - allocate a page fragment.
+ * @pool: pool from which to allocate
+ * @offset: offset of the fragment within the allocated page
+ * @size: requested size
+ *
+ * Get a page fragment from the page allocator or page_pool caches.
+ *
+ * Return:
+ * Return the allocated page fragment, otherwise return NULL.
+ */
 static inline struct page *page_pool_dev_alloc_frag(struct page_pool *pool,
 						    unsigned int *offset,
 						    unsigned int size)
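A hypothetical rx refill path using this helper; MY_RX_BUF_LEN and the
descriptor handling are assumptions carried over from the sketch above:

	static int my_rx_refill_one(struct page_pool *pool, dma_addr_t *dma)
	{
		unsigned int offset;
		struct page *page;

		page = page_pool_dev_alloc_frag(pool, &offset, MY_RX_BUF_LEN);
		if (!page)
			return -ENOMEM;

		/* fragment occupies [offset, offset + MY_RX_BUF_LEN) of @page */
		*dma = page_pool_get_dma_addr(page) + offset;
		return 0;
	}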
@@ -111,6 +145,19 @@ static inline struct page *page_pool_alloc(struct page_pool *pool,
 	return page;
 }
 
+/**
+ * page_pool_dev_alloc() - allocate a page or a page fragment.
+ * @pool: pool from which to allocate
+ * @offset: offset of the allocated memory within the page
+ * @size: in as the requested size, out as the allocated size
+ *
+ * Get a page or a page fragment from the page allocator or page_pool caches
+ * depending on the requested size, in order to allocate memory with the least
+ * memory utilization and performance penalty.
+ *
+ * Return:
+ * Return the allocated page or page fragment, otherwise return NULL.
+ */
 static inline struct page *page_pool_dev_alloc(struct page_pool *pool,
 					       unsigned int *offset,
 					       unsigned int *size)
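A sketch of how a driver might use this helper when buffer sizes vary at
runtime; my_alloc_buf() is a hypothetical wrapper, not part of the patch:

	static void *my_alloc_buf(struct page_pool *pool, unsigned int *len)
	{
		unsigned int offset;
		struct page *page;

		/* *len is the requested size on input, allocated size on output */
		page = page_pool_dev_alloc(pool, &offset, len);
		if (!page)
			return NULL;

		return page_address(page) + offset;
	}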
@@ -134,6 +181,18 @@ static inline void *page_pool_alloc_va(struct page_pool *pool,
 	return page_address(page) + offset;
 }
 
+/**
+ * page_pool_dev_alloc_va() - allocate a page or a page fragment and return
+ * its va.
+ * @pool: pool from which to allocate
+ * @size: in as the requested size, out as the allocated size
+ *
+ * This is just a thin wrapper around the page_pool_alloc() API, and it
+ * returns the va of the allocated page or page fragment.
+ *
+ * Return:
+ * Return the va for the allocated page or page fragment, otherwise return
+ * NULL.
+ */
 static inline void *page_pool_dev_alloc_va(struct page_pool *pool,
 					   unsigned int *size)
 {
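A hypothetical caller of the va-based helper; note that @size is in/out and
may come back larger than requested:

	unsigned int size = 1024;	/* assumed request */
	void *va;

	va = page_pool_dev_alloc_va(pool, &size);
	if (!va)
		return -ENOMEM;
	/* size now holds the actually allocated length */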
@@ -281,6 +340,14 @@ static inline void page_pool_recycle_direct(struct page_pool *pool,
 #define PAGE_POOL_32BIT_ARCH_WITH_64BIT_DMA	\
 		(sizeof(dma_addr_t) > sizeof(unsigned long))
 
+/**
+ * page_pool_free_va() - free a va into the page_pool
+ * @pool: pool from which va was allocated
+ * @va: va to be freed
+ * @allow_direct: freed by the consumer, allow lockless caching
+ *
+ * Free a va allocated from page_pool_alloc_va().
+ */
 static inline void page_pool_free_va(struct page_pool *pool, void *va,
 				     bool allow_direct)
 {
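A hypothetical round trip pairing page_pool_dev_alloc_va() with
page_pool_free_va(); the scratch-buffer use is illustrative:

	static int my_scratch_roundtrip(struct page_pool *pool)
	{
		unsigned int size = 512;	/* assumed request */
		void *va;

		va = page_pool_dev_alloc_va(pool, &size);
		if (!va)
			return -ENOMEM;

		memset(va, 0, size);	/* use the buffer */

		/* allow_direct only when running in the pool's NAPI context */
		page_pool_free_va(pool, va, false);
		return 0;
	}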