  * warranty of any kind, whether express or implied.
  */
 
-#include <linux/kernel.h>
+#include <linux/clk.h>
 #include <linux/genalloc.h>
-#include <linux/platform_device.h>
-#include <linux/netdevice.h>
-#include <linux/skbuff.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
 #include <linux/mbus.h>
 #include <linux/module.h>
-#include <linux/io.h>
+#include <linux/netdevice.h>
 #include <linux/of.h>
-#include <linux/clk.h>
+#include <linux/platform_device.h>
+#include <linux/skbuff.h>
+#include <net/hwbm.h>
 #include "mvneta_bm.h"
 
 #define MVNETA_BM_DRIVER_NAME "mvneta_bm"
@@ -88,93 +89,27 @@ static void mvneta_bm_pool_target_set(struct mvneta_bm *priv, int pool_id,
 	mvneta_bm_write(priv, MVNETA_BM_XBAR_POOL_REG(pool_id), val);
 }
 
-/* Allocate skb for BM pool */
-void *mvneta_buf_alloc(struct mvneta_bm *priv, struct mvneta_bm_pool *bm_pool,
-		       dma_addr_t *buf_phys_addr)
+int mvneta_bm_construct(struct hwbm_pool *hwbm_pool, void *buf)
 {
-	void *buf;
+	struct mvneta_bm_pool *bm_pool =
+		(struct mvneta_bm_pool *)hwbm_pool->priv;
+	struct mvneta_bm *priv = bm_pool->priv;
 	dma_addr_t phys_addr;
 
-	buf = mvneta_frag_alloc(bm_pool->frag_size);
-	if (!buf)
-		return NULL;
-
 	/* In order to update buf_cookie field of RX descriptor properly,
 	 * BM hardware expects buf virtual address to be placed in the
 	 * first four bytes of mapped buffer.
 	 */
 	*(u32 *)buf = (u32)buf;
 	phys_addr = dma_map_single(&priv->pdev->dev, buf, bm_pool->buf_size,
 				   DMA_FROM_DEVICE);
-	if (unlikely(dma_mapping_error(&priv->pdev->dev, phys_addr))) {
-		mvneta_frag_free(bm_pool->frag_size, buf);
-		return NULL;
-	}
-	*buf_phys_addr = phys_addr;
-
-	return buf;
-}
-
-/* Refill processing for HW buffer management */
-int mvneta_bm_pool_refill(struct mvneta_bm *priv,
-			  struct mvneta_bm_pool *bm_pool)
-{
-	dma_addr_t buf_phys_addr;
-	void *buf;
-
-	buf = mvneta_buf_alloc(priv, bm_pool, &buf_phys_addr);
-	if (!buf)
+	if (unlikely(dma_mapping_error(&priv->pdev->dev, phys_addr)))
 		return -ENOMEM;
 
-	mvneta_bm_pool_put_bp(priv, bm_pool, buf_phys_addr);
-
+	mvneta_bm_pool_put_bp(priv, bm_pool, phys_addr);
 	return 0;
 }
-EXPORT_SYMBOL_GPL(mvneta_bm_pool_refill);
-
-/* Allocate buffers for the pool */
-int mvneta_bm_bufs_add(struct mvneta_bm *priv, struct mvneta_bm_pool *bm_pool,
-		       int buf_num)
-{
-	int err, i;
-
-	if (bm_pool->buf_num == bm_pool->size) {
-		dev_dbg(&priv->pdev->dev, "pool %d already filled\n",
-			bm_pool->id);
-		return bm_pool->buf_num;
-	}
-
-	if (buf_num < 0 ||
-	    (buf_num + bm_pool->buf_num > bm_pool->size)) {
-		dev_err(&priv->pdev->dev,
-			"cannot allocate %d buffers for pool %d\n",
-			buf_num, bm_pool->id);
-		return 0;
-	}
-
-	for (i = 0; i < buf_num; i++) {
-		err = mvneta_bm_pool_refill(priv, bm_pool);
-		if (err < 0)
-			break;
-	}
-
-	/* Update BM driver with number of buffers added to pool */
-	bm_pool->buf_num += i;
-
-	dev_dbg(&priv->pdev->dev,
-		"%s pool %d: pkt_size=%4d, buf_size=%4d, frag_size=%4d\n",
-		bm_pool->type == MVNETA_BM_SHORT ? "short" : "long",
-		bm_pool->id, bm_pool->pkt_size, bm_pool->buf_size,
-		bm_pool->frag_size);
-
-	dev_dbg(&priv->pdev->dev,
-		"%s pool %d: %d of %d buffers added\n",
-		bm_pool->type == MVNETA_BM_SHORT ? "short" : "long",
-		bm_pool->id, i, buf_num);
-
-	return i;
-}
-EXPORT_SYMBOL_GPL(mvneta_bm_bufs_add);
+EXPORT_SYMBOL_GPL(mvneta_bm_construct);
 
 /* Create pool */
 static int mvneta_bm_pool_create(struct mvneta_bm *priv,
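
The comment kept in mvneta_bm_construct() is the heart of the scheme: because the buffer's own virtual address is written into its first four bytes before DMA mapping, the BM hardware can hand that value back through the RX descriptor's buf_cookie field. Below is a minimal sketch of how the driver side can turn that cookie back into a pointer, assuming a 32-bit platform; the helper name is illustrative and not part of this patch.

#include <linux/types.h>

/* Illustrative only: recover the kernel virtual address that
 * mvneta_bm_construct() stored in the first four bytes of the buffer and
 * that the BM engine reports back as the RX descriptor's buf_cookie.
 * Only meaningful where virtual addresses fit in 32 bits.
 */
static inline void *mvneta_bm_cookie_to_vaddr(u32 buf_cookie)
{
	return (void *)(uintptr_t)buf_cookie;
}
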
@@ -183,8 +118,7 @@ static int mvneta_bm_pool_create(struct mvneta_bm *priv,
 	struct platform_device *pdev = priv->pdev;
 	u8 target_id, attr;
 	int size_bytes, err;
-
-	size_bytes = sizeof(u32) * bm_pool->size;
+	size_bytes = sizeof(u32) * bm_pool->hwbm_pool.size;
 	bm_pool->virt_addr = dma_alloc_coherent(&pdev->dev, size_bytes,
 						&bm_pool->phys_addr,
 						GFP_KERNEL);
@@ -245,11 +179,16 @@ struct mvneta_bm_pool *mvneta_bm_pool_use(struct mvneta_bm *priv, u8 pool_id,
 
 	/* Allocate buffers in case BM pool hasn't been used yet */
 	if (new_pool->type == MVNETA_BM_FREE) {
+		struct hwbm_pool *hwbm_pool = &new_pool->hwbm_pool;
+
+		new_pool->priv = priv;
 		new_pool->type = type;
 		new_pool->buf_size = MVNETA_RX_BUF_SIZE(new_pool->pkt_size);
-		new_pool->frag_size =
+		hwbm_pool->frag_size =
 			SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(new_pool->pkt_size)) +
 			SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+		hwbm_pool->construct = mvneta_bm_construct;
+		hwbm_pool->priv = new_pool;
 
 		/* Create new pool */
 		err = mvneta_bm_pool_create(priv, new_pool);
@@ -260,10 +199,10 @@ struct mvneta_bm_pool *mvneta_bm_pool_use(struct mvneta_bm *priv, u8 pool_id,
 		}
 
 		/* Allocate buffers for this pool */
-		num = mvneta_bm_bufs_add(priv, new_pool, new_pool->size);
-		if (num != new_pool->size) {
+		num = hwbm_pool_add(hwbm_pool, hwbm_pool->size, GFP_ATOMIC);
+		if (num != hwbm_pool->size) {
 			WARN(1, "pool %d: %d of %d allocated\n",
-			     new_pool->id, num, new_pool->size);
+			     new_pool->id, num, hwbm_pool->size);
 			return NULL;
 		}
 	}
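
hwbm_pool_add() takes over the work of the mvneta_bm_bufs_add()/mvneta_bm_pool_refill() pair deleted above: allocate frag_size bytes per buffer and feed each one to the pool's construct() callback. The following is a rough sketch of that contract as this caller relies on it; the real net/core/hwbm.c helper may differ in its allocator choice, error reporting and locking.

#include <linux/slab.h>
#include <net/hwbm.h>

/* Sketch only, modelled on the removed mvneta_bm_bufs_add(): add up to
 * buf_num buffers to the pool and report how many actually made it in.
 */
static int hwbm_pool_add_sketch(struct hwbm_pool *bm_pool,
				unsigned int buf_num, gfp_t gfp)
{
	unsigned int i;

	for (i = 0; i < buf_num; i++) {
		void *buf = kmalloc(bm_pool->frag_size, gfp);

		if (!buf)
			break;
		/* construct() maps the buffer for DMA and pushes it into the
		 * hardware pool; here it is mvneta_bm_construct() above.
		 */
		if (bm_pool->construct && bm_pool->construct(bm_pool, buf)) {
			kfree(buf);
			break;
		}
	}

	/* Track how many buffers the pool now owns. */
	bm_pool->buf_num += i;
	return i;
}
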
@@ -284,7 +223,7 @@ void mvneta_bm_bufs_free(struct mvneta_bm *priv, struct mvneta_bm_pool *bm_pool,
 
 	mvneta_bm_config_set(priv, MVNETA_BM_EMPTY_LIMIT_MASK);
 
-	for (i = 0; i < bm_pool->buf_num; i++) {
+	for (i = 0; i < bm_pool->hwbm_pool.buf_num; i++) {
 		dma_addr_t buf_phys_addr;
 		u32 *vaddr;
 
@@ -303,32 +242,34 @@ void mvneta_bm_bufs_free(struct mvneta_bm *priv, struct mvneta_bm_pool *bm_pool,
 
 		dma_unmap_single(&priv->pdev->dev, buf_phys_addr,
 				 bm_pool->buf_size, DMA_FROM_DEVICE);
-		mvneta_frag_free(bm_pool->frag_size, vaddr);
+		hwbm_buf_free(&bm_pool->hwbm_pool, vaddr);
 	}
 
 	mvneta_bm_config_clear(priv, MVNETA_BM_EMPTY_LIMIT_MASK);
 
 	/* Update BM driver with number of buffers removed from pool */
-	bm_pool->buf_num -= i;
+	bm_pool->hwbm_pool.buf_num -= i;
 }
 EXPORT_SYMBOL_GPL(mvneta_bm_bufs_free);
 
 /* Cleanup pool */
 void mvneta_bm_pool_destroy(struct mvneta_bm *priv,
 			    struct mvneta_bm_pool *bm_pool, u8 port_map)
 {
+	struct hwbm_pool *hwbm_pool = &bm_pool->hwbm_pool;
 	bm_pool->port_map &= ~port_map;
 	if (bm_pool->port_map)
 		return;
 
 	bm_pool->type = MVNETA_BM_FREE;
 
 	mvneta_bm_bufs_free(priv, bm_pool, port_map);
-	if (bm_pool->buf_num)
+	if (hwbm_pool->buf_num)
 		WARN(1, "cannot free all buffers in pool %d\n", bm_pool->id);
 
 	if (bm_pool->virt_addr) {
-		dma_free_coherent(&priv->pdev->dev, sizeof(u32) * bm_pool->size,
+		dma_free_coherent(&priv->pdev->dev,
+				  sizeof(u32) * hwbm_pool->size,
 				  bm_pool->virt_addr, bm_pool->phys_addr);
 		bm_pool->virt_addr = NULL;
 	}
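
Teardown stays symmetric with mvneta_bm_construct(): the driver still undoes its own dma_map_single(), but the memory itself is returned through the hwbm layer that allocated it. A condensed per-buffer version of the loop in mvneta_bm_bufs_free() above, shown only to make that pairing explicit; the helper name is not part of this patch.

#include <linux/dma-mapping.h>
#include <net/hwbm.h>
#include "mvneta_bm.h"

/* Illustrative helper: release one buffer previously set up by
 * mvneta_bm_construct(): unmap it for DMA, then give the memory back to
 * the hwbm layer.
 */
static void mvneta_bm_release_buf(struct mvneta_bm *priv,
				  struct mvneta_bm_pool *bm_pool,
				  dma_addr_t buf_phys_addr, void *vaddr)
{
	dma_unmap_single(&priv->pdev->dev, buf_phys_addr,
			 bm_pool->buf_size, DMA_FROM_DEVICE);
	hwbm_buf_free(&bm_pool->hwbm_pool, vaddr);
}
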
@@ -381,10 +322,10 @@ static void mvneta_bm_pools_init(struct mvneta_bm *priv)
 				 MVNETA_BM_POOL_CAP_ALIGN));
 			size = ALIGN(size, MVNETA_BM_POOL_CAP_ALIGN);
 		}
-		bm_pool->size = size;
+		bm_pool->hwbm_pool.size = size;
 
 		mvneta_bm_write(priv, MVNETA_BM_POOL_SIZE_REG(i),
-				bm_pool->size);
+				bm_pool->hwbm_pool.size);
 
 		/* Obtain custom pkt_size from DT */
 		sprintf(prop, "pool%d,pkt-size", i);