
#define MAX_WAIT_FOR_8021X_TX msecs_to_jiffies(950)

/* AMPDU rx reordering definitions */

/* Byte offsets into the per-packet reorder metadata blob (reorder_data)
 * consumed by brcmf_rxreorder_process_info(). NOTE(review): the blob is
 * presumably filled in by the firmware/protocol layer — confirm against
 * the code that populates skb->cb.
 */
#define BRCMF_RXREORDER_FLOWID_OFFSET 0
#define BRCMF_RXREORDER_MAXIDX_OFFSET 2
#define BRCMF_RXREORDER_FLAGS_OFFSET 4
#define BRCMF_RXREORDER_CURIDX_OFFSET 6
#define BRCMF_RXREORDER_EXPIDX_OFFSET 8

/* Flag bits read from the FLAGS byte of the reorder metadata. */
#define BRCMF_RXREORDER_DEL_FLOW 0x01
#define BRCMF_RXREORDER_FLUSH_ALL 0x02
#define BRCMF_RXREORDER_CURIDX_VALID 0x04
#define BRCMF_RXREORDER_EXPIDX_VALID 0x08
#define BRCMF_RXREORDER_NEW_HOLE 0x10

/* Sentinel marking an interface with no valid BSS index. */
#define BRCMF_BSSIDX_INVALID -1
5744
5845char * brcmf_ifname (struct brcmf_if * ifp )
@@ -342,207 +329,11 @@ void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb,
342329 netif_rx_ni (skb );
343330}
344331
345- static void brcmf_rxreorder_get_skb_list (struct brcmf_ampdu_rx_reorder * rfi ,
346- u8 start , u8 end ,
347- struct sk_buff_head * skb_list )
348- {
349- /* initialize return list */
350- __skb_queue_head_init (skb_list );
351-
352- if (rfi -> pend_pkts == 0 ) {
353- brcmf_dbg (INFO , "no packets in reorder queue\n" );
354- return ;
355- }
356-
357- do {
358- if (rfi -> pktslots [start ]) {
359- __skb_queue_tail (skb_list , rfi -> pktslots [start ]);
360- rfi -> pktslots [start ] = NULL ;
361- }
362- start ++ ;
363- if (start > rfi -> max_idx )
364- start = 0 ;
365- } while (start != end );
366- rfi -> pend_pkts -= skb_queue_len (skb_list );
367- }
368-
/* Process one received packet against the AMPDU rx reorder state for its
 * flow, either queueing it into a slot until the expected sequence index
 * arrives or releasing it (plus any now-in-order queued packets) up the
 * stack via brcmf_netif_rx().
 *
 * @ifp:          interface the packet arrived on
 * @reorder_data: metadata blob indexed by the BRCMF_RXREORDER_*_OFFSET
 *                constants (flow id, ring size, flags, current/expected
 *                slot indices)
 * @pkt:          the received packet; ownership is consumed — it is
 *                either stored in a slot or handed to brcmf_netif_rx()
 */
static void brcmf_rxreorder_process_info(struct brcmf_if *ifp, u8 *reorder_data,
					 struct sk_buff *pkt)
{
	/* NOTE(review): end_idx here is shadowed by a second declaration in
	 * the "both moved" branch below; this outer one is only used by the
	 * final expected-index-update branch.
	 */
	u8 flow_id, max_idx, cur_idx, exp_idx, end_idx;
	struct brcmf_ampdu_rx_reorder *rfi;
	struct sk_buff_head reorder_list;
	struct sk_buff *pnext;
	u8 flags;
	u32 buf_size;

	flow_id = reorder_data[BRCMF_RXREORDER_FLOWID_OFFSET];
	flags = reorder_data[BRCMF_RXREORDER_FLAGS_OFFSET];

	/* validate flags and flow id */
	if (flags == 0xFF) {
		/* 0xFF marks invalid metadata: bypass reordering entirely */
		brcmf_err("invalid flags...so ignore this packet\n");
		brcmf_netif_rx(ifp, pkt, false);
		return;
	}

	rfi = ifp->drvr->reorder_flows[flow_id];
	if (flags & BRCMF_RXREORDER_DEL_FLOW) {
		brcmf_dbg(INFO, "flow-%d: delete\n",
			  flow_id);

		if (rfi == NULL) {
			brcmf_dbg(INFO, "received flags to cleanup, but no flow (%d) yet\n",
				  flow_id);
			brcmf_netif_rx(ifp, pkt, false);
			return;
		}

		/* start == end drains the whole ring (see
		 * brcmf_rxreorder_get_skb_list)
		 */
		brcmf_rxreorder_get_skb_list(rfi, rfi->exp_idx, rfi->exp_idx,
					     &reorder_list);
		/* add the last packet */
		__skb_queue_tail(&reorder_list, pkt);
		kfree(rfi);
		ifp->drvr->reorder_flows[flow_id] = NULL;
		goto netif_rx;
	}
	/* from here on we need a flow reorder instance */
	if (rfi == NULL) {
		buf_size = sizeof(*rfi);
		max_idx = reorder_data[BRCMF_RXREORDER_MAXIDX_OFFSET];

		/* slot array (max_idx + 1 skb pointers) is allocated
		 * contiguously after the struct itself
		 */
		buf_size += (max_idx + 1) * sizeof(pkt);

		/* allocate space for flow reorder info */
		brcmf_dbg(INFO, "flow-%d: start, maxidx %d\n",
			  flow_id, max_idx);
		rfi = kzalloc(buf_size, GFP_ATOMIC);
		if (rfi == NULL) {
			/* on OOM fall back to in-order delivery */
			brcmf_err("failed to alloc buffer\n");
			brcmf_netif_rx(ifp, pkt, false);
			return;
		}

		ifp->drvr->reorder_flows[flow_id] = rfi;
		rfi->pktslots = (struct sk_buff **)(rfi + 1);
		rfi->max_idx = max_idx;
	}
	if (flags & BRCMF_RXREORDER_NEW_HOLE) {
		/* a new gap opened: flush anything still pending from the
		 * previous window, then park this packet at cur_idx
		 */
		if (rfi->pend_pkts) {
			brcmf_rxreorder_get_skb_list(rfi, rfi->exp_idx,
						     rfi->exp_idx,
						     &reorder_list);
			WARN_ON(rfi->pend_pkts);
		} else {
			__skb_queue_head_init(&reorder_list);
		}
		rfi->cur_idx = reorder_data[BRCMF_RXREORDER_CURIDX_OFFSET];
		rfi->exp_idx = reorder_data[BRCMF_RXREORDER_EXPIDX_OFFSET];
		rfi->max_idx = reorder_data[BRCMF_RXREORDER_MAXIDX_OFFSET];
		rfi->pktslots[rfi->cur_idx] = pkt;
		rfi->pend_pkts++;
		brcmf_dbg(DATA, "flow-%d: new hole %d (%d), pending %d\n",
			  flow_id, rfi->cur_idx, rfi->exp_idx, rfi->pend_pkts);
	} else if (flags & BRCMF_RXREORDER_CURIDX_VALID) {
		cur_idx = reorder_data[BRCMF_RXREORDER_CURIDX_OFFSET];
		exp_idx = reorder_data[BRCMF_RXREORDER_EXPIDX_OFFSET];

		if ((exp_idx == rfi->exp_idx) && (cur_idx != rfi->exp_idx)) {
			/* still in the current hole */
			/* enqueue the current on the buffer chain */
			if (rfi->pktslots[cur_idx] != NULL) {
				/* duplicate slot: drop the stale occupant */
				brcmf_dbg(INFO, "HOLE: ERROR buffer pending..free it\n");
				brcmu_pkt_buf_free_skb(rfi->pktslots[cur_idx]);
				rfi->pktslots[cur_idx] = NULL;
			}
			rfi->pktslots[cur_idx] = pkt;
			rfi->pend_pkts++;
			rfi->cur_idx = cur_idx;
			brcmf_dbg(DATA, "flow-%d: store pkt %d (%d), pending %d\n",
				  flow_id, cur_idx, exp_idx, rfi->pend_pkts);

			/* can return now as there is no reorder
			 * list to process.
			 */
			return;
		}
		if (rfi->exp_idx == cur_idx) {
			if (rfi->pktslots[cur_idx] != NULL) {
				brcmf_dbg(INFO, "error buffer pending..free it\n");
				brcmu_pkt_buf_free_skb(rfi->pktslots[cur_idx]);
				rfi->pktslots[cur_idx] = NULL;
			}
			rfi->pktslots[cur_idx] = pkt;
			rfi->pend_pkts++;

			/* got the expected one. flush from current to expected
			 * and update expected
			 */
			brcmf_dbg(DATA, "flow-%d: expected %d (%d), pending %d\n",
				  flow_id, cur_idx, exp_idx, rfi->pend_pkts);

			rfi->cur_idx = cur_idx;
			rfi->exp_idx = exp_idx;

			brcmf_rxreorder_get_skb_list(rfi, cur_idx, exp_idx,
						     &reorder_list);
			brcmf_dbg(DATA, "flow-%d: freeing buffers %d, pending %d\n",
				  flow_id, skb_queue_len(&reorder_list),
				  rfi->pend_pkts);
		} else {
			/* shadows the outer end_idx (see note above) */
			u8 end_idx;

			brcmf_dbg(DATA, "flow-%d (0x%x): both moved, old %d/%d, new %d/%d\n",
				  flow_id, flags, rfi->cur_idx, rfi->exp_idx,
				  cur_idx, exp_idx);
			if (flags & BRCMF_RXREORDER_FLUSH_ALL)
				end_idx = rfi->exp_idx;
			else
				end_idx = exp_idx;

			/* flush pkts first */
			brcmf_rxreorder_get_skb_list(rfi, rfi->exp_idx, end_idx,
						     &reorder_list);

			/* if the new packet is exactly one past cur_idx it is
			 * already in order; otherwise park it in its slot
			 */
			if (exp_idx == ((cur_idx + 1) % (rfi->max_idx + 1))) {
				__skb_queue_tail(&reorder_list, pkt);
			} else {
				rfi->pktslots[cur_idx] = pkt;
				rfi->pend_pkts++;
			}
			rfi->exp_idx = exp_idx;
			rfi->cur_idx = cur_idx;
		}
	} else {
		/* explicity window move updating the expected index */
		exp_idx = reorder_data[BRCMF_RXREORDER_EXPIDX_OFFSET];

		brcmf_dbg(DATA, "flow-%d (0x%x): change expected: %d -> %d\n",
			  flow_id, flags, rfi->exp_idx, exp_idx);
		if (flags & BRCMF_RXREORDER_FLUSH_ALL)
			end_idx = rfi->exp_idx;
		else
			end_idx = exp_idx;

		brcmf_rxreorder_get_skb_list(rfi, rfi->exp_idx, end_idx,
					     &reorder_list);
		__skb_queue_tail(&reorder_list, pkt);
		/* set the new expected idx */
		rfi->exp_idx = exp_idx;
	}
netif_rx:
	/* hand every released packet to the network stack */
	skb_queue_walk_safe(&reorder_list, pkt, pnext) {
		__skb_unlink(pkt, &reorder_list);
		brcmf_netif_rx(ifp, pkt, false);
	}
}
539-
540332void brcmf_rx_frame (struct device * dev , struct sk_buff * skb , bool handle_evnt )
541333{
542334 struct brcmf_if * ifp ;
543335 struct brcmf_bus * bus_if = dev_get_drvdata (dev );
544336 struct brcmf_pub * drvr = bus_if -> drvr ;
545- struct brcmf_skb_reorder_data * rd ;
546337 int ret ;
547338
548339 brcmf_dbg (DATA , "Enter: %s: rxp=%p\n" , dev_name (dev ), skb );
@@ -557,9 +348,8 @@ void brcmf_rx_frame(struct device *dev, struct sk_buff *skb, bool handle_evnt)
557348 return ;
558349 }
559350
560- rd = (struct brcmf_skb_reorder_data * )skb -> cb ;
561- if (rd -> reorder )
562- brcmf_rxreorder_process_info (ifp , rd -> reorder , skb );
351+ if (brcmf_proto_is_reorder_skb (skb ))
352+ brcmf_proto_rxreorder (ifp , skb );
563353 else
564354 brcmf_netif_rx (ifp , skb , handle_evnt );
565355}
0 commit comments