@@ -6,12 +6,22 @@
 #include <linux/iopoll.h>
 #include <linux/etherdevice.h>
 #include <linux/platform_device.h>
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include <net/dsa.h>
 #include "mtk_eth_soc.h"
 #include "mtk_ppe.h"
 #include "mtk_ppe_regs.h"
 
 static DEFINE_SPINLOCK(ppe_lock);
 
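+/* L2 flow entries are keyed on the leading fields of struct mtk_foe_bridge
+ * up to its key_end marker (the MAC addresses plus VLAN in this patch), so
+ * a zero-initialized on-stack struct can serve as a lookup key, as done in
+ * __mtk_ppe_check_skb() below.
+ */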
+static const struct rhashtable_params mtk_flow_l2_ht_params = {
+        .head_offset = offsetof(struct mtk_flow_entry, l2_node),
+        .key_offset = offsetof(struct mtk_flow_entry, data.bridge),
+        .key_len = offsetof(struct mtk_foe_bridge, key_end),
+        .automatic_shrinking = true,
+};
+
 static void ppe_w32(struct mtk_ppe *ppe, u32 reg, u32 val)
 {
         writel(val, ppe->base + reg);
@@ -123,6 +133,9 @@ mtk_foe_entry_l2(struct mtk_foe_entry *entry)
 {
         int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1);
 
+        if (type == MTK_PPE_PKT_TYPE_BRIDGE)
+                return &entry->bridge.l2;
+
         if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE)
                 return &entry->ipv6.l2;
 
@@ -134,6 +147,9 @@ mtk_foe_entry_ib2(struct mtk_foe_entry *entry)
 {
         int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1);
 
+        if (type == MTK_PPE_PKT_TYPE_BRIDGE)
+                return &entry->bridge.ib2;
+
         if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE)
                 return &entry->ipv6.ib2;
 
@@ -168,7 +184,12 @@ int mtk_foe_entry_prepare(struct mtk_foe_entry *entry, int type, int l4proto,
         if (type == MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T)
                 entry->ipv6.ports = ports_pad;
 
-        if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE) {
+        if (type == MTK_PPE_PKT_TYPE_BRIDGE) {
+                ether_addr_copy(entry->bridge.src_mac, src_mac);
+                ether_addr_copy(entry->bridge.dest_mac, dest_mac);
+                entry->bridge.ib2 = val;
+                l2 = &entry->bridge.l2;
+        } else if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE) {
                 entry->ipv6.ib2 = val;
                 l2 = &entry->ipv6.l2;
         } else {
@@ -371,13 +392,97 @@ mtk_flow_entry_match(struct mtk_flow_entry *entry, struct mtk_foe_entry *data)
         return !memcmp(&entry->data.data, &data->data, len - 4);
 }
 
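+/* Called with ppe_lock held. Tearing down an L2 entry removes it from the
+ * rhashtable and recursively clears every subflow hashed under it; a regular
+ * or subflow entry is unlinked and its hardware entry left in BIND state,
+ * mirroring the code this patch moves out of mtk_foe_entry_clear(). Subflow
+ * entries are allocated in mtk_foe_entry_commit_subflow() and freed here.
+ */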
+static void
+__mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
+{
+        struct hlist_head *head;
+        struct hlist_node *tmp;
+
+        if (entry->type == MTK_FLOW_TYPE_L2) {
+                rhashtable_remove_fast(&ppe->l2_flows, &entry->l2_node,
+                                       mtk_flow_l2_ht_params);
+
+                head = &entry->l2_flows;
+                hlist_for_each_entry_safe(entry, tmp, head, l2_data.list)
+                        __mtk_foe_entry_clear(ppe, entry);
+                return;
+        }
+
+        hlist_del_init(&entry->list);
+        if (entry->hash != 0xffff) {
+                ppe->foe_table[entry->hash].ib1 &= ~MTK_FOE_IB1_STATE;
+                ppe->foe_table[entry->hash].ib1 |= FIELD_PREP(MTK_FOE_IB1_STATE,
+                                                              MTK_FOE_STATE_BIND);
+                dma_wmb();
+        }
+        entry->hash = 0xffff;
+
+        if (entry->type != MTK_FLOW_TYPE_L2_SUBFLOW)
+                return;
+
+        hlist_del_init(&entry->l2_data.list);
+        kfree(entry);
+}
+
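+/* The bind timestamp is the free-running eth timestamp truncated to the
+ * width of MTK_FOE_IB1_BIND_TIMESTAMP, so idle time is computed modulo that
+ * width: e.g. with a 15-bit field, timestamp 0x7ffe and now 0x0001 give an
+ * idle time of 3 ticks.
+ */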
+static int __mtk_foe_entry_idle_time(struct mtk_ppe *ppe, u32 ib1)
+{
+        u16 timestamp;
+        u16 now;
+
+        now = mtk_eth_timestamp(ppe->eth) & MTK_FOE_IB1_BIND_TIMESTAMP;
+        timestamp = ib1 & MTK_FOE_IB1_BIND_TIMESTAMP;
+
+        if (timestamp > now)
+                return MTK_FOE_IB1_BIND_TIMESTAMP + 1 - timestamp + now;
+        else
+                return now - timestamp;
+}
+
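+/* An L2 entry has no hardware entry of its own: refresh its timestamp from
+ * the most recently used subflow, and reap subflows whose hardware entry
+ * has left the BIND state.
+ */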
+static void
+mtk_flow_entry_update_l2(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
+{
+        struct mtk_flow_entry *cur;
+        struct mtk_foe_entry *hwe;
+        struct hlist_node *tmp;
+        int idle;
+
+        idle = __mtk_foe_entry_idle_time(ppe, entry->data.ib1);
+        hlist_for_each_entry_safe(cur, tmp, &entry->l2_flows, l2_data.list) {
+                int cur_idle;
+                u32 ib1;
+
+                hwe = &ppe->foe_table[cur->hash];
+                ib1 = READ_ONCE(hwe->ib1);
+
+                if (FIELD_GET(MTK_FOE_IB1_STATE, ib1) != MTK_FOE_STATE_BIND) {
+                        cur->hash = 0xffff;
+                        __mtk_foe_entry_clear(ppe, cur);
+                        continue;
+                }
+
+                cur_idle = __mtk_foe_entry_idle_time(ppe, ib1);
+                if (cur_idle >= idle)
+                        continue;
+
+                idle = cur_idle;
+                entry->data.ib1 &= ~MTK_FOE_IB1_BIND_TIMESTAMP;
+                entry->data.ib1 |= hwe->ib1 & MTK_FOE_IB1_BIND_TIMESTAMP;
+        }
+}
+
 static void
 mtk_flow_entry_update(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
 {
         struct mtk_foe_entry *hwe;
         struct mtk_foe_entry foe;
 
         spin_lock_bh(&ppe_lock);
+
+        if (entry->type == MTK_FLOW_TYPE_L2) {
+                mtk_flow_entry_update_l2(ppe, entry);
+                goto out;
+        }
+
         if (entry->hash == 0xffff)
                 goto out;
 
@@ -419,21 +524,28 @@ __mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry,
 void mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
 {
         spin_lock_bh(&ppe_lock);
-        hlist_del_init(&entry->list);
-        if (entry->hash != 0xffff) {
-                ppe->foe_table[entry->hash].ib1 &= ~MTK_FOE_IB1_STATE;
-                ppe->foe_table[entry->hash].ib1 |= FIELD_PREP(MTK_FOE_IB1_STATE,
-                                                              MTK_FOE_STATE_BIND);
-                dma_wmb();
-        }
-        entry->hash = 0xffff;
+        __mtk_foe_entry_clear(ppe, entry);
         spin_unlock_bh(&ppe_lock);
 }
 
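+/* Bridge flows never enter the FOE hash table directly: they are only
+ * stored in the l2_flows rhashtable and bound lazily, one hardware hash at
+ * a time, by mtk_foe_entry_commit_subflow().
+ */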
+static int
+mtk_foe_entry_commit_l2(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
+{
+        entry->type = MTK_FLOW_TYPE_L2;
+
+        return rhashtable_insert_fast(&ppe->l2_flows, &entry->l2_node,
+                                      mtk_flow_l2_ht_params);
+}
+
 int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
 {
-        u32 hash = mtk_ppe_hash_entry(&entry->data);
+        int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->data.ib1);
+        u32 hash;
+
+        if (type == MTK_PPE_PKT_TYPE_BRIDGE)
+                return mtk_foe_entry_commit_l2(ppe, entry);
 
+        hash = mtk_ppe_hash_entry(&entry->data);
         entry->hash = 0xffff;
         spin_lock_bh(&ppe_lock);
         hlist_add_head(&entry->list, &ppe->foe_flow[hash / 2]);
@@ -442,18 +554,72 @@ int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
         return 0;
 }
 
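+/* Bind one hardware hash to an L2 flow: clone the unbound hardware entry's
+ * layer-3/4 fields, overlay the L2 info and ib2 cached in the bridge entry,
+ * and commit the result at this hash. A small subflow entry tracks the
+ * binding so it can be aged and torn down along with its parent flow.
+ */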
+static void
+mtk_foe_entry_commit_subflow(struct mtk_ppe *ppe, struct mtk_flow_entry *entry,
+                             u16 hash)
+{
+        struct mtk_flow_entry *flow_info;
+        struct mtk_foe_entry foe, *hwe;
+        struct mtk_foe_mac_info *l2;
+        u32 ib1_mask = MTK_FOE_IB1_PACKET_TYPE | MTK_FOE_IB1_UDP;
+        int type;
+
+        flow_info = kzalloc(offsetof(struct mtk_flow_entry, l2_data.end),
+                            GFP_ATOMIC);
+        if (!flow_info)
+                return;
+
+        flow_info->l2_data.base_flow = entry;
+        flow_info->type = MTK_FLOW_TYPE_L2_SUBFLOW;
+        flow_info->hash = hash;
+        hlist_add_head(&flow_info->list, &ppe->foe_flow[hash / 2]);
+        hlist_add_head(&flow_info->l2_data.list, &entry->l2_flows);
+
+        hwe = &ppe->foe_table[hash];
+        memcpy(&foe, hwe, sizeof(foe));
+        foe.ib1 &= ib1_mask;
+        foe.ib1 |= entry->data.ib1 & ~ib1_mask;
+
+        l2 = mtk_foe_entry_l2(&foe);
+        memcpy(l2, &entry->data.bridge.l2, sizeof(*l2));
+
+        type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, foe.ib1);
+        if (type == MTK_PPE_PKT_TYPE_IPV4_HNAPT)
+                memcpy(&foe.ipv4.new, &foe.ipv4.orig, sizeof(foe.ipv4.new));
+        else if (type >= MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T && l2->etype == ETH_P_IP)
+                l2->etype = ETH_P_IPV6;
+
+        *mtk_foe_entry_ib2(&foe) = entry->data.bridge.ib2;
+
+        __mtk_foe_entry_commit(ppe, &foe, hash);
+}
+
 void __mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash)
 {
         struct hlist_head *head = &ppe->foe_flow[hash / 2];
-        struct mtk_flow_entry *entry;
         struct mtk_foe_entry *hwe = &ppe->foe_table[hash];
+        struct mtk_flow_entry *entry;
+        struct mtk_foe_bridge key = {};
+        struct ethhdr *eh;
         bool found = false;
-
-        if (hlist_empty(head))
-                return;
+        u8 *tag;
 
         spin_lock_bh(&ppe_lock);
+
+        if (FIELD_GET(MTK_FOE_IB1_STATE, hwe->ib1) == MTK_FOE_STATE_BIND)
+                goto out;
+
         hlist_for_each_entry(entry, head, list) {
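+                /* Subflows found at this hash belong to an earlier binding;
+                 * unless the hardware entry has unexpectedly been re-bound,
+                 * release them so the hash can be bound again below.
+                 */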
+                if (entry->type == MTK_FLOW_TYPE_L2_SUBFLOW) {
+                        if (unlikely(FIELD_GET(MTK_FOE_IB1_STATE, hwe->ib1) ==
+                                     MTK_FOE_STATE_BIND))
+                                continue;
+
+                        entry->hash = 0xffff;
+                        __mtk_foe_entry_clear(ppe, entry);
+                        continue;
+                }
+
                 if (found || !mtk_flow_entry_match(entry, hwe)) {
                         if (entry->hash != 0xffff)
                                 entry->hash = 0xffff;
@@ -464,21 +630,50 @@ void __mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash)
                 __mtk_foe_entry_commit(ppe, &entry->data, hash);
                 found = true;
         }
+
+        if (found)
+                goto out;
+
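+        /* No exact flow matched: build an L2 lookup key from the packet's
+         * MAC header and VLAN tag and, if a bridge flow exists for it, bind
+         * this hash as one of its subflows.
+         */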
+        eh = eth_hdr(skb);
+        ether_addr_copy(key.dest_mac, eh->h_dest);
+        ether_addr_copy(key.src_mac, eh->h_source);
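+        /* tag points at the EtherType slot two bytes before the network
+         * header; MTK DSA frames carry a 4-byte switch tag there, which the
+         * DSA case skips before checking for an 802.1Q header.
+         */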
+        tag = skb->data - 2;
+        key.vlan = 0;
+        switch (skb->protocol) {
+#if IS_ENABLED(CONFIG_NET_DSA)
+        case htons(ETH_P_XDSA):
+                if (!netdev_uses_dsa(skb->dev) ||
+                    skb->dev->dsa_ptr->tag_ops->proto != DSA_TAG_PROTO_MTK)
+                        goto out;
+
+                tag += 4;
+                if (get_unaligned_be16(tag) != ETH_P_8021Q)
+                        break;
+
+                fallthrough;
+#endif
+        case htons(ETH_P_8021Q):
+                key.vlan = get_unaligned_be16(tag + 2) & VLAN_VID_MASK;
+                break;
+        default:
+                break;
+        }
+
+        entry = rhashtable_lookup_fast(&ppe->l2_flows, &key, mtk_flow_l2_ht_params);
+        if (!entry)
+                goto out;
+
+        mtk_foe_entry_commit_subflow(ppe, entry, hash);
+
+out:
         spin_unlock_bh(&ppe_lock);
 }
 
 int mtk_foe_entry_idle_time(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
 {
-        u16 now = mtk_eth_timestamp(ppe->eth) & MTK_FOE_IB1_BIND_TIMESTAMP;
-        u16 timestamp;
-
         mtk_flow_entry_update(ppe, entry);
-        timestamp = entry->data.ib1 & MTK_FOE_IB1_BIND_TIMESTAMP;
 
-        if (timestamp > now)
-                return MTK_FOE_IB1_BIND_TIMESTAMP + 1 - timestamp + now;
-        else
-                return now - timestamp;
+        return __mtk_foe_entry_idle_time(ppe, entry->data.ib1);
 }
 
 struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base,
@@ -492,6 +687,8 @@ struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base,
         if (!ppe)
                 return NULL;
 
+        rhashtable_init(&ppe->l2_flows, &mtk_flow_l2_ht_params);
+
         /* need to allocate a separate device, since the PPE DMA access is
          * not coherent.
          */