@@ -80,7 +80,9 @@ static const struct mtk_reg_map mtk_reg_map = {
 		.fq_blen = 0x1b2c,
 	},
 	.gdm1_cnt = 0x2400,
-	.gdma_to_ppe = 0x4444,
+	.gdma_to_ppe = {
+		[0] = 0x4444,
+	},
 	.ppe_base = 0x0c00,
 	.wdma_base = {
 		[0] = 0x2800,
@@ -144,7 +146,10 @@ static const struct mtk_reg_map mt7986_reg_map = {
 		.tx_sch_rate = 0x4798,
 	},
 	.gdm1_cnt = 0x1c00,
-	.gdma_to_ppe = 0x3333,
+	.gdma_to_ppe = {
+		[0] = 0x3333,
+		[1] = 0x4444,
+	},
 	.ppe_base = 0x2000,
 	.wdma_base = {
 		[0] = 0x4800,
@@ -192,7 +197,11 @@ static const struct mtk_reg_map mt7988_reg_map = {
 		.tx_sch_rate = 0x4798,
 	},
 	.gdm1_cnt = 0x1c00,
-	.gdma_to_ppe = 0x3333,
+	.gdma_to_ppe = {
+		[0] = 0x3333,
+		[1] = 0x4444,
+		[2] = 0xcccc,
+	},
 	.ppe_base = 0x2000,
 	.wdma_base = {
 		[0] = 0x4800,
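Note on the three register-map hunks above: gdma_to_ppe changes from a single forwarding value into a per-PPE array, one entry per PPE instance the SoC exposes (one on the legacy map, two on mt7986, three on mt7988). The matching struct change in mtk_eth_soc.h is not part of this excerpt, so the sketch below only mirrors the values visible in this diff, using a standalone struct rather than the driver's mtk_reg_map.

/* Standalone sketch, not driver code: the per-PPE forwarding values for the
 * mt7988 map as initialised above.  The array length of 3 is inferred from
 * this diff (index 2 exists, so at least three entries).
 */
#include <stdint.h>
#include <stdio.h>

struct reg_map_sketch {
	uint32_t gdma_to_ppe[3];	/* GDMA -> PPEn forwarding value */
	uint32_t ppe_base;
};

static const struct reg_map_sketch mt7988_sketch = {
	.gdma_to_ppe = { [0] = 0x3333, [1] = 0x4444, [2] = 0xcccc },
	.ppe_base = 0x2000,
};

int main(void)
{
	for (int i = 0; i < 3; i++)
		printf("PPE%d forwarding value: 0x%04x\n",
		       i, (unsigned)mt7988_sketch.gdma_to_ppe[i]);
	return 0;
}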
@@ -2015,6 +2024,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
 	struct mtk_rx_dma_v2 *rxd, trxd;
 	int done = 0, bytes = 0;
 	dma_addr_t dma_addr = DMA_MAPPING_ERROR;
+	int ppe_idx = 0;
 
 	while (done < budget) {
 		unsigned int pktlen, *rxdcsum;
@@ -2058,6 +2068,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
 			goto release_desc;
 
 		netdev = eth->netdev[mac];
+		ppe_idx = eth->mac[mac]->ppe_idx;
 
 		if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
 			goto release_desc;
@@ -2181,7 +2192,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
 		}
 
 		if (reason == MTK_PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED)
-			mtk_ppe_check_skb(eth->ppe[0], skb, hash);
+			mtk_ppe_check_skb(eth->ppe[ppe_idx], skb, hash);
 
 		skb_record_rx_queue(skb, 0);
 		napi_gro_receive(napi, skb);
@@ -3276,37 +3287,27 @@ static int mtk_start_dma(struct mtk_eth *eth)
 	return 0;
 }
 
-static void mtk_gdm_config(struct mtk_eth *eth, u32 config)
+static void mtk_gdm_config(struct mtk_eth *eth, u32 id, u32 config)
 {
-	int i;
+	u32 val;
 
 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
 		return;
 
-	for (i = 0; i < MTK_MAX_DEVS; i++) {
-		u32 val;
-
-		if (!eth->netdev[i])
-			continue;
+	val = mtk_r32(eth, MTK_GDMA_FWD_CFG(id));
 
-		val = mtk_r32(eth, MTK_GDMA_FWD_CFG(i));
+	/* default setup the forward port to send frame to PDMA */
+	val &= ~0xffff;
 
-		/* default setup the forward port to send frame to PDMA */
-		val &= ~0xffff;
+	/* Enable RX checksum */
+	val |= MTK_GDMA_ICS_EN | MTK_GDMA_TCS_EN | MTK_GDMA_UCS_EN;
 
-		/* Enable RX checksum */
-		val |= MTK_GDMA_ICS_EN | MTK_GDMA_TCS_EN | MTK_GDMA_UCS_EN;
+	val |= config;
 
-		val |= config;
+	if (eth->netdev[id] && netdev_uses_dsa(eth->netdev[id]))
+		val |= MTK_GDMA_SPECIAL_TAG;
 
-		if (netdev_uses_dsa(eth->netdev[i]))
-			val |= MTK_GDMA_SPECIAL_TAG;
-
-		mtk_w32(eth, val, MTK_GDMA_FWD_CFG(i));
-	}
-	/* Reset and enable PSE */
-	mtk_w32(eth, RST_GL_PSE, MTK_RST_GL);
-	mtk_w32(eth, 0, MTK_RST_GL);
+	mtk_w32(eth, val, MTK_GDMA_FWD_CFG(id));
 }
 
 
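mtk_gdm_config() now programs a single GDM port identified by id instead of looping over every device, and the PSE reset that used to follow the loop moves to the caller (see the mtk_open() hunk below). A minimal sketch of the per-port value computation follows; the *_SKETCH masks are placeholders, not the real MTK_GDMA_* register definitions.

#include <stdint.h>
#include <stdio.h>

#define GDMA_CSUM_EN_SKETCH	0x00070000u	/* stands in for ICS/TCS/UCS */
#define GDMA_SPECIAL_TAG_SKETCH	0x01000000u	/* stands in for SPECIAL_TAG */

/* Keep the upper bits of the GDMA forwarding config, force RX checksum
 * offload on, OR in the chosen forwarding target (PDMA or a gdma_to_ppe[]
 * value), and optionally set the DSA special-tag bit.
 */
static uint32_t gdm_fwd_cfg_sketch(uint32_t old, uint32_t fwd, int uses_dsa)
{
	uint32_t val = old & ~0xffffu;	/* clear the forward-port field */

	val |= GDMA_CSUM_EN_SKETCH;	/* enable RX checksum */
	val |= fwd;			/* e.g. gdma_to_ppe[1] = 0x4444 */
	if (uses_dsa)
		val |= GDMA_SPECIAL_TAG_SKETCH;
	return val;
}

int main(void)
{
	/* forward this GDM's traffic to PPE1 on a port without DSA */
	printf("0x%08x\n", (unsigned)gdm_fwd_cfg_sketch(0, 0x4444, 0));
	return 0;
}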
@@ -3366,7 +3367,10 @@ static int mtk_open(struct net_device *dev)
 {
 	struct mtk_mac *mac = netdev_priv(dev);
 	struct mtk_eth *eth = mac->hw;
-	int i, err;
+	struct mtk_mac *target_mac;
+	int i, err, ppe_num;
+
+	ppe_num = eth->soc->ppe_num;
 
 	err = phylink_of_phy_connect(mac->phylink, mac->of_node, 0);
 	if (err) {
@@ -3390,18 +3394,38 @@ static int mtk_open(struct net_device *dev)
 		for (i = 0; i < ARRAY_SIZE(eth->ppe); i++)
 			mtk_ppe_start(eth->ppe[i]);
 
-		gdm_config = soc->offload_version ? soc->reg_map->gdma_to_ppe
-						  : MTK_GDMA_TO_PDMA;
-		mtk_gdm_config(eth, gdm_config);
+		for (i = 0; i < MTK_MAX_DEVS; i++) {
+			if (!eth->netdev[i])
+				break;
+
+			target_mac = netdev_priv(eth->netdev[i]);
+			if (!soc->offload_version) {
+				target_mac->ppe_idx = 0;
+				gdm_config = MTK_GDMA_TO_PDMA;
+			} else if (ppe_num >= 3 && target_mac->id == 2) {
+				target_mac->ppe_idx = 2;
+				gdm_config = soc->reg_map->gdma_to_ppe[2];
+			} else if (ppe_num >= 2 && target_mac->id == 1) {
+				target_mac->ppe_idx = 1;
+				gdm_config = soc->reg_map->gdma_to_ppe[1];
+			} else {
+				target_mac->ppe_idx = 0;
+				gdm_config = soc->reg_map->gdma_to_ppe[0];
+			}
+			mtk_gdm_config(eth, target_mac->id, gdm_config);
+		}
+		/* Reset and enable PSE */
+		mtk_w32(eth, RST_GL_PSE, MTK_RST_GL);
+		mtk_w32(eth, 0, MTK_RST_GL);
 
 		napi_enable(&eth->tx_napi);
 		napi_enable(&eth->rx_napi);
 		mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
 		mtk_rx_irq_enable(eth, soc->rx.irq_done_mask);
 		refcount_set(&eth->dma_refcnt, 1);
-	}
-	else
+	} else {
 		refcount_inc(&eth->dma_refcnt);
+	}
 
 	phylink_start(mac->phylink);
 	netif_tx_start_all_queues(dev);
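The mtk_open() hunk above is where each MAC gets bound to a PPE: MAC2 goes to PPE2 when the SoC has three PPEs, MAC1 to PPE1 when it has at least two, everything else (and the no-offload case) to PPE0, and the matching gdma_to_ppe[] value is then programmed through mtk_gdm_config(). A standalone sketch of that selection rule follows; mtk_select_ppe_idx() is a hypothetical helper, not a driver function.

#include <stdio.h>

/* Hypothetical helper mirroring the ppe_idx selection done in mtk_open() */
static int mtk_select_ppe_idx(int offload_version, int ppe_num, int mac_id)
{
	if (!offload_version)
		return 0;
	if (ppe_num >= 3 && mac_id == 2)
		return 2;
	if (ppe_num >= 2 && mac_id == 1)
		return 1;
	return 0;
}

int main(void)
{
	/* mt7988 has ppe_num = 3, so MAC0/1/2 land on PPE0/1/2 */
	for (int mac = 0; mac < 3; mac++)
		printf("mt7988: MAC%d -> PPE%d\n", mac,
		       mtk_select_ppe_idx(2, 3, mac));
	return 0;
}

The chosen index is cached in mac->ppe_idx, which is what the mtk_poll_rx() hunks above read so that flows are handed to the PPE instance serving the receiving MAC instead of always PPE0.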
@@ -3478,7 +3502,8 @@ static int mtk_stop(struct net_device *dev)
 	if (!refcount_dec_and_test(&eth->dma_refcnt))
 		return 0;
 
-	mtk_gdm_config(eth, MTK_GDMA_DROP_ALL);
+	for (i = 0; i < MTK_MAX_DEVS; i++)
+		mtk_gdm_config(eth, i, MTK_GDMA_DROP_ALL);
 
 	mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
 	mtk_rx_irq_disable(eth, eth->soc->rx.irq_done_mask);
@@ -4959,23 +4984,24 @@ static int mtk_probe(struct platform_device *pdev)
 	}
 
 	if (eth->soc->offload_version) {
-		u32 num_ppe = mtk_is_netsys_v2_or_greater(eth) ? 2 : 1;
+		u8 ppe_num = eth->soc->ppe_num;
 
-		num_ppe = min_t(u32, ARRAY_SIZE(eth->ppe), num_ppe);
-		for (i = 0; i < num_ppe; i++) {
-			u32 ppe_addr = eth->soc->reg_map->ppe_base + i * 0x400;
+		ppe_num = min_t(u8, ARRAY_SIZE(eth->ppe), ppe_num);
+		for (i = 0; i < ppe_num; i++) {
+			u32 ppe_addr = eth->soc->reg_map->ppe_base;
 
+			ppe_addr += (i == 2 ? 0xc00 : i * 0x400);
 			eth->ppe[i] = mtk_ppe_init(eth, eth->base + ppe_addr, i);
 
 			if (!eth->ppe[i]) {
 				err = -ENOMEM;
 				goto err_deinit_ppe;
 			}
-		}
+			err = mtk_eth_offload_init(eth, i);
 
-		err = mtk_eth_offload_init(eth);
-		if (err)
-			goto err_deinit_ppe;
+			if (err)
+				goto err_deinit_ppe;
+		}
 	}
 
 	for (i = 0; i < MTK_MAX_DEVS; i++) {
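In mtk_probe() the number of PPE instances now comes from the per-SoC ppe_num field instead of being derived from the NETSYS version, mtk_eth_offload_init() is called once per instance, and the MMIO offset is no longer a flat i * 0x400: PPE0 and PPE1 stay 0x400 apart while PPE2 sits at ppe_base + 0xc00. A small sketch of that offset arithmetic, using the mt7986/mt7988 ppe_base of 0x2000 from the register maps above:

#include <stdint.h>
#include <stdio.h>

/* Same offset rule as the probe code above: 0x400 stride, except PPE2 */
static uint32_t ppe_addr(uint32_t ppe_base, unsigned int i)
{
	return ppe_base + (i == 2 ? 0xc00 : i * 0x400);
}

int main(void)
{
	for (unsigned int i = 0; i < 3; i++)
		printf("PPE%u at 0x%04x\n", i, (unsigned)ppe_addr(0x2000, i));
	/* prints: PPE0 at 0x2000, PPE1 at 0x2400, PPE2 at 0x2c00 */
	return 0;
}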
@@ -5083,6 +5109,7 @@ static const struct mtk_soc_data mt7621_data = {
 	.required_pctl = false,
 	.version = 1,
 	.offload_version = 1,
+	.ppe_num = 1,
 	.hash_offset = 2,
 	.foe_entry_size = MTK_FOE_ENTRY_V1_SIZE,
 	.tx = {
@@ -5111,6 +5138,7 @@ static const struct mtk_soc_data mt7622_data = {
 	.required_pctl = false,
 	.version = 1,
 	.offload_version = 2,
+	.ppe_num = 1,
 	.hash_offset = 2,
 	.has_accounting = true,
 	.foe_entry_size = MTK_FOE_ENTRY_V1_SIZE,
@@ -5139,6 +5167,7 @@ static const struct mtk_soc_data mt7623_data = {
 	.required_pctl = true,
 	.version = 1,
 	.offload_version = 1,
+	.ppe_num = 1,
 	.hash_offset = 2,
 	.foe_entry_size = MTK_FOE_ENTRY_V1_SIZE,
 	.disable_pll_modes = true,
@@ -5194,6 +5223,7 @@ static const struct mtk_soc_data mt7981_data = {
 	.required_pctl = false,
 	.version = 2,
 	.offload_version = 2,
+	.ppe_num = 2,
 	.hash_offset = 4,
 	.has_accounting = true,
 	.foe_entry_size = MTK_FOE_ENTRY_V2_SIZE,
@@ -5223,6 +5253,7 @@ static const struct mtk_soc_data mt7986_data = {
 	.required_pctl = false,
 	.version = 2,
 	.offload_version = 2,
+	.ppe_num = 2,
 	.hash_offset = 4,
 	.has_accounting = true,
 	.foe_entry_size = MTK_FOE_ENTRY_V2_SIZE,
@@ -5252,6 +5283,7 @@ static const struct mtk_soc_data mt7988_data = {
 	.required_pctl = false,
 	.version = 3,
 	.offload_version = 2,
+	.ppe_num = 3,
 	.hash_offset = 4,
 	.has_accounting = true,
 	.foe_entry_size = MTK_FOE_ENTRY_V3_SIZE,
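The SoC data hunks above introduce the new ppe_num field that the probe and open paths consult. Collected as a quick reference (sketch data only, taken directly from the entries in this diff):

#include <stdio.h>

/* ppe_num per SoC entry as set in this diff */
static const struct { const char *soc; int ppe_num; } socs[] = {
	{ "mt7621", 1 }, { "mt7622", 1 }, { "mt7623", 1 },
	{ "mt7981", 2 }, { "mt7986", 2 }, { "mt7988", 3 },
};

int main(void)
{
	for (size_t i = 0; i < sizeof(socs) / sizeof(socs[0]); i++)
		printf("%s: %d PPE instance(s)\n", socs[i].soc, socs[i].ppe_num);
	return 0;
}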