Skip to content

Commit 57e661a

Browse files
wkzkuba-moo
authored and committed
net: dsa: mv88e6xxx: Link aggregation support
Support offloading of LAGs to hardware. LAGs may be attached to a bridge in which case VLANs, multicast groups, etc. are also offloaded as usual. Signed-off-by: Tobias Waldekranz <[email protected]> Reviewed-by: Vladimir Oltean <[email protected]> Signed-off-by: Jakub Kicinski <[email protected]>
1 parent 058102a commit 57e661a

File tree

5 files changed

+330
-5
lines changed

5 files changed

+330
-5
lines changed

drivers/net/dsa/mv88e6xxx/chip.c

Lines changed: 295 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1396,15 +1396,32 @@ static int mv88e6xxx_mac_setup(struct mv88e6xxx_chip *chip)
13961396

13971397
static int mv88e6xxx_pvt_map(struct mv88e6xxx_chip *chip, int dev, int port)
13981398
{
1399+
struct dsa_switch_tree *dst = chip->ds->dst;
1400+
struct dsa_switch *ds;
1401+
struct dsa_port *dp;
13991402
u16 pvlan = 0;
14001403

14011404
if (!mv88e6xxx_has_pvt(chip))
14021405
return 0;
14031406

14041407
/* Skip the local source device, which uses in-chip port VLAN */
1405-
if (dev != chip->ds->index)
1408+
if (dev != chip->ds->index) {
14061409
pvlan = mv88e6xxx_port_vlan(chip, dev, port);
14071410

1411+
ds = dsa_switch_find(dst->index, dev);
1412+
dp = ds ? dsa_to_port(ds, port) : NULL;
1413+
if (dp && dp->lag_dev) {
1414+
/* As the PVT is used to limit flooding of
1415+
* FORWARD frames, which use the LAG ID as the
1416+
* source port, we must translate dev/port to
1417+
* the special "LAG device" in the PVT, using
1418+
* the LAG ID as the port number.
1419+
*/
1420+
dev = MV88E6XXX_G2_PVT_ADRR_DEV_TRUNK;
1421+
port = dsa_lag_id(dst, dp->lag_dev);
1422+
}
1423+
}
1424+
14081425
return mv88e6xxx_g2_pvt_write(chip, dev, port, pvlan);
14091426
}
14101427

@@ -5364,6 +5381,271 @@ static int mv88e6xxx_port_egress_floods(struct dsa_switch *ds, int port,
53645381
return err;
53655382
}
53665383

5384+
static bool mv88e6xxx_lag_can_offload(struct dsa_switch *ds,
5385+
struct net_device *lag,
5386+
struct netdev_lag_upper_info *info)
5387+
{
5388+
struct dsa_port *dp;
5389+
int id, members = 0;
5390+
5391+
id = dsa_lag_id(ds->dst, lag);
5392+
if (id < 0 || id >= ds->num_lag_ids)
5393+
return false;
5394+
5395+
dsa_lag_foreach_port(dp, ds->dst, lag)
5396+
/* Includes the port joining the LAG */
5397+
members++;
5398+
5399+
if (members > 8)
5400+
return false;
5401+
5402+
/* We could potentially relax this to include active
5403+
* backup in the future.
5404+
*/
5405+
if (info->tx_type != NETDEV_LAG_TX_TYPE_HASH)
5406+
return false;
5407+
5408+
/* Ideally we would also validate that the hash type matches
5409+
* the hardware. Alas, this is always set to unknown on team
5410+
* interfaces.
5411+
*/
5412+
return true;
5413+
}
5414+
5415+
static int mv88e6xxx_lag_sync_map(struct dsa_switch *ds, struct net_device *lag)
5416+
{
5417+
struct mv88e6xxx_chip *chip = ds->priv;
5418+
struct dsa_port *dp;
5419+
u16 map = 0;
5420+
int id;
5421+
5422+
id = dsa_lag_id(ds->dst, lag);
5423+
5424+
/* Build the map of all ports to distribute flows destined for
5425+
* this LAG. This can be either a local user port, or a DSA
5426+
* port if the LAG port is on a remote chip.
5427+
*/
5428+
dsa_lag_foreach_port(dp, ds->dst, lag)
5429+
map |= BIT(dsa_towards_port(ds, dp->ds->index, dp->index));
5430+
5431+
return mv88e6xxx_g2_trunk_mapping_write(chip, id, map);
5432+
}
5433+
5434+
/* Row N (i.e. index members-1) describes a LAG with N+1 active
 * members. Column M holds the set of the eight hash buckets (as a
 * bitmask) served by the M:th member port.
 *
 * Example: in a LAG with three active ports, the second port
 * ([2][1]) carries the flows hashed to buckets 3, 4 and 5 (0x38).
 */
static const u8 mv88e6xxx_lag_mask_table[8][8] = {
	{ 0xff, 0, 0, 0, 0, 0, 0, 0 },
	{ 0x0f, 0xf0, 0, 0, 0, 0, 0, 0 },
	{ 0x07, 0x38, 0xc0, 0, 0, 0, 0, 0 },
	{ 0x03, 0x0c, 0x30, 0xc0, 0, 0, 0, 0 },
	{ 0x03, 0x0c, 0x30, 0x40, 0x80, 0, 0, 0 },
	{ 0x03, 0x0c, 0x10, 0x20, 0x40, 0x80, 0, 0 },
	{ 0x03, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0 },
	{ 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80 },
};

/* Set @port's bit in every per-bucket mask that the @nth active
 * member of a LAG with @num_tx active members is responsible for.
 * An @nth at or beyond the (clamped) member count leaves @mask
 * untouched.
 */
static void mv88e6xxx_lag_set_port_mask(u16 *mask, int port,
					int num_tx, int nth)
{
	u8 buckets = 0;
	int bucket;

	/* The table covers at most eight active members */
	if (num_tx > 8)
		num_tx = 8;
	if (nth < num_tx)
		buckets = mv88e6xxx_lag_mask_table[num_tx - 1][nth];

	for (bucket = 0; bucket < 8; bucket++)
		if (buckets & BIT(bucket))
			mask[bucket] |= BIT(port);
}
5468+
5469+
static int mv88e6xxx_lag_sync_masks(struct dsa_switch *ds)
5470+
{
5471+
struct mv88e6xxx_chip *chip = ds->priv;
5472+
unsigned int id, num_tx;
5473+
struct net_device *lag;
5474+
struct dsa_port *dp;
5475+
int i, err, nth;
5476+
u16 mask[8];
5477+
u16 ivec;
5478+
5479+
/* Assume no port is a member of any LAG. */
5480+
ivec = BIT(mv88e6xxx_num_ports(chip)) - 1;
5481+
5482+
/* Disable all masks for ports that _are_ members of a LAG. */
5483+
list_for_each_entry(dp, &ds->dst->ports, list) {
5484+
if (!dp->lag_dev || dp->ds != ds)
5485+
continue;
5486+
5487+
ivec &= ~BIT(dp->index);
5488+
}
5489+
5490+
for (i = 0; i < 8; i++)
5491+
mask[i] = ivec;
5492+
5493+
/* Enable the correct subset of masks for all LAG ports that
5494+
* are in the Tx set.
5495+
*/
5496+
dsa_lags_foreach_id(id, ds->dst) {
5497+
lag = dsa_lag_dev(ds->dst, id);
5498+
if (!lag)
5499+
continue;
5500+
5501+
num_tx = 0;
5502+
dsa_lag_foreach_port(dp, ds->dst, lag) {
5503+
if (dp->lag_tx_enabled)
5504+
num_tx++;
5505+
}
5506+
5507+
if (!num_tx)
5508+
continue;
5509+
5510+
nth = 0;
5511+
dsa_lag_foreach_port(dp, ds->dst, lag) {
5512+
if (!dp->lag_tx_enabled)
5513+
continue;
5514+
5515+
if (dp->ds == ds)
5516+
mv88e6xxx_lag_set_port_mask(mask, dp->index,
5517+
num_tx, nth);
5518+
5519+
nth++;
5520+
}
5521+
}
5522+
5523+
for (i = 0; i < 8; i++) {
5524+
err = mv88e6xxx_g2_trunk_mask_write(chip, i, true, mask[i]);
5525+
if (err)
5526+
return err;
5527+
}
5528+
5529+
return 0;
5530+
}
5531+
5532+
/* Refresh both the trunk masks and @lag's mapping table entry. */
static int mv88e6xxx_lag_sync_masks_map(struct dsa_switch *ds,
					struct net_device *lag)
{
	int err;

	err = mv88e6xxx_lag_sync_masks(ds);
	if (err)
		return err;

	return mv88e6xxx_lag_sync_map(ds, lag);
}
5544+
5545+
static int mv88e6xxx_port_lag_change(struct dsa_switch *ds, int port)
5546+
{
5547+
struct mv88e6xxx_chip *chip = ds->priv;
5548+
int err;
5549+
5550+
mv88e6xxx_reg_lock(chip);
5551+
err = mv88e6xxx_lag_sync_masks(ds);
5552+
mv88e6xxx_reg_unlock(chip);
5553+
return err;
5554+
}
5555+
5556+
static int mv88e6xxx_port_lag_join(struct dsa_switch *ds, int port,
5557+
struct net_device *lag,
5558+
struct netdev_lag_upper_info *info)
5559+
{
5560+
struct mv88e6xxx_chip *chip = ds->priv;
5561+
int err, id;
5562+
5563+
if (!mv88e6xxx_lag_can_offload(ds, lag, info))
5564+
return -EOPNOTSUPP;
5565+
5566+
id = dsa_lag_id(ds->dst, lag);
5567+
5568+
mv88e6xxx_reg_lock(chip);
5569+
5570+
err = mv88e6xxx_port_set_trunk(chip, port, true, id);
5571+
if (err)
5572+
goto err_unlock;
5573+
5574+
err = mv88e6xxx_lag_sync_masks_map(ds, lag);
5575+
if (err)
5576+
goto err_clear_trunk;
5577+
5578+
mv88e6xxx_reg_unlock(chip);
5579+
return 0;
5580+
5581+
err_clear_trunk:
5582+
mv88e6xxx_port_set_trunk(chip, port, false, 0);
5583+
err_unlock:
5584+
mv88e6xxx_reg_unlock(chip);
5585+
return err;
5586+
}
5587+
5588+
static int mv88e6xxx_port_lag_leave(struct dsa_switch *ds, int port,
5589+
struct net_device *lag)
5590+
{
5591+
struct mv88e6xxx_chip *chip = ds->priv;
5592+
int err_sync, err_trunk;
5593+
5594+
mv88e6xxx_reg_lock(chip);
5595+
err_sync = mv88e6xxx_lag_sync_masks_map(ds, lag);
5596+
err_trunk = mv88e6xxx_port_set_trunk(chip, port, false, 0);
5597+
mv88e6xxx_reg_unlock(chip);
5598+
return err_sync ? : err_trunk;
5599+
}
5600+
5601+
static int mv88e6xxx_crosschip_lag_change(struct dsa_switch *ds, int sw_index,
5602+
int port)
5603+
{
5604+
struct mv88e6xxx_chip *chip = ds->priv;
5605+
int err;
5606+
5607+
mv88e6xxx_reg_lock(chip);
5608+
err = mv88e6xxx_lag_sync_masks(ds);
5609+
mv88e6xxx_reg_unlock(chip);
5610+
return err;
5611+
}
5612+
5613+
static int mv88e6xxx_crosschip_lag_join(struct dsa_switch *ds, int sw_index,
5614+
int port, struct net_device *lag,
5615+
struct netdev_lag_upper_info *info)
5616+
{
5617+
struct mv88e6xxx_chip *chip = ds->priv;
5618+
int err;
5619+
5620+
if (!mv88e6xxx_lag_can_offload(ds, lag, info))
5621+
return -EOPNOTSUPP;
5622+
5623+
mv88e6xxx_reg_lock(chip);
5624+
5625+
err = mv88e6xxx_lag_sync_masks_map(ds, lag);
5626+
if (err)
5627+
goto unlock;
5628+
5629+
err = mv88e6xxx_pvt_map(chip, sw_index, port);
5630+
5631+
unlock:
5632+
mv88e6xxx_reg_unlock(chip);
5633+
return err;
5634+
}
5635+
5636+
static int mv88e6xxx_crosschip_lag_leave(struct dsa_switch *ds, int sw_index,
5637+
int port, struct net_device *lag)
5638+
{
5639+
struct mv88e6xxx_chip *chip = ds->priv;
5640+
int err_sync, err_pvt;
5641+
5642+
mv88e6xxx_reg_lock(chip);
5643+
err_sync = mv88e6xxx_lag_sync_masks_map(ds, lag);
5644+
err_pvt = mv88e6xxx_pvt_map(chip, sw_index, port);
5645+
mv88e6xxx_reg_unlock(chip);
5646+
return err_sync ? : err_pvt;
5647+
}
5648+
53675649
static const struct dsa_switch_ops mv88e6xxx_switch_ops = {
53685650
.get_tag_protocol = mv88e6xxx_get_tag_protocol,
53695651
.setup = mv88e6xxx_setup,
@@ -5416,6 +5698,12 @@ static const struct dsa_switch_ops mv88e6xxx_switch_ops = {
54165698
.devlink_param_get = mv88e6xxx_devlink_param_get,
54175699
.devlink_param_set = mv88e6xxx_devlink_param_set,
54185700
.devlink_info_get = mv88e6xxx_devlink_info_get,
5701+
.port_lag_change = mv88e6xxx_port_lag_change,
5702+
.port_lag_join = mv88e6xxx_port_lag_join,
5703+
.port_lag_leave = mv88e6xxx_port_lag_leave,
5704+
.crosschip_lag_change = mv88e6xxx_crosschip_lag_change,
5705+
.crosschip_lag_join = mv88e6xxx_crosschip_lag_join,
5706+
.crosschip_lag_leave = mv88e6xxx_crosschip_lag_leave,
54195707
};
54205708

54215709
static int mv88e6xxx_register_switch(struct mv88e6xxx_chip *chip)
@@ -5435,6 +5723,12 @@ static int mv88e6xxx_register_switch(struct mv88e6xxx_chip *chip)
54355723
ds->ageing_time_min = chip->info->age_time_coeff;
54365724
ds->ageing_time_max = chip->info->age_time_coeff * U8_MAX;
54375725

5726+
/* Some chips support up to 32, but that requires enabling the
5727+
* 5-bit port mode, which we do not support. 640k^W16 ought to
5728+
* be enough for anyone.
5729+
*/
5730+
ds->num_lag_ids = 16;
5731+
54385732
dev_set_drvdata(dev, ds);
54395733

54405734
return dsa_register_switch(ds);

drivers/net/dsa/mv88e6xxx/global2.c

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -126,8 +126,8 @@ int mv88e6xxx_g2_device_mapping_write(struct mv88e6xxx_chip *chip, int target,
126126

127127
/* Offset 0x07: Trunk Mask Table register */
128128

129-
static int mv88e6xxx_g2_trunk_mask_write(struct mv88e6xxx_chip *chip, int num,
130-
bool hash, u16 mask)
129+
int mv88e6xxx_g2_trunk_mask_write(struct mv88e6xxx_chip *chip, int num,
130+
bool hash, u16 mask)
131131
{
132132
u16 val = (num << 12) | (mask & mv88e6xxx_port_mask(chip));
133133

@@ -140,8 +140,8 @@ static int mv88e6xxx_g2_trunk_mask_write(struct mv88e6xxx_chip *chip, int num,
140140

141141
/* Offset 0x08: Trunk Mapping Table register */
142142

143-
static int mv88e6xxx_g2_trunk_mapping_write(struct mv88e6xxx_chip *chip, int id,
144-
u16 map)
143+
int mv88e6xxx_g2_trunk_mapping_write(struct mv88e6xxx_chip *chip, int id,
144+
u16 map)
145145
{
146146
const u16 port_mask = BIT(mv88e6xxx_num_ports(chip)) - 1;
147147
u16 val = (id << 11) | (map & port_mask);

drivers/net/dsa/mv88e6xxx/global2.h

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -101,6 +101,7 @@
101101
#define MV88E6XXX_G2_PVT_ADDR_OP_WRITE_PVLAN 0x3000
102102
#define MV88E6XXX_G2_PVT_ADDR_OP_READ 0x4000
103103
#define MV88E6XXX_G2_PVT_ADDR_PTR_MASK 0x01ff
104+
#define MV88E6XXX_G2_PVT_ADRR_DEV_TRUNK 0x1f
104105

105106
/* Offset 0x0C: Cross-chip Port VLAN Data Register */
106107
#define MV88E6XXX_G2_PVT_DATA 0x0c
@@ -345,6 +346,10 @@ int mv88e6352_g2_mgmt_rsvd2cpu(struct mv88e6xxx_chip *chip);
345346

346347
int mv88e6xxx_g2_pot_clear(struct mv88e6xxx_chip *chip);
347348

349+
int mv88e6xxx_g2_trunk_mask_write(struct mv88e6xxx_chip *chip, int num,
350+
bool hash, u16 mask);
351+
int mv88e6xxx_g2_trunk_mapping_write(struct mv88e6xxx_chip *chip, int id,
352+
u16 map);
348353
int mv88e6xxx_g2_trunk_clear(struct mv88e6xxx_chip *chip);
349354

350355
int mv88e6xxx_g2_device_mapping_write(struct mv88e6xxx_chip *chip, int target,

0 commit comments

Comments
 (0)