@@ -78,6 +78,8 @@ struct taprio_sched {
 	struct sched_gate_list __rcu *admin_sched;
 	struct hrtimer advance_timer;
 	struct list_head taprio_list;
+	u32 max_frm_len[TC_MAX_QUEUE]; /* for the fast path */
+	u32 max_sdu[TC_MAX_QUEUE]; /* for dump and offloading */
 	u32 txtime_delay;
 };
 
@@ -415,6 +417,9 @@ static int taprio_enqueue_one(struct sk_buff *skb, struct Qdisc *sch,
 			      struct Qdisc *child, struct sk_buff **to_free)
 {
 	struct taprio_sched *q = qdisc_priv(sch);
+	struct net_device *dev = qdisc_dev(sch);
+	int prio = skb->priority;
+	u8 tc;
 
 	/* sk_flags are only safe to use on full sockets. */
 	if (skb->sk && sk_fullsock(skb->sk) && sock_flag(skb->sk, SOCK_TXTIME)) {
@@ -426,6 +431,11 @@ static int taprio_enqueue_one(struct sk_buff *skb, struct Qdisc *sch,
 		return qdisc_drop(skb, sch, to_free);
 	}
 
+	/* Devices with full offload are expected to honor this in hardware */
+	tc = netdev_get_prio_tc_map(dev, prio);
+	if (skb->len > q->max_frm_len[tc])
+		return qdisc_drop(skb, sch, to_free);
+
 	qdisc_qstats_backlog_inc(sch, skb);
 	sch->q.qlen++;
 
@@ -754,6 +764,11 @@ static const struct nla_policy entry_policy[TCA_TAPRIO_SCHED_ENTRY_MAX + 1] = {
 	[TCA_TAPRIO_SCHED_ENTRY_INTERVAL]  = { .type = NLA_U32 },
 };
 
+static const struct nla_policy taprio_tc_policy[TCA_TAPRIO_TC_ENTRY_MAX + 1] = {
+	[TCA_TAPRIO_TC_ENTRY_INDEX]	   = { .type = NLA_U32 },
+	[TCA_TAPRIO_TC_ENTRY_MAX_SDU]	   = { .type = NLA_U32 },
+};
+
 static const struct nla_policy taprio_policy[TCA_TAPRIO_ATTR_MAX + 1] = {
 	[TCA_TAPRIO_ATTR_PRIOMAP]	       = {
 		.len = sizeof(struct tc_mqprio_qopt)
@@ -766,6 +781,7 @@ static const struct nla_policy taprio_policy[TCA_TAPRIO_ATTR_MAX + 1] = {
 	[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION] = { .type = NLA_S64 },
 	[TCA_TAPRIO_ATTR_FLAGS]                      = { .type = NLA_U32 },
 	[TCA_TAPRIO_ATTR_TXTIME_DELAY]		     = { .type = NLA_U32 },
+	[TCA_TAPRIO_ATTR_TC_ENTRY]		     = { .type = NLA_NESTED },
 };
 
 static int fill_sched_entry(struct taprio_sched *q, struct nlattr **tb,
@@ -1216,14 +1232,28 @@ static int taprio_enable_offload(struct net_device *dev,
 {
 	const struct net_device_ops *ops = dev->netdev_ops;
 	struct tc_taprio_qopt_offload *offload;
-	int err = 0;
+	struct tc_taprio_caps caps;
+	int tc, err = 0;
 
 	if (!ops->ndo_setup_tc) {
 		NL_SET_ERR_MSG(extack,
 			       "Device does not support taprio offload");
 		return -EOPNOTSUPP;
 	}
 
+	qdisc_offload_query_caps(dev, TC_SETUP_QDISC_TAPRIO,
+				 &caps, sizeof(caps));
+
+	if (!caps.supports_queue_max_sdu) {
+		for (tc = 0; tc < TC_MAX_QUEUE; tc++) {
+			if (q->max_sdu[tc]) {
+				NL_SET_ERR_MSG_MOD(extack,
+						   "Device does not handle queueMaxSDU");
+				return -EOPNOTSUPP;
+			}
+		}
+	}
+
 	offload = taprio_offload_alloc(sched->num_entries);
 	if (!offload) {
 		NL_SET_ERR_MSG(extack,
@@ -1233,6 +1263,9 @@ static int taprio_enable_offload(struct net_device *dev,
 	offload->enable = 1;
 	taprio_sched_to_offload(dev, sched, offload);
 
+	for (tc = 0; tc < TC_MAX_QUEUE; tc++)
+		offload->max_sdu[tc] = q->max_sdu[tc];
+
 	err = ops->ndo_setup_tc(dev, TC_SETUP_QDISC_TAPRIO, offload);
 	if (err < 0) {
 		NL_SET_ERR_MSG(extack,
@@ -1367,6 +1400,89 @@ static int taprio_parse_clockid(struct Qdisc *sch, struct nlattr **tb,
 	return err;
 }
 
+static int taprio_parse_tc_entry(struct Qdisc *sch,
+				 struct nlattr *opt,
+				 u32 max_sdu[TC_QOPT_MAX_QUEUE],
+				 unsigned long *seen_tcs,
+				 struct netlink_ext_ack *extack)
+{
+	struct nlattr *tb[TCA_TAPRIO_TC_ENTRY_MAX + 1] = { };
+	struct net_device *dev = qdisc_dev(sch);
+	u32 val = 0;
+	int err, tc;
+
+	err = nla_parse_nested(tb, TCA_TAPRIO_TC_ENTRY_MAX, opt,
+			       taprio_tc_policy, extack);
+	if (err < 0)
+		return err;
+
+	if (!tb[TCA_TAPRIO_TC_ENTRY_INDEX]) {
+		NL_SET_ERR_MSG_MOD(extack, "TC entry index missing");
+		return -EINVAL;
+	}
+
+	tc = nla_get_u32(tb[TCA_TAPRIO_TC_ENTRY_INDEX]);
+	if (tc >= TC_QOPT_MAX_QUEUE) {
+		NL_SET_ERR_MSG_MOD(extack, "TC entry index out of range");
+		return -ERANGE;
+	}
+
+	if (*seen_tcs & BIT(tc)) {
+		NL_SET_ERR_MSG_MOD(extack, "Duplicate TC entry");
+		return -EINVAL;
+	}
+
+	*seen_tcs |= BIT(tc);
+
+	if (tb[TCA_TAPRIO_TC_ENTRY_MAX_SDU])
+		val = nla_get_u32(tb[TCA_TAPRIO_TC_ENTRY_MAX_SDU]);
+
+	if (val > dev->max_mtu) {
+		NL_SET_ERR_MSG_MOD(extack, "TC max SDU exceeds device max MTU");
+		return -ERANGE;
+	}
+
+	max_sdu[tc] = val;
+
+	return 0;
+}
+
+static int taprio_parse_tc_entries(struct Qdisc *sch,
+				   struct nlattr *opt,
+				   struct netlink_ext_ack *extack)
+{
+	struct taprio_sched *q = qdisc_priv(sch);
+	struct net_device *dev = qdisc_dev(sch);
+	u32 max_sdu[TC_QOPT_MAX_QUEUE];
+	unsigned long seen_tcs = 0;
+	struct nlattr *n;
+	int tc, rem;
+	int err = 0;
+
+	for (tc = 0; tc < TC_QOPT_MAX_QUEUE; tc++)
+		max_sdu[tc] = q->max_sdu[tc];
+
+	nla_for_each_nested(n, opt, rem) {
+		if (nla_type(n) != TCA_TAPRIO_ATTR_TC_ENTRY)
+			continue;
+
+		err = taprio_parse_tc_entry(sch, n, max_sdu, &seen_tcs, extack);
+		if (err)
+			goto out;
+	}
+
+	for (tc = 0; tc < TC_QOPT_MAX_QUEUE; tc++) {
+		q->max_sdu[tc] = max_sdu[tc];
+		if (max_sdu[tc])
+			q->max_frm_len[tc] = max_sdu[tc] + dev->hard_header_len;
+		else
+			q->max_frm_len[tc] = U32_MAX; /* never oversized */
+	}
+
+out:
+	return err;
+}
+
 static int taprio_mqprio_cmp(const struct net_device *dev,
 			     const struct tc_mqprio_qopt *mqprio)
 {
@@ -1445,6 +1561,10 @@ static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
 	if (err < 0)
 		return err;
 
+	err = taprio_parse_tc_entries(sch, opt, extack);
+	if (err)
+		return err;
+
 	new_admin = kzalloc(sizeof(*new_admin), GFP_KERNEL);
 	if (!new_admin) {
 		NL_SET_ERR_MSG(extack, "Not enough memory for a new schedule");
@@ -1825,6 +1945,33 @@ static int dump_schedule(struct sk_buff *msg,
 	return -1;
 }
 
+static int taprio_dump_tc_entries(struct taprio_sched *q, struct sk_buff *skb)
+{
+	struct nlattr *n;
+	int tc;
+
+	for (tc = 0; tc < TC_MAX_QUEUE; tc++) {
+		n = nla_nest_start(skb, TCA_TAPRIO_ATTR_TC_ENTRY);
+		if (!n)
+			return -EMSGSIZE;
+
+		if (nla_put_u32(skb, TCA_TAPRIO_TC_ENTRY_INDEX, tc))
+			goto nla_put_failure;
+
+		if (nla_put_u32(skb, TCA_TAPRIO_TC_ENTRY_MAX_SDU,
+				q->max_sdu[tc]))
+			goto nla_put_failure;
+
+		nla_nest_end(skb, n);
+	}
+
+	return 0;
+
+nla_put_failure:
+	nla_nest_cancel(skb, n);
+	return -EMSGSIZE;
+}
+
 static int taprio_dump(struct Qdisc *sch, struct sk_buff *skb)
 {
 	struct taprio_sched *q = qdisc_priv(sch);
@@ -1863,6 +2010,9 @@ static int taprio_dump(struct Qdisc *sch, struct sk_buff *skb)
 	    nla_put_u32(skb, TCA_TAPRIO_ATTR_TXTIME_DELAY, q->txtime_delay))
 		goto options_error;
 
+	if (taprio_dump_tc_entries(q, skb))
+		goto options_error;
+
 	if (oper && dump_schedule(skb, oper))
 		goto options_error;
 
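
Note (not part of the patch): a driver that reports supports_queue_max_sdu through the capabilities query (issued above via qdisc_offload_query_caps()) is expected to program the per-TC limits it receives in offload->max_sdu[] from its ndo_setup_tc() handler. The sketch below shows one way that could look; my_priv, my_taprio_replace() and my_port_set_max_sdu() are hypothetical names used only for illustration.

/*
 * Illustrative sketch only, not part of this patch: consuming the per-TC
 * queueMaxSDU limits on the driver side. my_priv, my_taprio_replace() and
 * my_port_set_max_sdu() are hypothetical.
 */
static int my_taprio_replace(struct net_device *ndev,
			     struct tc_taprio_qopt_offload *offload)
{
	struct my_priv *priv = netdev_priv(ndev);
	int tc;

	for (tc = 0; tc < TC_MAX_QUEUE; tc++) {
		/* 0 means no queueMaxSDU limit was requested for this tc */
		if (!offload->max_sdu[tc])
			continue;

		my_port_set_max_sdu(priv, tc, offload->max_sdu[tc]);
	}

	/* ... program the gate control list from offload->entries ... */

	return 0;
}

User space passes the limits as nested TCA_TAPRIO_ATTR_TC_ENTRY attributes (one index plus max SDU per traffic class), which is also the format taprio_dump_tc_entries() emits back on dump.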