Skip to content

Commit a21ecf0

Browse files
Era Mayflower authored and davem330 committed
macsec: Support XPN frame handling - IEEE 802.1AEbw
Support extended packet number cipher suites (802.1AEbw) frames handling. This does not include the needed netlink patches. * Added xpn boolean field to `struct macsec_secy`. * Added ssci field to `struct macsec_tx_sa` (802.1AE figure 10-5). * Added ssci field to `struct macsec_rx_sa` (802.1AE figure 10-5). * Added salt field to `struct macsec_key` (802.1AE 10.7 NOTE 1). * Created pn_t type for easy access to lower and upper halves. * Created salt_t type for easy access to the "ssci" and "pn" parts. * Created `macsec_fill_iv_xpn` function to create IV in XPN mode. * Support in PN recovery and preliminary replay check in XPN mode. In addition, according to IEEE 802.1AEbw figure 10-5, the PN of incoming frame can be 0 when XPN cipher suite is used, so fixed the function `macsec_validate_skb` to fail on PN=0 only if XPN is off. Signed-off-by: Era Mayflower <[email protected]> Signed-off-by: David S. Miller <[email protected]>
1 parent 65b7a2c commit a21ecf0

File tree

2 files changed

+136
-39
lines changed

2 files changed

+136
-39
lines changed

drivers/net/macsec.c

Lines changed: 94 additions & 36 deletions
Original file line numberDiff line numberDiff line change
@@ -19,6 +19,7 @@
1919
#include <net/gro_cells.h>
2020
#include <net/macsec.h>
2121
#include <linux/phy.h>
22+
#include <linux/byteorder/generic.h>
2223

2324
#include <uapi/linux/if_macsec.h>
2425

@@ -68,6 +69,16 @@ struct macsec_eth_header {
6869
sc; \
6970
sc = rtnl_dereference(sc->next))
7071

72+
/* True iff pn1 and pn2 fall in the same half of the 32-bit PN space,
 * i.e. their most-significant bits (bit 31) match.
 */
#define pn_same_half(pn1, pn2) (!(((pn1) >> 31) ^ ((pn2) >> 31)))
73+
74+
/* 12-byte GCM IV layout for XPN cipher suites: a 4-byte SSCI (also
 * addressable as raw bytes) followed by the 64-bit big-endian PN.
 */
struct gcm_iv_xpn {
75+
union {
76+
u8 short_secure_channel_id[4];
77+
ssci_t ssci;
78+
};
79+
__be64 pn;
80+
} __packed;
81+
7182
struct gcm_iv {
7283
union {
7384
u8 secure_channel_id[8];
@@ -372,8 +383,8 @@ static const struct macsec_ops *macsec_get_ops(struct macsec_dev *macsec,
372383
return __macsec_get_ops(macsec->offload, macsec, ctx);
373384
}
374385

375-
/* validate MACsec packet according to IEEE 802.1AE-2006 9.12 */
376-
static bool macsec_validate_skb(struct sk_buff *skb, u16 icv_len)
386+
/* validate MACsec packet according to IEEE 802.1AE-2018 9.12 */
387+
static bool macsec_validate_skb(struct sk_buff *skb, u16 icv_len, bool xpn)
377388
{
378389
struct macsec_eth_header *h = (struct macsec_eth_header *)skb->data;
379390
int len = skb->len - 2 * ETH_ALEN;
@@ -398,8 +409,8 @@ static bool macsec_validate_skb(struct sk_buff *skb, u16 icv_len)
398409
if (h->unused)
399410
return false;
400411

401-
/* rx.pn != 0 (figure 10-5) */
402-
if (!h->packet_number)
412+
/* rx.pn != 0 if not XPN (figure 10-5 with 802.11AEbw-2013 amendment) */
413+
if (!h->packet_number && !xpn)
403414
return false;
404415

405416
/* length check, f) g) h) i) */
@@ -411,6 +422,15 @@ static bool macsec_validate_skb(struct sk_buff *skb, u16 icv_len)
411422
#define MACSEC_NEEDED_HEADROOM (macsec_extra_len(true))
412423
#define MACSEC_NEEDED_TAILROOM MACSEC_STD_ICV_LEN
413424

425+
/* Build the 12-byte GCM IV for an XPN cipher suite (struct gcm_iv_xpn):
 * the SSCI and the 64-bit PN are each XORed with the matching part of
 * the key's salt.
 */
static void macsec_fill_iv_xpn(unsigned char *iv, ssci_t ssci, u64 pn,
426+
salt_t salt)
427+
{
428+
struct gcm_iv_xpn *gcm_iv = (struct gcm_iv_xpn *)iv;
429+
430+
gcm_iv->ssci = ssci ^ salt.ssci;
431+
gcm_iv->pn = cpu_to_be64(pn) ^ salt.pn;
432+
}
433+
414434
static void macsec_fill_iv(unsigned char *iv, sci_t sci, u32 pn)
415435
{
416436
struct gcm_iv *gcm_iv = (struct gcm_iv *)iv;
@@ -446,14 +466,19 @@ void macsec_pn_wrapped(struct macsec_secy *secy, struct macsec_tx_sa *tx_sa)
446466
}
447467
EXPORT_SYMBOL_GPL(macsec_pn_wrapped);
448468

449-
static u32 tx_sa_update_pn(struct macsec_tx_sa *tx_sa, struct macsec_secy *secy)
469+
static pn_t tx_sa_update_pn(struct macsec_tx_sa *tx_sa,
470+
struct macsec_secy *secy)
450471
{
451-
u32 pn;
472+
pn_t pn;
452473

453474
spin_lock_bh(&tx_sa->lock);
454-
pn = tx_sa->next_pn;
455475

456-
tx_sa->next_pn++;
476+
pn = tx_sa->next_pn_halves;
477+
if (secy->xpn)
478+
tx_sa->next_pn++;
479+
else
480+
tx_sa->next_pn_halves.lower++;
481+
457482
if (tx_sa->next_pn == 0)
458483
__macsec_pn_wrapped(secy, tx_sa);
459484
spin_unlock_bh(&tx_sa->lock);
@@ -568,7 +593,7 @@ static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
568593
struct macsec_tx_sa *tx_sa;
569594
struct macsec_dev *macsec = macsec_priv(dev);
570595
bool sci_present;
571-
u32 pn;
596+
pn_t pn;
572597

573598
secy = &macsec->secy;
574599
tx_sc = &secy->tx_sc;
@@ -610,12 +635,12 @@ static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
610635
memmove(hh, eth, 2 * ETH_ALEN);
611636

612637
pn = tx_sa_update_pn(tx_sa, secy);
613-
if (pn == 0) {
638+
if (pn.full64 == 0) {
614639
macsec_txsa_put(tx_sa);
615640
kfree_skb(skb);
616641
return ERR_PTR(-ENOLINK);
617642
}
618-
macsec_fill_sectag(hh, secy, pn, sci_present);
643+
macsec_fill_sectag(hh, secy, pn.lower, sci_present);
619644
macsec_set_shortlen(hh, unprotected_len - 2 * ETH_ALEN);
620645

621646
skb_put(skb, secy->icv_len);
@@ -646,7 +671,10 @@ static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
646671
return ERR_PTR(-ENOMEM);
647672
}
648673

649-
macsec_fill_iv(iv, secy->sci, pn);
674+
if (secy->xpn)
675+
macsec_fill_iv_xpn(iv, tx_sa->ssci, pn.full64, tx_sa->key.salt);
676+
else
677+
macsec_fill_iv(iv, secy->sci, pn.lower);
650678

651679
sg_init_table(sg, ret);
652680
ret = skb_to_sgvec(skb, sg, 0, skb->len);
@@ -698,13 +726,14 @@ static bool macsec_post_decrypt(struct sk_buff *skb, struct macsec_secy *secy, u
698726
u32 lowest_pn = 0;
699727

700728
spin_lock(&rx_sa->lock);
701-
if (rx_sa->next_pn >= secy->replay_window)
702-
lowest_pn = rx_sa->next_pn - secy->replay_window;
729+
if (rx_sa->next_pn_halves.lower >= secy->replay_window)
730+
lowest_pn = rx_sa->next_pn_halves.lower - secy->replay_window;
703731

704732
/* Now perform replay protection check again
705733
* (see IEEE 802.1AE-2006 figure 10-5)
706734
*/
707-
if (secy->replay_protect && pn < lowest_pn) {
735+
if (secy->replay_protect && pn < lowest_pn &&
736+
(!secy->xpn || pn_same_half(pn, lowest_pn))) {
708737
spin_unlock(&rx_sa->lock);
709738
u64_stats_update_begin(&rxsc_stats->syncp);
710739
rxsc_stats->stats.InPktsLate++;
@@ -753,8 +782,15 @@ static bool macsec_post_decrypt(struct sk_buff *skb, struct macsec_secy *secy, u
753782
}
754783
u64_stats_update_end(&rxsc_stats->syncp);
755784

756-
if (pn >= rx_sa->next_pn)
757-
rx_sa->next_pn = pn + 1;
785+
// Instead of "pn >=" - to support pn overflow in xpn
786+
if (pn + 1 > rx_sa->next_pn_halves.lower) {
787+
rx_sa->next_pn_halves.lower = pn + 1;
788+
} else if (secy->xpn &&
789+
!pn_same_half(pn, rx_sa->next_pn_halves.lower)) {
790+
rx_sa->next_pn_halves.upper++;
791+
rx_sa->next_pn_halves.lower = pn + 1;
792+
}
793+
758794
spin_unlock(&rx_sa->lock);
759795
}
760796

@@ -841,6 +877,7 @@ static struct sk_buff *macsec_decrypt(struct sk_buff *skb,
841877
unsigned char *iv;
842878
struct aead_request *req;
843879
struct macsec_eth_header *hdr;
880+
u32 hdr_pn;
844881
u16 icv_len = secy->icv_len;
845882

846883
macsec_skb_cb(skb)->valid = false;
@@ -860,7 +897,21 @@ static struct sk_buff *macsec_decrypt(struct sk_buff *skb,
860897
}
861898

862899
hdr = (struct macsec_eth_header *)skb->data;
863-
macsec_fill_iv(iv, sci, ntohl(hdr->packet_number));
900+
hdr_pn = ntohl(hdr->packet_number);
901+
902+
if (secy->xpn) {
903+
pn_t recovered_pn = rx_sa->next_pn_halves;
904+
905+
recovered_pn.lower = hdr_pn;
906+
if (hdr_pn < rx_sa->next_pn_halves.lower &&
907+
!pn_same_half(hdr_pn, rx_sa->next_pn_halves.lower))
908+
recovered_pn.upper++;
909+
910+
macsec_fill_iv_xpn(iv, rx_sa->ssci, recovered_pn.full64,
911+
rx_sa->key.salt);
912+
} else {
913+
macsec_fill_iv(iv, sci, hdr_pn);
914+
}
864915

865916
sg_init_table(sg, ret);
866917
ret = skb_to_sgvec(skb, sg, 0, skb->len);
@@ -1001,7 +1052,7 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
10011052
struct macsec_rxh_data *rxd;
10021053
struct macsec_dev *macsec;
10031054
sci_t sci;
1004-
u32 pn;
1055+
u32 hdr_pn;
10051056
bool cbit;
10061057
struct pcpu_rx_sc_stats *rxsc_stats;
10071058
struct pcpu_secy_stats *secy_stats;
@@ -1072,7 +1123,7 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
10721123
secy_stats = this_cpu_ptr(macsec->stats);
10731124
rxsc_stats = this_cpu_ptr(rx_sc->stats);
10741125

1075-
if (!macsec_validate_skb(skb, secy->icv_len)) {
1126+
if (!macsec_validate_skb(skb, secy->icv_len, secy->xpn)) {
10761127
u64_stats_update_begin(&secy_stats->syncp);
10771128
secy_stats->stats.InPktsBadTag++;
10781129
u64_stats_update_end(&secy_stats->syncp);
@@ -1104,13 +1155,16 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
11041155
}
11051156

11061157
/* First, PN check to avoid decrypting obviously wrong packets */
1107-
pn = ntohl(hdr->packet_number);
1158+
hdr_pn = ntohl(hdr->packet_number);
11081159
if (secy->replay_protect) {
11091160
bool late;
11101161

11111162
spin_lock(&rx_sa->lock);
1112-
late = rx_sa->next_pn >= secy->replay_window &&
1113-
pn < (rx_sa->next_pn - secy->replay_window);
1163+
late = rx_sa->next_pn_halves.lower >= secy->replay_window &&
1164+
hdr_pn < (rx_sa->next_pn_halves.lower - secy->replay_window);
1165+
1166+
if (secy->xpn)
1167+
late = late && pn_same_half(rx_sa->next_pn_halves.lower, hdr_pn);
11141168
spin_unlock(&rx_sa->lock);
11151169

11161170
if (late) {
@@ -1139,7 +1193,7 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
11391193
return RX_HANDLER_CONSUMED;
11401194
}
11411195

1142-
if (!macsec_post_decrypt(skb, secy, pn))
1196+
if (!macsec_post_decrypt(skb, secy, hdr_pn))
11431197
goto drop;
11441198

11451199
deliver:
@@ -1666,7 +1720,7 @@ static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info)
16661720

16671721
if (tb_sa[MACSEC_SA_ATTR_PN]) {
16681722
spin_lock_bh(&rx_sa->lock);
1669-
rx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
1723+
rx_sa->next_pn_halves.lower = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
16701724
spin_unlock_bh(&rx_sa->lock);
16711725
}
16721726

@@ -1873,7 +1927,7 @@ static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info)
18731927
}
18741928

18751929
spin_lock_bh(&tx_sa->lock);
1876-
tx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
1930+
tx_sa->next_pn_halves.lower = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
18771931
spin_unlock_bh(&tx_sa->lock);
18781932

18791933
if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
@@ -2137,9 +2191,11 @@ static int macsec_upd_txsa(struct sk_buff *skb, struct genl_info *info)
21372191
u8 assoc_num;
21382192
struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
21392193
bool was_operational, was_active;
2140-
u32 prev_pn = 0;
2194+
pn_t prev_pn;
21412195
int ret = 0;
21422196

2197+
prev_pn.full64 = 0;
2198+
21432199
if (!attrs[MACSEC_ATTR_IFINDEX])
21442200
return -EINVAL;
21452201

@@ -2159,8 +2215,8 @@ static int macsec_upd_txsa(struct sk_buff *skb, struct genl_info *info)
21592215

21602216
if (tb_sa[MACSEC_SA_ATTR_PN]) {
21612217
spin_lock_bh(&tx_sa->lock);
2162-
prev_pn = tx_sa->next_pn;
2163-
tx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
2218+
prev_pn = tx_sa->next_pn_halves;
2219+
tx_sa->next_pn_halves.lower = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
21642220
spin_unlock_bh(&tx_sa->lock);
21652221
}
21662222

@@ -2198,7 +2254,7 @@ static int macsec_upd_txsa(struct sk_buff *skb, struct genl_info *info)
21982254
cleanup:
21992255
if (tb_sa[MACSEC_SA_ATTR_PN]) {
22002256
spin_lock_bh(&tx_sa->lock);
2201-
tx_sa->next_pn = prev_pn;
2257+
tx_sa->next_pn_halves = prev_pn;
22022258
spin_unlock_bh(&tx_sa->lock);
22032259
}
22042260
tx_sa->active = was_active;
@@ -2218,9 +2274,11 @@ static int macsec_upd_rxsa(struct sk_buff *skb, struct genl_info *info)
22182274
struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
22192275
struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
22202276
bool was_active;
2221-
u32 prev_pn = 0;
2277+
pn_t prev_pn;
22222278
int ret = 0;
22232279

2280+
prev_pn.full64 = 0;
2281+
22242282
if (!attrs[MACSEC_ATTR_IFINDEX])
22252283
return -EINVAL;
22262284

@@ -2243,8 +2301,8 @@ static int macsec_upd_rxsa(struct sk_buff *skb, struct genl_info *info)
22432301

22442302
if (tb_sa[MACSEC_SA_ATTR_PN]) {
22452303
spin_lock_bh(&rx_sa->lock);
2246-
prev_pn = rx_sa->next_pn;
2247-
rx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
2304+
prev_pn = rx_sa->next_pn_halves;
2305+
rx_sa->next_pn_halves.lower = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
22482306
spin_unlock_bh(&rx_sa->lock);
22492307
}
22502308

@@ -2277,7 +2335,7 @@ static int macsec_upd_rxsa(struct sk_buff *skb, struct genl_info *info)
22772335
cleanup:
22782336
if (tb_sa[MACSEC_SA_ATTR_PN]) {
22792337
spin_lock_bh(&rx_sa->lock);
2280-
rx_sa->next_pn = prev_pn;
2338+
rx_sa->next_pn_halves = prev_pn;
22812339
spin_unlock_bh(&rx_sa->lock);
22822340
}
22832341
rx_sa->active = was_active;
@@ -2796,7 +2854,7 @@ dump_secy(struct macsec_secy *secy, struct net_device *dev,
27962854
}
27972855

27982856
if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) ||
2799-
nla_put_u32(skb, MACSEC_SA_ATTR_PN, tx_sa->next_pn) ||
2857+
nla_put_u32(skb, MACSEC_SA_ATTR_PN, tx_sa->next_pn_halves.lower) ||
28002858
nla_put(skb, MACSEC_SA_ATTR_KEYID, MACSEC_KEYID_LEN, tx_sa->key.id) ||
28012859
nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, tx_sa->active)) {
28022860
nla_nest_cancel(skb, txsa_nest);
@@ -2900,7 +2958,7 @@ dump_secy(struct macsec_secy *secy, struct net_device *dev,
29002958
nla_nest_end(skb, attr);
29012959

29022960
if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) ||
2903-
nla_put_u32(skb, MACSEC_SA_ATTR_PN, rx_sa->next_pn) ||
2961+
nla_put_u32(skb, MACSEC_SA_ATTR_PN, rx_sa->next_pn_halves.lower) ||
29042962
nla_put(skb, MACSEC_SA_ATTR_KEYID, MACSEC_KEYID_LEN, rx_sa->key.id) ||
29052963
nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, rx_sa->active)) {
29062964
nla_nest_cancel(skb, rxsa_nest);

0 commit comments

Comments
 (0)