
Commit 8362ea1

chelsiocrypto authored and herbertx committed
crypto: chcr - ESN for Inline IPSec Tx
Send the SPI, 64-bit sequence number and 64-bit IV, with aadiv drop, for inline crypto. This information is added to the outgoing packet after the CPL TX PKT XT and removed by hardware. The aad, auth and cipher offsets are then adjusted for an ESN-enabled tunnel.

Signed-off-by: Atul Gupta <[email protected]>
Signed-off-by: Herbert Xu <[email protected]>
1 parent c35828e commit 8362ea1

File tree

2 files changed: 148 additions & 36 deletions


drivers/crypto/chelsio/chcr_core.h

Lines changed: 9 additions & 0 deletions
@@ -159,8 +159,17 @@ struct chcr_ipsec_wr {
 	struct chcr_ipsec_req req;
 };
 
+#define ESN_IV_INSERT_OFFSET 12
+struct chcr_ipsec_aadiv {
+	__be32 spi;
+	u8 seq_no[8];
+	u8 iv[8];
+};
+
 struct ipsec_sa_entry {
 	int hmac_ctrl;
+	u16 esn;
+	u16 imm;
 	unsigned int enckey_len;
 	unsigned int kctx_len;
 	unsigned int authsize;
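Note: the new chcr_ipsec_aadiv header is 4 + 8 + 8 = 20 bytes (SPI, 64-bit sequence number, 64-bit IV), and the Tx path later pads it to the next 16-byte boundary with DIV_ROUND_UP(sizeof(struct chcr_ipsec_aadiv), 16) << 4. A minimal userspace sketch of that arithmetic, with local stand-ins for the kernel macro and types (not part of the commit):

#include <stdio.h>
#include <stdint.h>

/* Local stand-ins for the kernel macro and types, for illustration only. */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

struct chcr_ipsec_aadiv {
	uint32_t spi;        /* __be32 in the driver */
	uint8_t  seq_no[8];  /* 64-bit ESN, big endian */
	uint8_t  iv[8];      /* 64-bit IV */
};

int main(void)
{
	size_t raw    = sizeof(struct chcr_ipsec_aadiv);  /* 20 bytes */
	size_t padded = DIV_ROUND_UP(raw, 16) << 4;       /* 32 bytes */

	printf("aadiv: raw %zu bytes, padded to %zu bytes\n", raw, padded);
	return 0;
}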

drivers/crypto/chelsio/chcr_ipsec.c

Lines changed: 139 additions & 36 deletions
@@ -76,12 +76,14 @@ static int chcr_xfrm_add_state(struct xfrm_state *x);
 static void chcr_xfrm_del_state(struct xfrm_state *x);
 static void chcr_xfrm_free_state(struct xfrm_state *x);
 static bool chcr_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x);
+static void chcr_advance_esn_state(struct xfrm_state *x);
 
 static const struct xfrmdev_ops chcr_xfrmdev_ops = {
 	.xdo_dev_state_add = chcr_xfrm_add_state,
 	.xdo_dev_state_delete = chcr_xfrm_del_state,
 	.xdo_dev_state_free = chcr_xfrm_free_state,
 	.xdo_dev_offload_ok = chcr_ipsec_offload_ok,
+	.xdo_dev_state_advance_esn = chcr_advance_esn_state,
 };
 
 /* Add offload xfrms to Chelsio Interface */
@@ -210,10 +212,6 @@ static int chcr_xfrm_add_state(struct xfrm_state *x)
 		pr_debug("CHCR: Cannot offload compressed xfrm states\n");
 		return -EINVAL;
 	}
-	if (x->props.flags & XFRM_STATE_ESN) {
-		pr_debug("CHCR: Cannot offload ESN xfrm states\n");
-		return -EINVAL;
-	}
 	if (x->props.family != AF_INET &&
 	    x->props.family != AF_INET6) {
 		pr_debug("CHCR: Only IPv4/6 xfrm state offloaded\n");
@@ -266,6 +264,8 @@ static int chcr_xfrm_add_state(struct xfrm_state *x)
 	}
 
 	sa_entry->hmac_ctrl = chcr_ipsec_setauthsize(x, sa_entry);
+	if (x->props.flags & XFRM_STATE_ESN)
+		sa_entry->esn = 1;
 	chcr_ipsec_setkey(x, sa_entry);
 	x->xso.offload_handle = (unsigned long)sa_entry;
 	try_module_get(THIS_MODULE);
@@ -294,31 +294,57 @@ static void chcr_xfrm_free_state(struct xfrm_state *x)
 
 static bool chcr_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
 {
-	/* Offload with IP options is not supported yet */
-	if (ip_hdr(skb)->ihl > 5)
-		return false;
-
+	if (x->props.family == AF_INET) {
+		/* Offload with IP options is not supported yet */
+		if (ip_hdr(skb)->ihl > 5)
+			return false;
+	} else {
+		/* Offload with IPv6 extension headers is not support yet */
+		if (ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr))
+			return false;
+	}
 	return true;
 }
 
-static inline int is_eth_imm(const struct sk_buff *skb, unsigned int kctx_len)
+static void chcr_advance_esn_state(struct xfrm_state *x)
+{
+	/* do nothing */
+	if (!x->xso.offload_handle)
+		return;
+}
+
+static inline int is_eth_imm(const struct sk_buff *skb,
+			     struct ipsec_sa_entry *sa_entry)
 {
+	unsigned int kctx_len;
 	int hdrlen;
 
+	kctx_len = sa_entry->kctx_len;
 	hdrlen = sizeof(struct fw_ulptx_wr) +
 		 sizeof(struct chcr_ipsec_req) + kctx_len;
 
 	hdrlen += sizeof(struct cpl_tx_pkt);
+	if (sa_entry->esn)
+		hdrlen += (DIV_ROUND_UP(sizeof(struct chcr_ipsec_aadiv), 16)
+			   << 4);
 	if (skb->len <= MAX_IMM_TX_PKT_LEN - hdrlen)
 		return hdrlen;
 	return 0;
 }
 
 static inline unsigned int calc_tx_sec_flits(const struct sk_buff *skb,
-					     unsigned int kctx_len)
+					     struct ipsec_sa_entry *sa_entry)
 {
+	unsigned int kctx_len;
 	unsigned int flits;
-	int hdrlen = is_eth_imm(skb, kctx_len);
+	int aadivlen;
+	int hdrlen;
+
+	kctx_len = sa_entry->kctx_len;
+	hdrlen = is_eth_imm(skb, sa_entry);
+	aadivlen = sa_entry->esn ? DIV_ROUND_UP(sizeof(struct chcr_ipsec_aadiv),
+						16) : 0;
+	aadivlen <<= 4;
 
 	/* If the skb is small enough, we can pump it out as a work request
 	 * with only immediate data. In that case we just have to have the
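With ESN enabled, is_eth_imm() now also reserves the padded aadiv block in the work-request header, so slightly larger packets fall out of the immediate-data path. A rough userspace sketch of the threshold change; MAX_IMM_TX_PKT_LEN = 256 and the structure sizes below are illustrative assumptions, not values taken from this diff:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))
#define MAX_IMM_TX_PKT_LEN  256   /* assumed immediate-data limit */

int main(void)
{
	/* Illustrative header sizes; the driver uses sizeof() on the real structs. */
	int fw_ulptx_wr = 16, chcr_ipsec_req = 80, kctx_len = 48, cpl_tx_pkt = 16;
	int aadiv_padded = DIV_ROUND_UP(20, 16) << 4;   /* 32-byte aadiv block */
	int hdrlen = fw_ulptx_wr + chcr_ipsec_req + kctx_len + cpl_tx_pkt;

	printf("largest immediate skb without ESN: %d bytes\n",
	       MAX_IMM_TX_PKT_LEN - hdrlen);
	printf("largest immediate skb with ESN:    %d bytes\n",
	       MAX_IMM_TX_PKT_LEN - hdrlen - aadiv_padded);
	return 0;
}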
@@ -341,13 +367,69 @@ static inline unsigned int calc_tx_sec_flits(const struct sk_buff *skb,
 	flits += (sizeof(struct fw_ulptx_wr) +
 		  sizeof(struct chcr_ipsec_req) +
 		  kctx_len +
-		  sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
+		  sizeof(struct cpl_tx_pkt_core) +
+		  aadivlen) / sizeof(__be64);
 	return flits;
 }
 
+inline void *copy_esn_pktxt(struct sk_buff *skb,
+			    struct net_device *dev,
+			    void *pos,
+			    struct ipsec_sa_entry *sa_entry)
+{
+	struct chcr_ipsec_aadiv *aadiv;
+	struct ulptx_idata *sc_imm;
+	struct ip_esp_hdr *esphdr;
+	struct xfrm_offload *xo;
+	struct sge_eth_txq *q;
+	struct adapter *adap;
+	struct port_info *pi;
+	__be64 seqno;
+	u32 qidx;
+	u32 seqlo;
+	u8 *iv;
+	int eoq;
+	int len;
+
+	pi = netdev_priv(dev);
+	adap = pi->adapter;
+	qidx = skb->queue_mapping;
+	q = &adap->sge.ethtxq[qidx + pi->first_qset];
+
+	/* end of queue, reset pos to start of queue */
+	eoq = (void *)q->q.stat - pos;
+	if (!eoq)
+		pos = q->q.desc;
+
+	len = DIV_ROUND_UP(sizeof(struct chcr_ipsec_aadiv), 16) << 4;
+	memset(pos, 0, len);
+	aadiv = (struct chcr_ipsec_aadiv *)pos;
+	esphdr = (struct ip_esp_hdr *)skb_transport_header(skb);
+	iv = skb_transport_header(skb) + sizeof(struct ip_esp_hdr);
+	xo = xfrm_offload(skb);
+
+	aadiv->spi = (esphdr->spi);
+	seqlo = htonl(esphdr->seq_no);
+	seqno = cpu_to_be64(seqlo + ((u64)xo->seq.hi << 32));
+	memcpy(aadiv->seq_no, &seqno, 8);
+	iv = skb_transport_header(skb) + sizeof(struct ip_esp_hdr);
+	memcpy(aadiv->iv, iv, 8);
+
+	if (sa_entry->imm) {
+		sc_imm = (struct ulptx_idata *)(pos +
+			  (DIV_ROUND_UP(sizeof(struct chcr_ipsec_aadiv),
+					sizeof(__be64)) << 3));
+		sc_imm->cmd_more = FILL_CMD_MORE(!sa_entry->imm);
+		sc_imm->len = cpu_to_be32(sa_entry->imm);
+	}
+	pos += len;
+	return pos;
+}
+
 inline void *copy_cpltx_pktxt(struct sk_buff *skb,
-			      struct net_device *dev,
-			      void *pos)
+			      struct net_device *dev,
+			      void *pos,
+			      struct ipsec_sa_entry *sa_entry)
 {
 	struct cpl_tx_pkt_core *cpl;
 	struct sge_eth_txq *q;
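copy_esn_pktxt() rebuilds the full 64-bit ESN from the 32-bit low half carried in the ESP header and the high half tracked in the xfrm offload state (xo->seq.hi), then stores it big-endian in aadiv->seq_no ahead of the 8-byte IV. A simplified standalone sketch of that combine step, using glibc byte-order helpers in place of cpu_to_be64() and made-up sequence values:

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>   /* ntohl(), htonl() */
#include <endian.h>      /* htobe64() (glibc) */

int main(void)
{
	uint32_t esp_seq = htonl(5);   /* low 32 bits as carried in the ESP header */
	uint32_t seq_hi  = 2;          /* high 32 bits from xo->seq.hi (assumed) */
	uint8_t  seq_no[8];

	/* Combine low and high halves, then store big endian, mirroring the
	 * driver's cpu_to_be64() + memcpy() into aadiv->seq_no. */
	uint64_t seqno = ((uint64_t)seq_hi << 32) | ntohl(esp_seq);
	uint64_t be    = htobe64(seqno);

	memcpy(seq_no, &be, sizeof(be));
	printf("64-bit ESN = 0x%016llx\n", (unsigned long long)seqno);
	return 0;
}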
@@ -382,6 +464,9 @@ inline void *copy_cpltx_pktxt(struct sk_buff *skb,
 	cpl->ctrl1 = cpu_to_be64(cntrl);
 
 	pos += sizeof(struct cpl_tx_pkt_core);
+	/* Copy ESN info for HW */
+	if (sa_entry->esn)
+		pos = copy_esn_pktxt(skb, dev, pos, sa_entry);
 	return pos;
 }
 
@@ -428,7 +513,7 @@ inline void *copy_key_cpltx_pktxt(struct sk_buff *skb,
 		pos = (u8 *)q->q.desc + (key_len - left);
 	}
 	/* Copy CPL TX PKT XT */
-	pos = copy_cpltx_pktxt(skb, dev, pos);
+	pos = copy_cpltx_pktxt(skb, dev, pos, sa_entry);
 
 	return pos;
 }
@@ -441,10 +526,16 @@ inline void *chcr_crypto_wreq(struct sk_buff *skb,
 {
 	struct port_info *pi = netdev_priv(dev);
 	struct adapter *adap = pi->adapter;
-	unsigned int immdatalen = 0;
 	unsigned int ivsize = GCM_ESP_IV_SIZE;
 	struct chcr_ipsec_wr *wr;
+	u16 immdatalen = 0;
 	unsigned int flits;
+	u32 ivinoffset;
+	u32 aadstart;
+	u32 aadstop;
+	u32 ciphstart;
+	u32 ivdrop = 0;
+	u32 esnlen = 0;
 	u32 wr_mid;
 	int qidx = skb_get_queue_mapping(skb);
 	struct sge_eth_txq *q = &adap->sge.ethtxq[qidx + pi->first_qset];
@@ -453,10 +544,17 @@ inline void *chcr_crypto_wreq(struct sk_buff *skb,
 
 	atomic_inc(&adap->chcr_stats.ipsec_cnt);
 
-	flits = calc_tx_sec_flits(skb, kctx_len);
+	flits = calc_tx_sec_flits(skb, sa_entry);
+	if (sa_entry->esn)
+		ivdrop = 1;
 
-	if (is_eth_imm(skb, kctx_len))
+	if (is_eth_imm(skb, sa_entry)) {
 		immdatalen = skb->len;
+		sa_entry->imm = immdatalen;
+	}
+
+	if (sa_entry->esn)
+		esnlen = sizeof(struct chcr_ipsec_aadiv);
 
 	/* WR Header */
 	wr = (struct chcr_ipsec_wr *)pos;
@@ -481,41 +579,46 @@ inline void *chcr_crypto_wreq(struct sk_buff *skb,
 				sizeof(wr->req.key_ctx) +
 				kctx_len +
 				sizeof(struct cpl_tx_pkt_core) +
-				immdatalen);
+				esnlen +
+				(esnlen ? 0 : immdatalen));
 
 	/* CPL_SEC_PDU */
+	ivinoffset = sa_entry->esn ? (ESN_IV_INSERT_OFFSET + 1) :
+				     (skb_transport_offset(skb) +
+				      sizeof(struct ip_esp_hdr) + 1);
 	wr->req.sec_cpl.op_ivinsrtofst = htonl(
 				CPL_TX_SEC_PDU_OPCODE_V(CPL_TX_SEC_PDU) |
 				CPL_TX_SEC_PDU_CPLLEN_V(2) |
 				CPL_TX_SEC_PDU_PLACEHOLDER_V(1) |
 				CPL_TX_SEC_PDU_IVINSRTOFST_V(
-				(skb_transport_offset(skb) +
-				sizeof(struct ip_esp_hdr) + 1)));
+				ivinoffset));
 
-	wr->req.sec_cpl.pldlen = htonl(skb->len);
+	wr->req.sec_cpl.pldlen = htonl(skb->len + esnlen);
+	aadstart = sa_entry->esn ? 1 : (skb_transport_offset(skb) + 1);
+	aadstop = sa_entry->esn ? ESN_IV_INSERT_OFFSET :
+				  (skb_transport_offset(skb) +
+				   sizeof(struct ip_esp_hdr));
+	ciphstart = skb_transport_offset(skb) + sizeof(struct ip_esp_hdr) +
+		    GCM_ESP_IV_SIZE + 1;
+	ciphstart += sa_entry->esn ? esnlen : 0;
 
 	wr->req.sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
-				(skb_transport_offset(skb) + 1),
-				(skb_transport_offset(skb) +
-				 sizeof(struct ip_esp_hdr)),
-				(skb_transport_offset(skb) +
-				 sizeof(struct ip_esp_hdr) +
-				 GCM_ESP_IV_SIZE + 1), 0);
+				aadstart,
+				aadstop,
+				ciphstart, 0);
 
 	wr->req.sec_cpl.cipherstop_lo_authinsert =
-		FILL_SEC_CPL_AUTHINSERT(0, skb_transport_offset(skb) +
-					   sizeof(struct ip_esp_hdr) +
-					   GCM_ESP_IV_SIZE + 1,
-					   sa_entry->authsize,
-					   sa_entry->authsize);
+		FILL_SEC_CPL_AUTHINSERT(0, ciphstart,
+					sa_entry->authsize,
+					sa_entry->authsize);
 	wr->req.sec_cpl.seqno_numivs =
 		FILL_SEC_CPL_SCMD0_SEQNO(CHCR_ENCRYPT_OP, 1,
 					 CHCR_SCMD_CIPHER_MODE_AES_GCM,
 					 CHCR_SCMD_AUTH_MODE_GHASH,
 					 sa_entry->hmac_ctrl,
 					 ivsize >> 1);
 	wr->req.sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
-								  0, 0, 0);
+								  0, ivdrop, 0);
 
 	pos += sizeof(struct fw_ulptx_wr) +
 	       sizeof(struct ulp_txpkt) +
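For the ESN case the SEC_CPL offsets now point into the prepended aadiv block rather than the packet: the AAD spans bytes 1..ESN_IV_INSERT_OFFSET (SPI plus 64-bit sequence number), the IV is inserted at offset ESN_IV_INSERT_OFFSET + 1, and the cipher start is pushed out by esnlen. A small sketch comparing both cases for a hypothetical IPv4/ESP packet with a 34-byte transport offset (14-byte Ethernet plus 20-byte IPv4 header); the packet geometry is an assumption, the offset formulas mirror the diff:

#include <stdio.h>

#define ESN_IV_INSERT_OFFSET 12
#define GCM_ESP_IV_SIZE      8
#define ESP_HDR_LEN          8   /* sizeof(struct ip_esp_hdr): SPI + seq */

int main(void)
{
	int transport_off = 34;   /* assumed: Ethernet + IPv4 header */
	int esnlen = 20;          /* sizeof(struct chcr_ipsec_aadiv) */

	/* Non-ESN: offsets are measured inside the packet itself. */
	printf("no ESN : ivinoffset=%d aadstart=%d aadstop=%d ciphstart=%d\n",
	       transport_off + ESP_HDR_LEN + 1,
	       transport_off + 1,
	       transport_off + ESP_HDR_LEN,
	       transport_off + ESP_HDR_LEN + GCM_ESP_IV_SIZE + 1);

	/* ESN: AAD and IV come from the prepended aadiv block, and the
	 * cipher start is pushed out by its (unpadded) length. */
	printf("ESN    : ivinoffset=%d aadstart=%d aadstop=%d ciphstart=%d\n",
	       ESN_IV_INSERT_OFFSET + 1,
	       1,
	       ESN_IV_INSERT_OFFSET,
	       transport_off + ESP_HDR_LEN + GCM_ESP_IV_SIZE + 1 + esnlen);
	return 0;
}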
@@ -593,7 +696,7 @@ out_free: dev_kfree_skb_any(skb);
 
 	cxgb4_reclaim_completed_tx(adap, &q->q, true);
 
-	flits = calc_tx_sec_flits(skb, sa_entry->kctx_len);
+	flits = calc_tx_sec_flits(skb, sa_entry);
 	ndesc = flits_to_desc(flits);
 	credits = txq_avail(&q->q) - ndesc;
 
@@ -606,7 +709,7 @@ out_free: dev_kfree_skb_any(skb);
 		return NETDEV_TX_BUSY;
 	}
 
-	if (is_eth_imm(skb, kctx_len))
+	if (is_eth_imm(skb, sa_entry))
 		immediate = true;
 
 	if (!immediate &&
