|
7 | 7 | #include <net/xfrm.h> |
8 | 8 | #include <linux/netdevice.h> |
9 | 9 | #include <linux/bitfield.h> |
| 10 | +#include <crypto/aead.h> |
| 11 | +#include <crypto/gcm.h> |
10 | 12 |
|
11 | 13 | #include "otx2_common.h" |
| 14 | +#include "otx2_struct.h" |
12 | 15 | #include "cn10k_ipsec.h" |
13 | 16 |
|
| 17 | +DEFINE_STATIC_KEY_FALSE(cn10k_ipsec_sa_enabled); |
| 18 | + |
14 | 19 | static bool is_dev_support_ipsec_offload(struct pci_dev *pdev) |
15 | 20 | { |
16 | 21 | return is_dev_cn10ka_b0(pdev) || is_dev_cn10kb(pdev); |
@@ -690,6 +695,9 @@ static int cn10k_ipsec_outb_add_state(struct xfrm_state *x, |
690 | 695 | } |
691 | 696 |
|
692 | 697 | x->xso.offload_handle = (unsigned long)sa_info; |
| 698 | + /* Enable static branch when first SA setup */ |
| 699 | + if (!pf->ipsec.outb_sa_count) |
| 700 | + static_branch_enable(&cn10k_ipsec_sa_enabled); |
693 | 701 | pf->ipsec.outb_sa_count++; |
694 | 702 | return 0; |
695 | 703 | } |
@@ -749,6 +757,8 @@ static void cn10k_ipsec_sa_wq_handler(struct work_struct *work) |
749 | 757 | sa_work); |
750 | 758 | struct otx2_nic *pf = container_of(ipsec, struct otx2_nic, ipsec); |
751 | 759 |
|
| 760 | + /* Disable static branch when no more SA enabled */ |
| 761 | + static_branch_disable(&cn10k_ipsec_sa_enabled); |
752 | 762 | rtnl_lock(); |
753 | 763 | netdev_update_features(pf->netdev); |
754 | 764 | rtnl_unlock(); |
@@ -822,3 +832,212 @@ void cn10k_ipsec_clean(struct otx2_nic *pf) |
822 | 832 | cn10k_outb_cpt_clean(pf); |
823 | 833 | } |
824 | 834 | EXPORT_SYMBOL(cn10k_ipsec_clean); |
| 835 | + |
| 836 | +static u16 cn10k_ipsec_get_ip_data_len(struct xfrm_state *x, |
| 837 | + struct sk_buff *skb) |
| 838 | +{ |
| 839 | + struct ipv6hdr *ipv6h; |
| 840 | + struct iphdr *iph; |
| 841 | + u8 *src; |
| 842 | + |
| 843 | + src = (u8 *)skb->data + ETH_HLEN; |
| 844 | + |
| 845 | + if (x->props.family == AF_INET) { |
| 846 | + iph = (struct iphdr *)src; |
| 847 | + return ntohs(iph->tot_len); |
| 848 | + } |
| 849 | + |
| 850 | + ipv6h = (struct ipv6hdr *)src; |
| 851 | + return ntohs(ipv6h->payload_len) + sizeof(struct ipv6hdr); |
| 852 | +} |
| 853 | + |
/* Prepare CPT and NIX SQE scatter/gather subdescriptor structure.
 * SG of NIX and CPT are same in size.
 * Layout of a NIX SQE and CPT SG entry:
 * -----------------------------
 * |     CPT Scatter Gather    |
 * |        (SQE SIZE)         |
 * |                           |
 * -----------------------------
 * |       NIX SQE             |
 * |        (SQE SIZE)         |
 * |                           |
 * -----------------------------
 *
 * Maps every fragment of @skb for DMA, fills the NIX SG subdescriptors
 * at sq->sqe_base + *offset (advancing *offset past them), and mirrors
 * each SG header and IOVA into the CPT SG area one SQE-size slot below
 * sqe_base.  DMA mapping info is recorded in sq->sg[sq->head] for later
 * unmapping.  Returns false on a DMA mapping error, true on success.
 */
bool otx2_sqe_add_sg_ipsec(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
			   struct sk_buff *skb, int num_segs, int *offset)
{
	struct cpt_sg_s *cpt_sg = NULL;
	struct nix_sqe_sg_s *sg = NULL;
	u64 dma_addr, *iova = NULL;
	u64 *cpt_iova = NULL;
	u16 *sg_lens = NULL;
	int seg, len;

	sq->sg[sq->head].num_segs = 0;
	/* CPT SG area sits one SQE-size slot before the NIX SQE (see layout above) */
	cpt_sg = (struct cpt_sg_s *)(sq->sqe_base - sq->sqe_size);

	for (seg = 0; seg < num_segs; seg++) {
		/* Start a fresh SG subdescriptor every MAX_SEGS_PER_SG segments */
		if ((seg % MAX_SEGS_PER_SG) == 0) {
			sg = (struct nix_sqe_sg_s *)(sq->sqe_base + *offset);
			sg->ld_type = NIX_SEND_LDTYPE_LDD;
			sg->subdc = NIX_SUBDC_SG;
			sg->segs = 0;
			sg_lens = (void *)sg;
			iova = (void *)sg + sizeof(*sg);
			/* Next subdc always starts at a 16byte boundary.
			 * So if sg->segs is either 2 or 3, offset += 16bytes.
			 */
			if ((num_segs - seg) >= (MAX_SEGS_PER_SG - 1))
				*offset += sizeof(*sg) + (3 * sizeof(u64));
			else
				*offset += sizeof(*sg) + sizeof(u64);

			/* Advance the CPT mirror by one SG group (header + 3 IOVAs) */
			cpt_sg += (seg / MAX_SEGS_PER_SG) * 4;
			cpt_iova = (void *)cpt_sg + sizeof(*cpt_sg);
		}
		dma_addr = otx2_dma_map_skb_frag(pfvf, skb, seg, &len);
		/* NOTE(review): segments mapped before this failure are not
		 * unmapped here — presumably the caller unwinds via
		 * sq->sg[sq->head]; confirm against the caller's error path.
		 */
		if (dma_mapping_error(pfvf->dev, dma_addr))
			return false;

		sg_lens[seg % MAX_SEGS_PER_SG] = len;
		sg->segs++;
		*iova++ = dma_addr;
		*cpt_iova++ = dma_addr;

		/* Save DMA mapping info for later unmapping */
		sq->sg[sq->head].dma_addr[seg] = dma_addr;
		sq->sg[sq->head].size[seg] = len;
		sq->sg[sq->head].num_segs++;

		/* Mirror the (possibly updated) NIX SG header into the CPT
		 * slot; the top reserved bits have no meaning for CPT.
		 */
		*cpt_sg = *(struct cpt_sg_s *)sg;
		cpt_sg->rsvd_63_50 = 0;
	}

	sq->sg[sq->head].skb = (u64)skb;
	return true;
}
| 920 | + |
| 921 | +static u16 cn10k_ipsec_get_param1(u8 iv_offset) |
| 922 | +{ |
| 923 | + u16 param1_val; |
| 924 | + |
| 925 | + /* Set Crypto mode, disable L3/L4 checksum */ |
| 926 | + param1_val = CN10K_IPSEC_INST_PARAM1_DIS_L4_CSUM | |
| 927 | + CN10K_IPSEC_INST_PARAM1_DIS_L3_CSUM; |
| 928 | + param1_val |= (u16)iv_offset << CN10K_IPSEC_INST_PARAM1_IV_OFFSET_SHIFT; |
| 929 | + return param1_val; |
| 930 | +} |
| 931 | + |
| 932 | +bool cn10k_ipsec_transmit(struct otx2_nic *pf, struct netdev_queue *txq, |
| 933 | + struct otx2_snd_queue *sq, struct sk_buff *skb, |
| 934 | + int num_segs, int size) |
| 935 | +{ |
| 936 | + struct cpt_inst_s inst; |
| 937 | + struct cpt_res_s *res; |
| 938 | + struct xfrm_state *x; |
| 939 | + struct qmem *sa_info; |
| 940 | + dma_addr_t dptr_iova; |
| 941 | + struct sec_path *sp; |
| 942 | + u8 encap_offset; |
| 943 | + u8 auth_offset; |
| 944 | + u8 gthr_size; |
| 945 | + u8 iv_offset; |
| 946 | + u16 dlen; |
| 947 | + |
| 948 | + /* Check for IPSEC offload enabled */ |
| 949 | + if (!(pf->flags & OTX2_FLAG_IPSEC_OFFLOAD_ENABLED)) |
| 950 | + goto drop; |
| 951 | + |
| 952 | + sp = skb_sec_path(skb); |
| 953 | + if (unlikely(!sp->len)) |
| 954 | + goto drop; |
| 955 | + |
| 956 | + x = xfrm_input_state(skb); |
| 957 | + if (unlikely(!x)) |
| 958 | + goto drop; |
| 959 | + |
| 960 | + if (x->props.mode != XFRM_MODE_TRANSPORT && |
| 961 | + x->props.mode != XFRM_MODE_TUNNEL) |
| 962 | + goto drop; |
| 963 | + |
| 964 | + dlen = cn10k_ipsec_get_ip_data_len(x, skb); |
| 965 | + if (dlen == 0 && netif_msg_tx_err(pf)) { |
| 966 | + netdev_err(pf->netdev, "Invalid IP header, ip-length zero\n"); |
| 967 | + goto drop; |
| 968 | + } |
| 969 | + |
| 970 | + /* Check for valid SA context */ |
| 971 | + sa_info = (struct qmem *)x->xso.offload_handle; |
| 972 | + if (!sa_info) |
| 973 | + goto drop; |
| 974 | + |
| 975 | + memset(&inst, 0, sizeof(struct cpt_inst_s)); |
| 976 | + |
| 977 | + /* Get authentication offset */ |
| 978 | + if (x->props.family == AF_INET) |
| 979 | + auth_offset = sizeof(struct iphdr); |
| 980 | + else |
| 981 | + auth_offset = sizeof(struct ipv6hdr); |
| 982 | + |
| 983 | + /* IV offset is after ESP header */ |
| 984 | + iv_offset = auth_offset + sizeof(struct ip_esp_hdr); |
| 985 | + /* Encap will start after IV */ |
| 986 | + encap_offset = iv_offset + GCM_RFC4106_IV_SIZE; |
| 987 | + |
| 988 | + /* CPT Instruction word-1 */ |
| 989 | + res = (struct cpt_res_s *)(sq->cpt_resp->base + (64 * sq->head)); |
| 990 | + res->compcode = 0; |
| 991 | + inst.res_addr = sq->cpt_resp->iova + (64 * sq->head); |
| 992 | + |
| 993 | + /* CPT Instruction word-2 */ |
| 994 | + inst.rvu_pf_func = pf->pcifunc; |
| 995 | + |
| 996 | + /* CPT Instruction word-3: |
| 997 | + * Set QORD to force CPT_RES_S write completion |
| 998 | + */ |
| 999 | + inst.qord = 1; |
| 1000 | + |
| 1001 | + /* CPT Instruction word-4 */ |
| 1002 | + /* inst.dlen should not include ICV length */ |
| 1003 | + inst.dlen = dlen + ETH_HLEN - (x->aead->alg_icv_len / 8); |
| 1004 | + inst.opcode_major = CN10K_IPSEC_MAJOR_OP_OUTB_IPSEC; |
| 1005 | + inst.param1 = cn10k_ipsec_get_param1(iv_offset); |
| 1006 | + |
| 1007 | + inst.param2 = encap_offset << |
| 1008 | + CN10K_IPSEC_INST_PARAM2_ENC_DATA_OFFSET_SHIFT; |
| 1009 | + inst.param2 |= (u16)auth_offset << |
| 1010 | + CN10K_IPSEC_INST_PARAM2_AUTH_DATA_OFFSET_SHIFT; |
| 1011 | + |
| 1012 | + /* CPT Instruction word-5 */ |
| 1013 | + gthr_size = num_segs / MAX_SEGS_PER_SG; |
| 1014 | + gthr_size = (num_segs % MAX_SEGS_PER_SG) ? gthr_size + 1 : gthr_size; |
| 1015 | + |
| 1016 | + gthr_size &= 0xF; |
| 1017 | + dptr_iova = (sq->sqe_ring->iova + (sq->head * (sq->sqe_size * 2))); |
| 1018 | + inst.dptr = dptr_iova | ((u64)gthr_size << 60); |
| 1019 | + |
| 1020 | + /* CPT Instruction word-6 */ |
| 1021 | + inst.rptr = inst.dptr; |
| 1022 | + |
| 1023 | + /* CPT Instruction word-7 */ |
| 1024 | + inst.cptr = sa_info->iova; |
| 1025 | + inst.ctx_val = 1; |
| 1026 | + inst.egrp = CN10K_DEF_CPT_IPSEC_EGRP; |
| 1027 | + |
| 1028 | + /* CPT Instruction word-0 */ |
| 1029 | + inst.nixtxl = (size / 16) - 1; |
| 1030 | + inst.dat_offset = ETH_HLEN; |
| 1031 | + inst.nixtx_offset = sq->sqe_size; |
| 1032 | + |
| 1033 | + netdev_tx_sent_queue(txq, skb->len); |
| 1034 | + |
| 1035 | + /* Finally Flush the CPT instruction */ |
| 1036 | + sq->head++; |
| 1037 | + sq->head &= (sq->sqe_cnt - 1); |
| 1038 | + cn10k_cpt_inst_flush(pf, &inst, sizeof(struct cpt_inst_s)); |
| 1039 | + return true; |
| 1040 | +drop: |
| 1041 | + dev_kfree_skb_any(skb); |
| 1042 | + return false; |
| 1043 | +} |
0 commit comments