
Commit 968606e

Luoyouming authored and jgunthorpe committed
RDMA/hns: Remove rq inline in kernel
The kernel space of the RoCE driver will no longer support the rq inline feature. Delete the code related to the rq inline feature in kernel space.

Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Luoyouming <[email protected]>
Signed-off-by: Haoyue Xu <[email protected]>
Signed-off-by: Jason Gunthorpe <[email protected]>
1 parent bd99ede commit 968606e

3 files changed, 0 insertions(+), 147 deletions(-)

drivers/infiniband/hw/hns/hns_roce_device.h

Lines changed: 0 additions & 16 deletions
@@ -567,21 +567,6 @@ struct hns_roce_mbox_msg {
 
 struct hns_roce_dev;
 
-struct hns_roce_rinl_sge {
-	void *addr;
-	u32 len;
-};
-
-struct hns_roce_rinl_wqe {
-	struct hns_roce_rinl_sge *sg_list;
-	u32 sge_cnt;
-};
-
-struct hns_roce_rinl_buf {
-	struct hns_roce_rinl_wqe *wqe_list;
-	u32 wqe_cnt;
-};
-
 enum {
 	HNS_ROCE_FLUSH_FLAG = 0,
 };
@@ -632,7 +617,6 @@ struct hns_roce_qp {
 	/* 0: flush needed, 1: unneeded */
 	unsigned long flush_flag;
 	struct hns_roce_work flush_work;
-	struct hns_roce_rinl_buf rq_inl_buf;
 	struct list_head node; /* all qps are on a list */
 	struct list_head rq_node; /* all recv qps are on a list */
 	struct list_head sq_node; /* all send qps are on a list */

drivers/infiniband/hw/hns/hns_roce_hw_v2.c

Lines changed: 0 additions & 67 deletions
@@ -821,22 +821,10 @@ static void fill_recv_sge_to_wqe(const struct ib_recv_wr *wr, void *wqe,
 static void fill_rq_wqe(struct hns_roce_qp *hr_qp, const struct ib_recv_wr *wr,
 			u32 wqe_idx, u32 max_sge)
 {
-	struct hns_roce_rinl_sge *sge_list;
 	void *wqe = NULL;
-	u32 i;
 
 	wqe = hns_roce_get_recv_wqe(hr_qp, wqe_idx);
 	fill_recv_sge_to_wqe(wr, wqe, max_sge, hr_qp->rq.rsv_sge);
-
-	/* rq support inline data */
-	if (hr_qp->rq_inl_buf.wqe_cnt) {
-		sge_list = hr_qp->rq_inl_buf.wqe_list[wqe_idx].sg_list;
-		hr_qp->rq_inl_buf.wqe_list[wqe_idx].sge_cnt = (u32)wr->num_sge;
-		for (i = 0; i < wr->num_sge; i++) {
-			sge_list[i].addr = (void *)(u64)wr->sg_list[i].addr;
-			sge_list[i].len = wr->sg_list[i].length;
-		}
-	}
 }
 
 static int hns_roce_v2_post_recv(struct ib_qp *ibqp,
@@ -3730,39 +3718,6 @@ static int hns_roce_v2_req_notify_cq(struct ib_cq *ibcq,
 	return 0;
 }
 
-static int hns_roce_handle_recv_inl_wqe(struct hns_roce_v2_cqe *cqe,
-					struct hns_roce_qp *qp,
-					struct ib_wc *wc)
-{
-	struct hns_roce_rinl_sge *sge_list;
-	u32 wr_num, wr_cnt, sge_num;
-	u32 sge_cnt, data_len, size;
-	void *wqe_buf;
-
-	wr_num = hr_reg_read(cqe, CQE_WQE_IDX);
-	wr_cnt = wr_num & (qp->rq.wqe_cnt - 1);
-
-	sge_list = qp->rq_inl_buf.wqe_list[wr_cnt].sg_list;
-	sge_num = qp->rq_inl_buf.wqe_list[wr_cnt].sge_cnt;
-	wqe_buf = hns_roce_get_recv_wqe(qp, wr_cnt);
-	data_len = wc->byte_len;
-
-	for (sge_cnt = 0; (sge_cnt < sge_num) && (data_len); sge_cnt++) {
-		size = min(sge_list[sge_cnt].len, data_len);
-		memcpy((void *)sge_list[sge_cnt].addr, wqe_buf, size);
-
-		data_len -= size;
-		wqe_buf += size;
-	}
-
-	if (unlikely(data_len)) {
-		wc->status = IB_WC_LOC_LEN_ERR;
-		return -EAGAIN;
-	}
-
-	return 0;
-}
-
 static int sw_comp(struct hns_roce_qp *hr_qp, struct hns_roce_wq *wq,
 		   int num_entries, struct ib_wc *wc)
 {
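For context, the heart of the deleted receive path is the copy loop in hns_roce_handle_recv_inl_wqe() above: it scattered the inline payload held in the recv WQE buffer across the SGEs the consumer posted, copying at most min(sge.len, remaining) bytes per SGE and reporting IB_WC_LOC_LEN_ERR when payload was left over. A minimal user-space sketch of that scatter logic (struct sge and scatter_inline() are illustrative stand-ins, not driver API):

#include <stdint.h>
#include <string.h>

/* Illustrative stand-in for the deleted struct hns_roce_rinl_sge. */
struct sge {
	void *addr;
	uint32_t len;
};

/*
 * Scatter an inline payload (contiguous in the WQE buffer) across the
 * posted SGE list, at most min(sge.len, remaining) bytes per SGE.
 * Leftover payload maps to the IB_WC_LOC_LEN_ERR case above.
 */
static int scatter_inline(const uint8_t *wqe_buf, uint32_t data_len,
			  const struct sge *sge_list, uint32_t sge_num)
{
	uint32_t i, size;

	for (i = 0; i < sge_num && data_len; i++) {
		size = sge_list[i].len < data_len ? sge_list[i].len : data_len;
		memcpy(sge_list[i].addr, wqe_buf, size);
		data_len -= size;
		wqe_buf += size;
	}

	return data_len ? -1 : 0;
}

int main(void)
{
	const uint8_t payload[] = "inline-data";
	uint8_t a[4], b[16];
	struct sge sges[2] = { { a, sizeof(a) }, { b, sizeof(b) } };

	/* 11 payload bytes land as 4 bytes in 'a' and 7 in 'b'. */
	return scatter_inline(payload, sizeof(payload) - 1, sges, 2);
}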
@@ -3974,22 +3929,10 @@ static void fill_send_wc(struct ib_wc *wc, struct hns_roce_v2_cqe *cqe)
 	wc->opcode = ib_opcode;
 }
 
-static inline bool is_rq_inl_enabled(struct ib_wc *wc, u32 hr_opcode,
-				     struct hns_roce_v2_cqe *cqe)
-{
-	return wc->qp->qp_type != IB_QPT_UD && wc->qp->qp_type != IB_QPT_GSI &&
-	       (hr_opcode == HNS_ROCE_V2_OPCODE_SEND ||
-		hr_opcode == HNS_ROCE_V2_OPCODE_SEND_WITH_IMM ||
-		hr_opcode == HNS_ROCE_V2_OPCODE_SEND_WITH_INV) &&
-	       hr_reg_read(cqe, CQE_RQ_INLINE);
-}
-
 static int fill_recv_wc(struct ib_wc *wc, struct hns_roce_v2_cqe *cqe)
 {
-	struct hns_roce_qp *qp = to_hr_qp(wc->qp);
 	u32 hr_opcode;
 	int ib_opcode;
-	int ret;
 
 	wc->byte_len = le32_to_cpu(cqe->byte_cnt);
 
@@ -4014,12 +3957,6 @@ static int fill_recv_wc(struct ib_wc *wc, struct hns_roce_v2_cqe *cqe)
 	else
 		wc->opcode = ib_opcode;
 
-	if (is_rq_inl_enabled(wc, hr_opcode, cqe)) {
-		ret = hns_roce_handle_recv_inl_wqe(cqe, qp, wc);
-		if (unlikely(ret))
-			return ret;
-	}
-
 	wc->sl = hr_reg_read(cqe, CQE_SL);
 	wc->src_qp = hr_reg_read(cqe, CQE_RMT_QPN);
 	wc->slid = 0;
@@ -4445,10 +4382,6 @@ static void modify_qp_reset_to_init(struct ib_qp *ibqp,
 	hr_reg_write(context, QPC_RQ_DB_RECORD_ADDR_H,
 		     upper_32_bits(hr_qp->rdb.dma));
 
-	if (ibqp->qp_type != IB_QPT_UD && ibqp->qp_type != IB_QPT_GSI)
-		hr_reg_write_bool(context, QPC_RQIE,
-				  hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE);
-
 	hr_reg_write(context, QPC_RX_CQN, get_cqn(ibqp->recv_cq));
 
 	if (ibqp->srq) {

drivers/infiniband/hw/hns/hns_roce_qp.c

Lines changed: 0 additions & 64 deletions
@@ -433,7 +433,6 @@ static int set_rq_size(struct hns_roce_dev *hr_dev, struct ib_qp_cap *cap,
 	if (!has_rq) {
 		hr_qp->rq.wqe_cnt = 0;
 		hr_qp->rq.max_gs = 0;
-		hr_qp->rq_inl_buf.wqe_cnt = 0;
 		cap->max_recv_wr = 0;
 		cap->max_recv_sge = 0;
 
@@ -463,12 +462,6 @@ static int set_rq_size(struct hns_roce_dev *hr_dev, struct ib_qp_cap *cap,
 			  hr_qp->rq.max_gs);
 
 	hr_qp->rq.wqe_cnt = cnt;
-	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE &&
-	    hr_qp->ibqp.qp_type != IB_QPT_UD &&
-	    hr_qp->ibqp.qp_type != IB_QPT_GSI)
-		hr_qp->rq_inl_buf.wqe_cnt = cnt;
-	else
-		hr_qp->rq_inl_buf.wqe_cnt = 0;
 
 	cap->max_recv_wr = cnt;
 	cap->max_recv_sge = hr_qp->rq.max_gs - hr_qp->rq.rsv_sge;
@@ -732,49 +725,6 @@ static int hns_roce_qp_has_rq(struct ib_qp_init_attr *attr)
 	return 1;
 }
 
-static int alloc_rq_inline_buf(struct hns_roce_qp *hr_qp,
-			       struct ib_qp_init_attr *init_attr)
-{
-	u32 max_recv_sge = init_attr->cap.max_recv_sge;
-	u32 wqe_cnt = hr_qp->rq_inl_buf.wqe_cnt;
-	struct hns_roce_rinl_wqe *wqe_list;
-	int i;
-
-	/* allocate recv inline buf */
-	wqe_list = kcalloc(wqe_cnt, sizeof(struct hns_roce_rinl_wqe),
-			   GFP_KERNEL);
-	if (!wqe_list)
-		goto err;
-
-	/* Allocate a continuous buffer for all inline sge we need */
-	wqe_list[0].sg_list = kcalloc(wqe_cnt, (max_recv_sge *
-					sizeof(struct hns_roce_rinl_sge)),
-				      GFP_KERNEL);
-	if (!wqe_list[0].sg_list)
-		goto err_wqe_list;
-
-	/* Assign buffers of sg_list to each inline wqe */
-	for (i = 1; i < wqe_cnt; i++)
-		wqe_list[i].sg_list = &wqe_list[0].sg_list[i * max_recv_sge];
-
-	hr_qp->rq_inl_buf.wqe_list = wqe_list;
-
-	return 0;
-
-err_wqe_list:
-	kfree(wqe_list);
-
-err:
-	return -ENOMEM;
-}
-
-static void free_rq_inline_buf(struct hns_roce_qp *hr_qp)
-{
-	if (hr_qp->rq_inl_buf.wqe_list)
-		kfree(hr_qp->rq_inl_buf.wqe_list[0].sg_list);
-	kfree(hr_qp->rq_inl_buf.wqe_list);
-}
-
 static int alloc_qp_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
 			struct ib_qp_init_attr *init_attr,
 			struct ib_udata *udata, unsigned long addr)
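The allocation scheme removed above is worth a note: alloc_rq_inline_buf() made one kcalloc() for the per-WQE descriptors and a single contiguous kcalloc() for all wqe_cnt * max_recv_sge SGE slots, then pointed each wqe_list[i].sg_list into its slice of the shared block, so free_rq_inline_buf() could release everything with two kfree() calls. A user-space sketch of the same slicing pattern (calloc()/free() standing in for kcalloc()/kfree(); names are illustrative):

#include <stdlib.h>

struct rinl_sge { void *addr; unsigned int len; };
struct rinl_wqe { struct rinl_sge *sg_list; unsigned int sge_cnt; };

/*
 * One allocation for the descriptors, one contiguous allocation for
 * all wqe_cnt * max_sge SGE slots, sliced per WQE by pointer arithmetic.
 */
static struct rinl_wqe *alloc_wqe_list(unsigned int wqe_cnt, unsigned int max_sge)
{
	struct rinl_wqe *wqe_list;
	unsigned int i;

	if (!wqe_cnt)
		return NULL;

	wqe_list = calloc(wqe_cnt, sizeof(*wqe_list));
	if (!wqe_list)
		return NULL;

	wqe_list[0].sg_list = calloc(wqe_cnt, max_sge * sizeof(struct rinl_sge));
	if (!wqe_list[0].sg_list) {
		free(wqe_list);
		return NULL;
	}

	for (i = 1; i < wqe_cnt; i++)
		wqe_list[i].sg_list = &wqe_list[0].sg_list[i * max_sge];

	return wqe_list;
}

/* Mirrors free_rq_inline_buf(): two frees release the whole table. */
static void free_wqe_list(struct rinl_wqe *wqe_list)
{
	if (wqe_list)
		free(wqe_list[0].sg_list);
	free(wqe_list);
}

int main(void)
{
	struct rinl_wqe *list = alloc_wqe_list(64, 4);
	int ok = list != NULL;

	free_wqe_list(list);
	return ok ? 0 : 1;
}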
@@ -783,18 +733,6 @@ static int alloc_qp_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
 	struct hns_roce_buf_attr buf_attr = {};
 	int ret;
 
-	if (!udata && hr_qp->rq_inl_buf.wqe_cnt) {
-		ret = alloc_rq_inline_buf(hr_qp, init_attr);
-		if (ret) {
-			ibdev_err(ibdev,
-				  "failed to alloc inline buf, ret = %d.\n",
-				  ret);
-			return ret;
-		}
-	} else {
-		hr_qp->rq_inl_buf.wqe_list = NULL;
-	}
-
 	ret = set_wqe_buf_attr(hr_dev, hr_qp, &buf_attr);
 	if (ret) {
 		ibdev_err(ibdev, "failed to split WQE buf, ret = %d.\n", ret);
@@ -814,15 +752,13 @@ static int alloc_qp_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
 	return 0;
 
 err_inline:
-	free_rq_inline_buf(hr_qp);
 
 	return ret;
 }
 
 static void free_qp_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
 {
 	hns_roce_mtr_destroy(hr_dev, &hr_qp->mtr);
-	free_rq_inline_buf(hr_qp);
 }
 
 static inline bool user_qp_has_sdb(struct hns_roce_dev *hr_dev,
