Commit db23ae6

hz-cheng authored and jgunthorpe committed

RDMA/erdma: Add verbs header file
This header file defines the main structures and functions used for RDMA Verbs, including qp, cq, mr, ucontext, etc.

Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Cheng Xu <[email protected]>
Signed-off-by: Jason Gunthorpe <[email protected]>
1 parent f2a0a63 commit db23ae6
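
The header follows the usual kernel RDMA driver layout: each erdma object embeds its generic ib_* counterpart, and a container_of() helper recovers the driver structure from the pointer the IB core passes in. A minimal sketch of the pattern, using the erdma_pd definition and the to_epd() helper exactly as the diff below defines them:

struct erdma_pd {
        struct ib_pd ibpd;      /* embedded IB core object */
        u32 pdn;                /* driver-private PD number */
};

static inline struct erdma_pd *to_epd(struct ib_pd *pd)
{
        /* map the core's ib_pd back to the wrapping erdma_pd */
        return container_of(pd, struct erdma_pd, ibpd);
}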

File tree

1 file changed: +342 -0 lines changed

1 file changed

+342
-0
lines changed
Lines changed: 342 additions & 0 deletions
@@ -0,0 +1,342 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */

/* Authors: Cheng Xu <[email protected]> */
/*          Kai Shen <[email protected]> */
/* Copyright (c) 2020-2022, Alibaba Group. */

#ifndef __ERDMA_VERBS_H__
#define __ERDMA_VERBS_H__

#include <linux/errno.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/iw_cm.h>

#include "erdma.h"
#include "erdma_cm.h"
#include "erdma_hw.h"

/* RDMA Capability. */
#define ERDMA_MAX_PD (128 * 1024)
#define ERDMA_MAX_SEND_WR 4096
#define ERDMA_MAX_ORD 128
#define ERDMA_MAX_IRD 128
#define ERDMA_MAX_SGE_RD 1
#define ERDMA_MAX_CONTEXT (128 * 1024)
#define ERDMA_MAX_SEND_SGE 6
#define ERDMA_MAX_RECV_SGE 1
#define ERDMA_MAX_INLINE (sizeof(struct erdma_sge) * (ERDMA_MAX_SEND_SGE))
#define ERDMA_MAX_FRMR_PA 512

enum {
        ERDMA_MMAP_IO_NC = 0, /* no cache */
};

struct erdma_user_mmap_entry {
        struct rdma_user_mmap_entry rdma_entry;
        u64 address;
        u8 mmap_flag;
};

struct erdma_ucontext {
        struct ib_ucontext ibucontext;

        u32 sdb_type;
        u32 sdb_idx;
        u32 sdb_page_idx;
        u32 sdb_page_off;
        u64 sdb;
        u64 rdb;
        u64 cdb;

        struct rdma_user_mmap_entry *sq_db_mmap_entry;
        struct rdma_user_mmap_entry *rq_db_mmap_entry;
        struct rdma_user_mmap_entry *cq_db_mmap_entry;

        /* doorbell records */
        struct list_head dbrecords_page_list;
        struct mutex dbrecords_page_mutex;
};

struct erdma_pd {
        struct ib_pd ibpd;
        u32 pdn;
};

/*
 * MemoryRegion definition.
 */
#define ERDMA_MAX_INLINE_MTT_ENTRIES 4
#define MTT_SIZE(mtt_cnt) (mtt_cnt << 3) /* each MTT entry takes 8 bytes. */
#define ERDMA_MR_MAX_MTT_CNT 524288
#define ERDMA_MTT_ENTRY_SIZE 8

#define ERDMA_MR_TYPE_NORMAL 0
#define ERDMA_MR_TYPE_FRMR 1
#define ERDMA_MR_TYPE_DMA 2

#define ERDMA_MR_INLINE_MTT 0
#define ERDMA_MR_INDIRECT_MTT 1

#define ERDMA_MR_ACC_LR BIT(0)
#define ERDMA_MR_ACC_LW BIT(1)
#define ERDMA_MR_ACC_RR BIT(2)
#define ERDMA_MR_ACC_RW BIT(3)

static inline u8 to_erdma_access_flags(int access)
{
        return (access & IB_ACCESS_REMOTE_READ ? ERDMA_MR_ACC_RR : 0) |
               (access & IB_ACCESS_LOCAL_WRITE ? ERDMA_MR_ACC_LW : 0) |
               (access & IB_ACCESS_REMOTE_WRITE ? ERDMA_MR_ACC_RW : 0);
}
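
/*
 * Example (illustrative only, not part of the commit): a registration
 * requesting IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ maps to
 * ERDMA_MR_ACC_LW | ERDMA_MR_ACC_RR, i.e. BIT(1) | BIT(2) = 0x6.
 * There is no IB_ACCESS_* counterpart for ERDMA_MR_ACC_LR because the
 * IB verbs API treats local read access as always permitted.
 */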

struct erdma_mem {
        struct ib_umem *umem;
        void *mtt_buf;
        u32 mtt_type;
        u32 page_size;
        u32 page_offset;
        u32 page_cnt;
        u32 mtt_nents;

        u64 va;
        u64 len;

        u64 mtt_entry[ERDMA_MAX_INLINE_MTT_ENTRIES];
};

struct erdma_mr {
        struct ib_mr ibmr;
        struct erdma_mem mem;
        u8 type;
        u8 access;
        u8 valid;
};

struct erdma_user_dbrecords_page {
        struct list_head list;
        struct ib_umem *umem;
        u64 va;
        int refcnt;
};

struct erdma_uqp {
        struct erdma_mem sq_mtt;
        struct erdma_mem rq_mtt;

        dma_addr_t sq_db_info_dma_addr;
        dma_addr_t rq_db_info_dma_addr;

        struct erdma_user_dbrecords_page *user_dbr_page;

        u32 rq_offset;
};

struct erdma_kqp {
        u16 sq_pi;
        u16 sq_ci;

        u16 rq_pi;
        u16 rq_ci;

        u64 *swr_tbl;
        u64 *rwr_tbl;

        void __iomem *hw_sq_db;
        void __iomem *hw_rq_db;

        void *sq_buf;
        dma_addr_t sq_buf_dma_addr;

        void *rq_buf;
        dma_addr_t rq_buf_dma_addr;

        void *sq_db_info;
        void *rq_db_info;

        u8 sig_all;
};

enum erdma_qp_state {
        ERDMA_QP_STATE_IDLE = 0,
        ERDMA_QP_STATE_RTR = 1,
        ERDMA_QP_STATE_RTS = 2,
        ERDMA_QP_STATE_CLOSING = 3,
        ERDMA_QP_STATE_TERMINATE = 4,
        ERDMA_QP_STATE_ERROR = 5,
        ERDMA_QP_STATE_UNDEF = 7,
        ERDMA_QP_STATE_COUNT = 8
};

enum erdma_qp_attr_mask {
        ERDMA_QP_ATTR_STATE = (1 << 0),
        ERDMA_QP_ATTR_LLP_HANDLE = (1 << 2),
        ERDMA_QP_ATTR_ORD = (1 << 3),
        ERDMA_QP_ATTR_IRD = (1 << 4),
        ERDMA_QP_ATTR_SQ_SIZE = (1 << 5),
        ERDMA_QP_ATTR_RQ_SIZE = (1 << 6),
        ERDMA_QP_ATTR_MPA = (1 << 7)
};

struct erdma_qp_attrs {
        enum erdma_qp_state state;
        enum erdma_cc_alg cc; /* Congestion control algorithm */
        u32 sq_size;
        u32 rq_size;
        u32 orq_size;
        u32 irq_size;
        u32 max_send_sge;
        u32 max_recv_sge;
        u32 cookie;
#define ERDMA_QP_ACTIVE 0
#define ERDMA_QP_PASSIVE 1
        u8 qp_type;
        u8 pd_len;
};

struct erdma_qp {
        struct ib_qp ibqp;
        struct kref ref;
        struct completion safe_free;
        struct erdma_dev *dev;
        struct erdma_cep *cep;
        struct rw_semaphore state_lock;

        union {
                struct erdma_kqp kern_qp;
                struct erdma_uqp user_qp;
        };

        struct erdma_cq *scq;
        struct erdma_cq *rcq;

        struct erdma_qp_attrs attrs;
        spinlock_t lock;
};

struct erdma_kcq_info {
        void *qbuf;
        dma_addr_t qbuf_dma_addr;
        u32 ci;
        u32 cmdsn;
        u32 notify_cnt;

        spinlock_t lock;
        u8 __iomem *db;
        u64 *db_record;
};

struct erdma_ucq_info {
        struct erdma_mem qbuf_mtt;
        struct erdma_user_dbrecords_page *user_dbr_page;
        dma_addr_t db_info_dma_addr;
};

struct erdma_cq {
        struct ib_cq ibcq;
        u32 cqn;

        u32 depth;
        u32 assoc_eqn;

        union {
                struct erdma_kcq_info kern_cq;
                struct erdma_ucq_info user_cq;
        };
};

#define QP_ID(qp) ((qp)->ibqp.qp_num)

static inline struct erdma_qp *find_qp_by_qpn(struct erdma_dev *dev, int id)
{
        return (struct erdma_qp *)xa_load(&dev->qp_xa, id);
}

static inline struct erdma_cq *find_cq_by_cqn(struct erdma_dev *dev, int id)
{
        return (struct erdma_cq *)xa_load(&dev->cq_xa, id);
}

void erdma_qp_get(struct erdma_qp *qp);
void erdma_qp_put(struct erdma_qp *qp);
int erdma_modify_qp_internal(struct erdma_qp *qp, struct erdma_qp_attrs *attrs,
                             enum erdma_qp_attr_mask mask);
void erdma_qp_llp_close(struct erdma_qp *qp);
void erdma_qp_cm_drop(struct erdma_qp *qp);

static inline struct erdma_ucontext *to_ectx(struct ib_ucontext *ibctx)
{
        return container_of(ibctx, struct erdma_ucontext, ibucontext);
}

static inline struct erdma_pd *to_epd(struct ib_pd *pd)
{
        return container_of(pd, struct erdma_pd, ibpd);
}

static inline struct erdma_mr *to_emr(struct ib_mr *ibmr)
{
        return container_of(ibmr, struct erdma_mr, ibmr);
}

static inline struct erdma_qp *to_eqp(struct ib_qp *qp)
{
        return container_of(qp, struct erdma_qp, ibqp);
}

static inline struct erdma_cq *to_ecq(struct ib_cq *ibcq)
{
        return container_of(ibcq, struct erdma_cq, ibcq);
}

static inline struct erdma_user_mmap_entry *
to_emmap(struct rdma_user_mmap_entry *ibmmap)
{
        return container_of(ibmmap, struct erdma_user_mmap_entry, rdma_entry);
}

int erdma_alloc_ucontext(struct ib_ucontext *ibctx, struct ib_udata *data);
void erdma_dealloc_ucontext(struct ib_ucontext *ibctx);
int erdma_query_device(struct ib_device *dev, struct ib_device_attr *attr,
                       struct ib_udata *data);
int erdma_get_port_immutable(struct ib_device *dev, u32 port,
                             struct ib_port_immutable *ib_port_immutable);
int erdma_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
                    struct ib_udata *data);
int erdma_query_port(struct ib_device *dev, u32 port,
                     struct ib_port_attr *attr);
int erdma_query_gid(struct ib_device *dev, u32 port, int idx,
                    union ib_gid *gid);
int erdma_alloc_pd(struct ib_pd *ibpd, struct ib_udata *data);
int erdma_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata);
int erdma_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attr,
                    struct ib_udata *data);
int erdma_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int mask,
                   struct ib_qp_init_attr *init_attr);
int erdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int mask,
                    struct ib_udata *data);
int erdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata);
int erdma_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata);
int erdma_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
struct ib_mr *erdma_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
                                u64 virt, int access, struct ib_udata *udata);
struct ib_mr *erdma_get_dma_mr(struct ib_pd *ibpd, int rights);
int erdma_dereg_mr(struct ib_mr *ibmr, struct ib_udata *data);
int erdma_mmap(struct ib_ucontext *ctx, struct vm_area_struct *vma);
void erdma_mmap_free(struct rdma_user_mmap_entry *rdma_entry);
void erdma_qp_get_ref(struct ib_qp *ibqp);
void erdma_qp_put_ref(struct ib_qp *ibqp);
struct ib_qp *erdma_get_ibqp(struct ib_device *dev, int id);
int erdma_post_send(struct ib_qp *ibqp, const struct ib_send_wr *send_wr,
                    const struct ib_send_wr **bad_send_wr);
int erdma_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *recv_wr,
                    const struct ib_recv_wr **bad_recv_wr);
int erdma_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
struct ib_mr *erdma_ib_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
                                u32 max_num_sg);
int erdma_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
                    unsigned int *sg_offset);
void erdma_port_event(struct erdma_dev *dev, enum ib_event_type reason);

#endif
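
To illustrate how the xa_load()-backed lookup helpers and the container_of() converters combine, here is a plausible sketch of erdma_get_ibqp(), which is only declared in this header; to_edev() is assumed to be the ib_device-to-erdma_dev converter from erdma.h, which is not part of this diff:

struct ib_qp *erdma_get_ibqp(struct ib_device *ibdev, int id)
{
        /* XArray lookup; returns NULL if the QPN is not registered */
        struct erdma_qp *qp = find_qp_by_qpn(to_edev(ibdev), id);

        return qp ? &qp->ibqp : NULL;
}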
