Skip to content

Commit 28c66cf

Browse files
ms-ajaysharmarleon
authored and committed
net: mana: Define data structures for protection domain and memory registration
The MANA hardware supports protection domains and memory registration for use in an RDMA environment. Add those definitions and expose them for use by the RDMA driver.

Signed-off-by: Ajay Sharma <[email protected]>
Signed-off-by: Long Li <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Reviewed-by: Dexuan Cui <[email protected]>
Acked-by: Haiyang Zhang <[email protected]>
Signed-off-by: Leon Romanovsky <[email protected]>
1 parent f72ecec commit 28c66cf

File tree

3 files changed

+143
-23
lines changed

3 files changed

+143
-23
lines changed

drivers/net/ethernet/microsoft/mana/gdma_main.c

Lines changed: 17 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -198,7 +198,7 @@ static int mana_gd_create_hw_eq(struct gdma_context *gc,
198198
req.type = queue->type;
199199
req.pdid = queue->gdma_dev->pdid;
200200
req.doolbell_id = queue->gdma_dev->doorbell;
201-
req.gdma_region = queue->mem_info.gdma_region;
201+
req.gdma_region = queue->mem_info.dma_region_handle;
202202
req.queue_size = queue->queue_size;
203203
req.log2_throttle_limit = queue->eq.log2_throttle_limit;
204204
req.eq_pci_msix_index = queue->eq.msix_index;
@@ -212,7 +212,7 @@ static int mana_gd_create_hw_eq(struct gdma_context *gc,
212212

213213
queue->id = resp.queue_index;
214214
queue->eq.disable_needed = true;
215-
queue->mem_info.gdma_region = GDMA_INVALID_DMA_REGION;
215+
queue->mem_info.dma_region_handle = GDMA_INVALID_DMA_REGION;
216216
return 0;
217217
}
218218

@@ -671,24 +671,30 @@ int mana_gd_create_hwc_queue(struct gdma_dev *gd,
671671
return err;
672672
}
673673

674-
static void mana_gd_destroy_dma_region(struct gdma_context *gc, u64 gdma_region)
674+
int mana_gd_destroy_dma_region(struct gdma_context *gc,
675+
gdma_obj_handle_t dma_region_handle)
675676
{
676677
struct gdma_destroy_dma_region_req req = {};
677678
struct gdma_general_resp resp = {};
678679
int err;
679680

680-
if (gdma_region == GDMA_INVALID_DMA_REGION)
681-
return;
681+
if (dma_region_handle == GDMA_INVALID_DMA_REGION)
682+
return 0;
682683

683684
mana_gd_init_req_hdr(&req.hdr, GDMA_DESTROY_DMA_REGION, sizeof(req),
684685
sizeof(resp));
685-
req.gdma_region = gdma_region;
686+
req.dma_region_handle = dma_region_handle;
686687

687688
err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
688-
if (err || resp.hdr.status)
689+
if (err || resp.hdr.status) {
689690
dev_err(gc->dev, "Failed to destroy DMA region: %d, 0x%x\n",
690691
err, resp.hdr.status);
692+
return -EPROTO;
693+
}
694+
695+
return 0;
691696
}
697+
EXPORT_SYMBOL_NS(mana_gd_destroy_dma_region, NET_MANA);
692698

693699
static int mana_gd_create_dma_region(struct gdma_dev *gd,
694700
struct gdma_mem_info *gmi)
@@ -733,14 +739,15 @@ static int mana_gd_create_dma_region(struct gdma_dev *gd,
733739
if (err)
734740
goto out;
735741

736-
if (resp.hdr.status || resp.gdma_region == GDMA_INVALID_DMA_REGION) {
742+
if (resp.hdr.status ||
743+
resp.dma_region_handle == GDMA_INVALID_DMA_REGION) {
737744
dev_err(gc->dev, "Failed to create DMA region: 0x%x\n",
738745
resp.hdr.status);
739746
err = -EPROTO;
740747
goto out;
741748
}
742749

743-
gmi->gdma_region = resp.gdma_region;
750+
gmi->dma_region_handle = resp.dma_region_handle;
744751
out:
745752
kfree(req);
746753
return err;
@@ -863,7 +870,7 @@ void mana_gd_destroy_queue(struct gdma_context *gc, struct gdma_queue *queue)
863870
return;
864871
}
865872

866-
mana_gd_destroy_dma_region(gc, gmi->gdma_region);
873+
mana_gd_destroy_dma_region(gc, gmi->dma_region_handle);
867874
mana_gd_free_memory(gmi);
868875
kfree(queue);
869876
}

drivers/net/ethernet/microsoft/mana/mana_en.c

Lines changed: 10 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -1523,10 +1523,10 @@ static int mana_create_txq(struct mana_port_context *apc,
15231523
memset(&wq_spec, 0, sizeof(wq_spec));
15241524
memset(&cq_spec, 0, sizeof(cq_spec));
15251525

1526-
wq_spec.gdma_region = txq->gdma_sq->mem_info.gdma_region;
1526+
wq_spec.gdma_region = txq->gdma_sq->mem_info.dma_region_handle;
15271527
wq_spec.queue_size = txq->gdma_sq->queue_size;
15281528

1529-
cq_spec.gdma_region = cq->gdma_cq->mem_info.gdma_region;
1529+
cq_spec.gdma_region = cq->gdma_cq->mem_info.dma_region_handle;
15301530
cq_spec.queue_size = cq->gdma_cq->queue_size;
15311531
cq_spec.modr_ctx_id = 0;
15321532
cq_spec.attached_eq = cq->gdma_cq->cq.parent->id;
@@ -1541,8 +1541,10 @@ static int mana_create_txq(struct mana_port_context *apc,
15411541
txq->gdma_sq->id = wq_spec.queue_index;
15421542
cq->gdma_cq->id = cq_spec.queue_index;
15431543

1544-
txq->gdma_sq->mem_info.gdma_region = GDMA_INVALID_DMA_REGION;
1545-
cq->gdma_cq->mem_info.gdma_region = GDMA_INVALID_DMA_REGION;
1544+
txq->gdma_sq->mem_info.dma_region_handle =
1545+
GDMA_INVALID_DMA_REGION;
1546+
cq->gdma_cq->mem_info.dma_region_handle =
1547+
GDMA_INVALID_DMA_REGION;
15461548

15471549
txq->gdma_txq_id = txq->gdma_sq->id;
15481550

@@ -1753,10 +1755,10 @@ static struct mana_rxq *mana_create_rxq(struct mana_port_context *apc,
17531755

17541756
memset(&wq_spec, 0, sizeof(wq_spec));
17551757
memset(&cq_spec, 0, sizeof(cq_spec));
1756-
wq_spec.gdma_region = rxq->gdma_rq->mem_info.gdma_region;
1758+
wq_spec.gdma_region = rxq->gdma_rq->mem_info.dma_region_handle;
17571759
wq_spec.queue_size = rxq->gdma_rq->queue_size;
17581760

1759-
cq_spec.gdma_region = cq->gdma_cq->mem_info.gdma_region;
1761+
cq_spec.gdma_region = cq->gdma_cq->mem_info.dma_region_handle;
17601762
cq_spec.queue_size = cq->gdma_cq->queue_size;
17611763
cq_spec.modr_ctx_id = 0;
17621764
cq_spec.attached_eq = cq->gdma_cq->cq.parent->id;
@@ -1769,8 +1771,8 @@ static struct mana_rxq *mana_create_rxq(struct mana_port_context *apc,
17691771
rxq->gdma_rq->id = wq_spec.queue_index;
17701772
cq->gdma_cq->id = cq_spec.queue_index;
17711773

1772-
rxq->gdma_rq->mem_info.gdma_region = GDMA_INVALID_DMA_REGION;
1773-
cq->gdma_cq->mem_info.gdma_region = GDMA_INVALID_DMA_REGION;
1774+
rxq->gdma_rq->mem_info.dma_region_handle = GDMA_INVALID_DMA_REGION;
1775+
cq->gdma_cq->mem_info.dma_region_handle = GDMA_INVALID_DMA_REGION;
17741776

17751777
rxq->gdma_id = rxq->gdma_rq->id;
17761778
cq->gdma_id = cq->gdma_cq->id;

include/net/mana/gdma.h

Lines changed: 116 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -29,6 +29,10 @@ enum gdma_request_type {
2929
GDMA_CREATE_DMA_REGION = 25,
3030
GDMA_DMA_REGION_ADD_PAGES = 26,
3131
GDMA_DESTROY_DMA_REGION = 27,
32+
GDMA_CREATE_PD = 29,
33+
GDMA_DESTROY_PD = 30,
34+
GDMA_CREATE_MR = 31,
35+
GDMA_DESTROY_MR = 32,
3236
};
3337

3438
#define GDMA_RESOURCE_DOORBELL_PAGE 27
@@ -61,6 +65,8 @@ enum {
6165
GDMA_DEVICE_MANA = 2,
6266
};
6367

68+
typedef u64 gdma_obj_handle_t;
69+
6470
struct gdma_resource {
6571
/* Protect the bitmap */
6672
spinlock_t lock;
@@ -194,7 +200,7 @@ struct gdma_mem_info {
194200
u64 length;
195201

196202
/* Allocated by the PF driver */
197-
u64 gdma_region;
203+
gdma_obj_handle_t dma_region_handle;
198204
};
199205

200206
#define REGISTER_ATB_MST_MKEY_LOWER_SIZE 8
@@ -618,7 +624,7 @@ struct gdma_create_queue_req {
618624
u32 reserved1;
619625
u32 pdid;
620626
u32 doolbell_id;
621-
u64 gdma_region;
627+
gdma_obj_handle_t gdma_region;
622628
u32 reserved2;
623629
u32 queue_size;
624630
u32 log2_throttle_limit;
@@ -645,6 +651,28 @@ struct gdma_disable_queue_req {
645651
u32 alloc_res_id_on_creation;
646652
}; /* HW DATA */
647653

654+
enum atb_page_size {
655+
ATB_PAGE_SIZE_4K,
656+
ATB_PAGE_SIZE_8K,
657+
ATB_PAGE_SIZE_16K,
658+
ATB_PAGE_SIZE_32K,
659+
ATB_PAGE_SIZE_64K,
660+
ATB_PAGE_SIZE_128K,
661+
ATB_PAGE_SIZE_256K,
662+
ATB_PAGE_SIZE_512K,
663+
ATB_PAGE_SIZE_1M,
664+
ATB_PAGE_SIZE_2M,
665+
ATB_PAGE_SIZE_MAX,
666+
};
667+
668+
enum gdma_mr_access_flags {
669+
GDMA_ACCESS_FLAG_LOCAL_READ = BIT_ULL(0),
670+
GDMA_ACCESS_FLAG_LOCAL_WRITE = BIT_ULL(1),
671+
GDMA_ACCESS_FLAG_REMOTE_READ = BIT_ULL(2),
672+
GDMA_ACCESS_FLAG_REMOTE_WRITE = BIT_ULL(3),
673+
GDMA_ACCESS_FLAG_REMOTE_ATOMIC = BIT_ULL(4),
674+
};
675+
648676
/* GDMA_CREATE_DMA_REGION */
649677
struct gdma_create_dma_region_req {
650678
struct gdma_req_hdr hdr;
@@ -671,14 +699,14 @@ struct gdma_create_dma_region_req {
671699

672700
struct gdma_create_dma_region_resp {
673701
struct gdma_resp_hdr hdr;
674-
u64 gdma_region;
702+
gdma_obj_handle_t dma_region_handle;
675703
}; /* HW DATA */
676704

677705
/* GDMA_DMA_REGION_ADD_PAGES */
678706
struct gdma_dma_region_add_pages_req {
679707
struct gdma_req_hdr hdr;
680708

681-
u64 gdma_region;
709+
gdma_obj_handle_t dma_region_handle;
682710

683711
u32 page_addr_list_len;
684712
u32 reserved3;
@@ -690,9 +718,88 @@ struct gdma_dma_region_add_pages_req {
690718
struct gdma_destroy_dma_region_req {
691719
struct gdma_req_hdr hdr;
692720

693-
u64 gdma_region;
721+
gdma_obj_handle_t dma_region_handle;
694722
}; /* HW DATA */
695723

724+
enum gdma_pd_flags {
725+
GDMA_PD_FLAG_INVALID = 0,
726+
};
727+
728+
struct gdma_create_pd_req {
729+
struct gdma_req_hdr hdr;
730+
enum gdma_pd_flags flags;
731+
u32 reserved;
732+
};/* HW DATA */
733+
734+
struct gdma_create_pd_resp {
735+
struct gdma_resp_hdr hdr;
736+
gdma_obj_handle_t pd_handle;
737+
u32 pd_id;
738+
u32 reserved;
739+
};/* HW DATA */
740+
741+
struct gdma_destroy_pd_req {
742+
struct gdma_req_hdr hdr;
743+
gdma_obj_handle_t pd_handle;
744+
};/* HW DATA */
745+
746+
struct gdma_destory_pd_resp {
747+
struct gdma_resp_hdr hdr;
748+
};/* HW DATA */
749+
750+
enum gdma_mr_type {
751+
/* Guest Virtual Address - MRs of this type allow access
752+
* to memory mapped by PTEs associated with this MR using a virtual
753+
* address that is set up in the MST
754+
*/
755+
GDMA_MR_TYPE_GVA = 2,
756+
};
757+
758+
struct gdma_create_mr_params {
759+
gdma_obj_handle_t pd_handle;
760+
enum gdma_mr_type mr_type;
761+
union {
762+
struct {
763+
gdma_obj_handle_t dma_region_handle;
764+
u64 virtual_address;
765+
enum gdma_mr_access_flags access_flags;
766+
} gva;
767+
};
768+
};
769+
770+
struct gdma_create_mr_request {
771+
struct gdma_req_hdr hdr;
772+
gdma_obj_handle_t pd_handle;
773+
enum gdma_mr_type mr_type;
774+
u32 reserved_1;
775+
776+
union {
777+
struct {
778+
gdma_obj_handle_t dma_region_handle;
779+
u64 virtual_address;
780+
enum gdma_mr_access_flags access_flags;
781+
} gva;
782+
783+
};
784+
u32 reserved_2;
785+
};/* HW DATA */
786+
787+
struct gdma_create_mr_response {
788+
struct gdma_resp_hdr hdr;
789+
gdma_obj_handle_t mr_handle;
790+
u32 lkey;
791+
u32 rkey;
792+
};/* HW DATA */
793+
794+
struct gdma_destroy_mr_request {
795+
struct gdma_req_hdr hdr;
796+
gdma_obj_handle_t mr_handle;
797+
};/* HW DATA */
798+
799+
struct gdma_destroy_mr_response {
800+
struct gdma_resp_hdr hdr;
801+
};/* HW DATA */
802+
696803
int mana_gd_verify_vf_version(struct pci_dev *pdev);
697804

698805
int mana_gd_register_device(struct gdma_dev *gd);
@@ -719,4 +826,8 @@ void mana_gd_free_memory(struct gdma_mem_info *gmi);
719826

720827
int mana_gd_send_request(struct gdma_context *gc, u32 req_len, const void *req,
721828
u32 resp_len, void *resp);
829+
830+
int mana_gd_destroy_dma_region(struct gdma_context *gc,
831+
gdma_obj_handle_t dma_region_handle);
832+
722833
#endif /* _GDMA_H */

0 commit comments

Comments (0)