 #include "rvu.h"
 #include "rvu_reg.h"
 
+#define M(_name, _id, _fn_name, _req_type, _rsp_type)			\
+static struct _req_type __maybe_unused					\
+*otx2_mbox_alloc_msg_ ## _fn_name(struct rvu *rvu, int devid)		\
+{									\
+	struct _req_type *req;						\
+									\
+	req = (struct _req_type *)otx2_mbox_alloc_msg_rsp(		\
+		&rvu->afpf_wq_info.mbox_up, devid, sizeof(struct _req_type), \
+		sizeof(struct _rsp_type));				\
+	if (!req)							\
+		return NULL;						\
+	req->hdr.sig = OTX2_MBOX_REQ_SIG;				\
+	req->hdr.id = _id;						\
+	return req;							\
+}
+
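+/* Expand the allocator above once for each AF -> PF representor
+ * up-message listed in MBOX_UP_REP_MESSAGES.
+ */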
+MBOX_UP_REP_MESSAGES
+#undef M
+
+static int rvu_rep_up_notify(struct rvu *rvu, struct rep_event *event)
+{
+	struct rep_event *msg;
+	int pf;
+
+	pf = rvu_get_pf(event->pcifunc);
+
+	mutex_lock(&rvu->mbox_lock);
+	msg = otx2_mbox_alloc_msg_rep_event_up_notify(rvu, pf);
+	if (!msg) {
+		mutex_unlock(&rvu->mbox_lock);
+		return -ENOMEM;
+	}
+
+	msg->hdr.pcifunc = event->pcifunc;
+	msg->event = event->event;
+
+	memcpy(&msg->evt_data, &event->evt_data, sizeof(struct rep_evt_data));
+
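+	/* Make sure the previous up-message to this PF has been consumed
+	 * before sending the new one.
+	 */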
+	otx2_mbox_wait_for_zero(&rvu->afpf_wq_info.mbox_up, pf);
+
+	otx2_mbox_msg_send_up(&rvu->afpf_wq_info.mbox_up, pf);
+
+	mutex_unlock(&rvu->mbox_lock);
+	return 0;
+}
+
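+/* Worker: drain the event queue and forward each event to the REP PF */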
+static void rvu_rep_wq_handler(struct work_struct *work)
+{
+	struct rvu *rvu = container_of(work, struct rvu, rep_evt_work);
+	struct rep_evtq_ent *qentry;
+	struct rep_event *event;
+	unsigned long flags;
+
+	do {
+		spin_lock_irqsave(&rvu->rep_evtq_lock, flags);
+		qentry = list_first_entry_or_null(&rvu->rep_evtq_head,
+						  struct rep_evtq_ent,
+						  node);
+		if (qentry)
+			list_del(&qentry->node);
+
+		spin_unlock_irqrestore(&rvu->rep_evtq_lock, flags);
+		if (!qentry)
+			break; /* nothing more to process */
+
+		event = &qentry->event;
+
+		rvu_rep_up_notify(rvu, event);
+		kfree(qentry);
+	} while (1);
+}
+
+int rvu_mbox_handler_rep_event_notify(struct rvu *rvu, struct rep_event *req,
+				      struct msg_rsp *rsp)
+{
+	struct rep_evtq_ent *qentry;
+
+	qentry = kmalloc(sizeof(*qentry), GFP_ATOMIC);
+	if (!qentry)
+		return -ENOMEM;
+
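+	/* Copy the request out of the mbox region; it is only valid while
+	 * this handler runs.
+	 */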
+	qentry->event = *req;
+	spin_lock(&rvu->rep_evtq_lock);
+	list_add_tail(&qentry->node, &rvu->rep_evtq_head);
+	spin_unlock(&rvu->rep_evtq_lock);
+	queue_work(rvu->rep_evt_wq, &rvu->rep_evt_work);
+	return 0;
+}
+
+int rvu_rep_notify_pfvf_state(struct rvu *rvu, u16 pcifunc, bool enable)
+{
+	struct rep_event *req;
+	int pf;
+
+	if (!is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc)))
+		return 0;
+
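+	/* State change events are delivered to the PF that owns the
+	 * representors, not to the PF/VF that changed state.
+	 */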
+	pf = rvu_get_pf(rvu->rep_pcifunc);
+
+	mutex_lock(&rvu->mbox_lock);
+	req = otx2_mbox_alloc_msg_rep_event_up_notify(rvu, pf);
+	if (!req) {
+		mutex_unlock(&rvu->mbox_lock);
+		return -ENOMEM;
+	}
+
+	req->hdr.pcifunc = rvu->rep_pcifunc;
+	req->event |= RVU_EVENT_PFVF_STATE;
+	req->pcifunc = pcifunc;
+	req->evt_data.vf_state = enable;
+
+	otx2_mbox_wait_for_zero(&rvu->afpf_wq_info.mbox_up, pf);
+	otx2_mbox_msg_send_up(&rvu->afpf_wq_info.mbox_up, pf);
+
+	mutex_unlock(&rvu->mbox_lock);
+	return 0;
+}
+
 #define RVU_LF_RX_STATS(reg) \
 	rvu_read64(rvu, blkaddr, NIX_AF_LFX_RX_STATX(nixlf, reg))
 
@@ -248,6 +366,16 @@ int rvu_rep_install_mcam_rules(struct rvu *rvu)
 			}
 		}
 	}
+
+	/* Initialize the wq for handling REP events */
+	spin_lock_init(&rvu->rep_evtq_lock);
+	INIT_LIST_HEAD(&rvu->rep_evtq_head);
+	INIT_WORK(&rvu->rep_evt_work, rvu_rep_wq_handler);
+	rvu->rep_evt_wq = alloc_workqueue("rep_evt_wq", 0, 0);
+	if (!rvu->rep_evt_wq) {
+		dev_err(rvu->dev, "REP workqueue allocation failed\n");
+		return -ENOMEM;
+	}
 	return 0;
 }
 
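
For context, here is a minimal sketch of how the rep_event_up_notify message generated above could be consumed on the receiving side. The handler name, the net_device argument, and the dispatch point are illustrative assumptions; only struct rep_event, RVU_EVENT_PFVF_STATE, pcifunc, and evt_data.vf_state come from this patch.

#include <linux/netdevice.h>

/* Hypothetical PF-side consumer of the rep_event up-message; everything
 * other than struct rep_event, RVU_EVENT_PFVF_STATE and evt_data.vf_state
 * is illustrative.
 */
static void rep_handle_event_up_notify(struct net_device *netdev,
				       struct rep_event *info)
{
	if (info->event & RVU_EVENT_PFVF_STATE) {
		/* vf_state carries the enable flag that
		 * rvu_rep_notify_pfvf_state() set on the AF side.
		 */
		netdev_info(netdev, "pcifunc 0x%x is %s\n",
			    info->pcifunc,
			    info->evt_data.vf_state ? "up" : "down");
	}
}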