@@ -74,8 +74,6 @@
         UBLK_PARAM_TYPE_DMA_ALIGN)
 
 struct ublk_rq_data {
-        struct llist_node node;
-
         struct kref ref;
 };
 
@@ -142,8 +140,6 @@ struct ublk_queue {
         struct task_struct *ubq_daemon;
         char *io_cmd_buf;
 
-        struct llist_head io_cmds;
-
         unsigned long io_addr;  /* mapped vm address */
         unsigned int max_io_sz;
         bool force_abort;
@@ -1108,7 +1104,7 @@ static void ublk_complete_rq(struct kref *ref)
 }
 
 /*
- * Since __ublk_rq_task_work always fails requests immediately during
+ * Since ublk_rq_task_work_cb always fails requests immediately during
  * exiting, __ublk_fail_req() is only called from abort context during
  * exiting. So lock is unnecessary.
  *
@@ -1154,11 +1150,14 @@ static inline void __ublk_abort_rq(struct ublk_queue *ubq,
         blk_mq_end_request(rq, BLK_STS_IOERR);
 }
 
-static inline void __ublk_rq_task_work(struct request *req,
-                unsigned issue_flags)
+static void ublk_rq_task_work_cb(struct io_uring_cmd *cmd,
+                unsigned int issue_flags)
 {
-        struct ublk_queue *ubq = req->mq_hctx->driver_data;
-        int tag = req->tag;
+        struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd);
+        struct ublk_queue *ubq = pdu->ubq;
+        int tag = pdu->tag;
+        struct request *req = blk_mq_tag_to_rq(
+                        ubq->dev->tag_set.tags[ubq->q_id], tag);
         struct ublk_io *io = &ubq->ios[tag];
         unsigned int mapped_bytes;
 
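The renamed callback no longer gets a struct request handed to it from a list walk; it rebuilds everything from per-command state. A minimal sketch of the pdu layout this implies is below. Treat it as an assumption: the hunk only shows pdu->ubq and pdu->tag being read, and the code that stores them when the command is fetched is outside this diff.

        /* Assumed per-command private data, carried inside the io_uring_cmd;
         * ublk_get_uring_cmd_pdu() returns a pointer to this area. */
        struct ublk_uring_cmd_pdu {
                struct ublk_queue *ubq; /* queue the command was fetched on */
                u16 tag;                /* blk-mq tag of the matching request */
        };

With only (ubq, tag) in hand, the callback re-derives its request via blk_mq_tag_to_rq(ubq->dev->tag_set.tags[ubq->q_id], tag), exactly as shown in the hunk above.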
@@ -1233,34 +1232,11 @@ static inline void __ublk_rq_task_work(struct request *req,
         ubq_complete_io_cmd(io, UBLK_IO_RES_OK, issue_flags);
 }
 
-static inline void ublk_forward_io_cmds(struct ublk_queue *ubq,
-                unsigned issue_flags)
-{
-        struct llist_node *io_cmds = llist_del_all(&ubq->io_cmds);
-        struct ublk_rq_data *data, *tmp;
-
-        io_cmds = llist_reverse_order(io_cmds);
-        llist_for_each_entry_safe(data, tmp, io_cmds, node)
-                __ublk_rq_task_work(blk_mq_rq_from_pdu(data), issue_flags);
-}
-
-static void ublk_rq_task_work_cb(struct io_uring_cmd *cmd, unsigned issue_flags)
-{
-        struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd);
-        struct ublk_queue *ubq = pdu->ubq;
-
-        ublk_forward_io_cmds(ubq, issue_flags);
-}
-
 static void ublk_queue_cmd(struct ublk_queue *ubq, struct request *rq)
 {
-        struct ublk_rq_data *data = blk_mq_rq_to_pdu(rq);
+        struct ublk_io *io = &ubq->ios[rq->tag];
 
-        if (llist_add(&data->node, &ubq->io_cmds)) {
-                struct ublk_io *io = &ubq->ios[rq->tag];
-
-                io_uring_cmd_complete_in_task(io->cmd, ublk_rq_task_work_cb);
-        }
+        io_uring_cmd_complete_in_task(io->cmd, ublk_rq_task_work_cb);
 }
 
 static enum blk_eh_timer_return ublk_timeout(struct request *rq)
@@ -1453,7 +1429,7 @@ static void ublk_abort_queue(struct ublk_device *ub, struct ublk_queue *ubq)
                 struct request *rq;
 
                 /*
-                 * Either we fail the request or ublk_rq_task_work_fn
+                 * Either we fail the request or ublk_rq_task_work_cb
                  * will do it
                  */
                 rq = blk_mq_tag_to_rq(ub->tag_set.tags[ubq->q_id], i);
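Taken together, the hunks replace the shared llist batching with one task_work dispatch per request. A condensed sketch of the resulting submission path, reconstructed from the diff (abort handling and the callback body elided):

        /* Queue side: hand each incoming blk-mq request straight to the
         * daemon task through the io_uring command that will complete it,
         * instead of pushing it onto ubq->io_cmds for a batched drain. */
        static void ublk_queue_cmd(struct ublk_queue *ubq, struct request *rq)
        {
                struct ublk_io *io = &ubq->ios[rq->tag];

                /* io_uring_cmd_complete_in_task() schedules task work so that
                 * ublk_rq_task_work_cb() runs in the context of the task that
                 * issued io->cmd, i.e. the ubq daemon. */
                io_uring_cmd_complete_in_task(io->cmd, ublk_rq_task_work_cb);
        }

Because every request now schedules its own callback on its own fetched command, the per-queue llist, the llist_reverse_order() drain, and the ublk_forward_io_cmds() helper all become unnecessary.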