Skip to content

Commit 1d2024d

Browse files
author
Ming Lei
committed
ublk: remove io_cmds list in ublk_queue
JIRA: https://issues.redhat.com/browse/RHEL-83595

commit 989bcd6
Author: Uday Shankar <[email protected]>
Date: Tue Mar 18 12:14:17 2025 -0600

    ublk: remove io_cmds list in ublk_queue

    The current I/O dispatch mechanism - queueing I/O by adding it to the
    io_cmds list (and poking task_work as needed), then dispatching it in
    ublk server task context by reversing io_cmds and completing the
    io_uring command associated to each one - was introduced by commit
    7d4a931 ("ublk_drv: don't forward io commands in reserve order") to
    ensure that the ublk server received I/O in the same order that the
    block layer submitted it to ublk_drv.

    This mechanism was only needed for the "raw" task_work submission
    mechanism, since the io_uring task work wrapper maintains FIFO
    ordering (using quite a similar mechanism in fact). The "raw"
    task_work submission mechanism is no longer supported in ublk_drv as
    of commit 29dc5d0 ("ublk: kill queuing request by task_work_add"), so
    the explicit llist/reversal is no longer needed - it just duplicates
    logic already present in the underlying io_uring APIs. Remove it.

    Signed-off-by: Uday Shankar <[email protected]>
    Reviewed-by: Ming Lei <[email protected]>
    Link: https://lore.kernel.org/r/[email protected]
    Signed-off-by: Jens Axboe <[email protected]>

Signed-off-by: Ming Lei <[email protected]>
1 parent 12de855 commit 1d2024d

File tree

1 file changed

+11
-35
lines changed

1 file changed

+11
-35
lines changed

drivers/block/ublk_drv.c

Lines changed: 11 additions & 35 deletions
Original file line numberDiff line numberDiff line change
@@ -74,8 +74,6 @@
7474
UBLK_PARAM_TYPE_DMA_ALIGN)
7575

7676
struct ublk_rq_data {
77-
struct llist_node node;
78-
7977
struct kref ref;
8078
};
8179

@@ -142,8 +140,6 @@ struct ublk_queue {
142140
struct task_struct *ubq_daemon;
143141
char *io_cmd_buf;
144142

145-
struct llist_head io_cmds;
146-
147143
unsigned long io_addr; /* mapped vm address */
148144
unsigned int max_io_sz;
149145
bool force_abort;
@@ -1108,7 +1104,7 @@ static void ublk_complete_rq(struct kref *ref)
11081104
}
11091105

11101106
/*
1111-
* Since __ublk_rq_task_work always fails requests immediately during
1107+
* Since ublk_rq_task_work_cb always fails requests immediately during
11121108
* exiting, __ublk_fail_req() is only called from abort context during
11131109
* exiting. So lock is unnecessary.
11141110
*
@@ -1154,11 +1150,14 @@ static inline void __ublk_abort_rq(struct ublk_queue *ubq,
11541150
blk_mq_end_request(rq, BLK_STS_IOERR);
11551151
}
11561152

1157-
static inline void __ublk_rq_task_work(struct request *req,
1158-
unsigned issue_flags)
1153+
static void ublk_rq_task_work_cb(struct io_uring_cmd *cmd,
1154+
unsigned int issue_flags)
11591155
{
1160-
struct ublk_queue *ubq = req->mq_hctx->driver_data;
1161-
int tag = req->tag;
1156+
struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd);
1157+
struct ublk_queue *ubq = pdu->ubq;
1158+
int tag = pdu->tag;
1159+
struct request *req = blk_mq_tag_to_rq(
1160+
ubq->dev->tag_set.tags[ubq->q_id], tag);
11621161
struct ublk_io *io = &ubq->ios[tag];
11631162
unsigned int mapped_bytes;
11641163

@@ -1233,34 +1232,11 @@ static inline void __ublk_rq_task_work(struct request *req,
12331232
ubq_complete_io_cmd(io, UBLK_IO_RES_OK, issue_flags);
12341233
}
12351234

1236-
static inline void ublk_forward_io_cmds(struct ublk_queue *ubq,
1237-
unsigned issue_flags)
1238-
{
1239-
struct llist_node *io_cmds = llist_del_all(&ubq->io_cmds);
1240-
struct ublk_rq_data *data, *tmp;
1241-
1242-
io_cmds = llist_reverse_order(io_cmds);
1243-
llist_for_each_entry_safe(data, tmp, io_cmds, node)
1244-
__ublk_rq_task_work(blk_mq_rq_from_pdu(data), issue_flags);
1245-
}
1246-
1247-
static void ublk_rq_task_work_cb(struct io_uring_cmd *cmd, unsigned issue_flags)
1248-
{
1249-
struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd);
1250-
struct ublk_queue *ubq = pdu->ubq;
1251-
1252-
ublk_forward_io_cmds(ubq, issue_flags);
1253-
}
1254-
12551235
static void ublk_queue_cmd(struct ublk_queue *ubq, struct request *rq)
12561236
{
1257-
struct ublk_rq_data *data = blk_mq_rq_to_pdu(rq);
1237+
struct ublk_io *io = &ubq->ios[rq->tag];
12581238

1259-
if (llist_add(&data->node, &ubq->io_cmds)) {
1260-
struct ublk_io *io = &ubq->ios[rq->tag];
1261-
1262-
io_uring_cmd_complete_in_task(io->cmd, ublk_rq_task_work_cb);
1263-
}
1239+
io_uring_cmd_complete_in_task(io->cmd, ublk_rq_task_work_cb);
12641240
}
12651241

12661242
static enum blk_eh_timer_return ublk_timeout(struct request *rq)
@@ -1453,7 +1429,7 @@ static void ublk_abort_queue(struct ublk_device *ub, struct ublk_queue *ubq)
14531429
struct request *rq;
14541430

14551431
/*
1456-
* Either we fail the request or ublk_rq_task_work_fn
1432+
* Either we fail the request or ublk_rq_task_work_cb
14571433
* will do it
14581434
*/
14591435
rq = blk_mq_tag_to_rq(ub->tag_set.tags[ubq->q_id], i);

0 commit comments

Comments (0)