
Commit c86019f

old-memories authored and axboe committed
ublk_drv: add support for UBLK_IO_NEED_GET_DATA
UBLK_IO_NEED_GET_DATA is a new ublk IO command. It is designed for a user application that wants to allocate the IO buffer and set the IO buffer address only after it receives an IO request from ublksrv. This is a reasonable scenario because such users may use an RPC framework as the IO backend to handle IO requests passed from ublksrv, and an RPC framework may allocate its own buffer (or memory pool).

This new feature (UBLK_F_NEED_GET_DATA) is optional for ublk users. Related userspace code has been added to ublksrv [1] as a pull request. Test cases for this feature have been added to ublksrv and all of them pass.

The performance results show that this new feature does bring additional latency, because one IO is issued back to ublk_drv once again to copy data from the bio vectors into the user-provided data buffer. UBLK_IO_NEED_GET_DATA is therefore more suitable for bigger block sizes such as 512B or 1MB.

[1] https://github.com/ming1/ubdsrv

Signed-off-by: ZiyangZhang <[email protected]>
Reviewed-by: Ming Lei <[email protected]>
Link: https://lore.kernel.org/r/3a21007ea1be8304246e654cebbd581ab0012623.1659011443.git.ZiyangZhang@linux.alibaba.com
Signed-off-by: Jens Axboe <[email protected]>
1 parent 4e18403 commit c86019f
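
Seen from userspace, the round trip this command adds looks roughly like the sketch below. This is a minimal illustration, not ublksrv code: struct io_slot, queue_io_cmd(), handle_io() and IO_BUF_SIZE are hypothetical stand-ins, while the UBLK_IO_* constants and the overall sequence (a fetch completing with UBLK_IO_RES_NEED_GET_DATA, the server allocating a buffer and re-issuing UBLK_IO_NEED_GET_DATA with its address, then handling the now-filled buffer) follow this commit and the ublk UAPI header.

/*
 * Minimal sketch of the userspace side of UBLK_F_NEED_GET_DATA.
 * queue_io_cmd() and handle_io() are hypothetical helpers (the real
 * logic lives in ublksrv); only the UBLK_IO_* constants come from
 * <linux/ublk_cmd.h>.
 */
#include <stdint.h>
#include <stdlib.h>
#include <linux/ublk_cmd.h>

#define IO_BUF_SIZE     (1U << 20)      /* assumed maximum I/O size */

struct io_slot {
        void *buf;                      /* data buffer, allocated lazily */
        unsigned int q_id;
        unsigned int tag;
};

/* hypothetical helpers provided by the rest of the server */
void queue_io_cmd(struct io_slot *slot, unsigned int cmd_op,
                  __u64 buf_addr, int result);
int handle_io(struct io_slot *slot);

/* called when the io command for this tag completes with 'res' */
static void handle_cqe(struct io_slot *slot, int res)
{
        if (res == UBLK_IO_RES_NEED_GET_DATA) {
                /*
                 * The driver has a WRITE/FLUSH request for this tag but no
                 * buffer address yet: allocate one now and pass it back via
                 * UBLK_IO_NEED_GET_DATA.  The driver copies the bio data
                 * into it and completes this command again.
                 */
                if (!slot->buf)
                        slot->buf = malloc(IO_BUF_SIZE);
                queue_io_cmd(slot, UBLK_IO_NEED_GET_DATA,
                             (__u64)(uintptr_t)slot->buf, 0);
                return;
        }

        /* normal path: the buffer already holds the WRITE payload */
        int io_res = handle_io(slot);

        /* commit the result and fetch the next request for this tag */
        queue_io_cmd(slot, UBLK_IO_COMMIT_AND_FETCH_REQ,
                     (__u64)(uintptr_t)slot->buf, io_res);
}

The key point is that the server only touches the buffer after the second completion, once UBLK_IO_FLAG_NEED_GET_DATA has been cleared on the driver side.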


drivers/block/ublk_drv.c

Lines changed: 94 additions & 12 deletions
@@ -47,7 +47,9 @@
 #define UBLK_MINORS		(1U << MINORBITS)
 
 /* All UBLK_F_* have to be included into UBLK_F_ALL */
-#define UBLK_F_ALL (UBLK_F_SUPPORT_ZERO_COPY | UBLK_F_URING_CMD_COMP_IN_TASK)
+#define UBLK_F_ALL (UBLK_F_SUPPORT_ZERO_COPY \
+		| UBLK_F_URING_CMD_COMP_IN_TASK \
+		| UBLK_F_NEED_GET_DATA)
 
 /* All UBLK_PARAM_TYPE_* should be included here */
 #define UBLK_PARAM_TYPE_ALL (UBLK_PARAM_TYPE_BASIC | UBLK_PARAM_TYPE_DISCARD)
@@ -89,6 +91,15 @@ struct ublk_uring_cmd_pdu {
  */
 #define UBLK_IO_FLAG_ABORTED 0x04
 
+/*
+ * UBLK_IO_FLAG_NEED_GET_DATA is set because the IO command requires the
+ * data buffer address from ublksrv.
+ *
+ * Then, bio data could be copied into this data buffer for a WRITE request
+ * after the IO command is issued again and UBLK_IO_FLAG_NEED_GET_DATA is unset.
+ */
+#define UBLK_IO_FLAG_NEED_GET_DATA 0x08
+
 struct ublk_io {
 	/* userspace buffer address from io cmd */
 	__u64 addr;
@@ -262,6 +273,13 @@ static inline bool ublk_can_use_task_work(const struct ublk_queue *ubq)
 	return false;
 }
 
+static inline bool ublk_need_get_data(const struct ublk_queue *ubq)
+{
+	if (ubq->flags & UBLK_F_NEED_GET_DATA)
+		return true;
+	return false;
+}
+
 static struct ublk_device *ublk_get_device(struct ublk_device *ub)
 {
 	if (kobject_get_unless_zero(&ub->cdev_dev.kobj))
@@ -603,6 +621,21 @@ static void __ublk_fail_req(struct ublk_io *io, struct request *req)
 	}
 }
 
+static void ubq_complete_io_cmd(struct ublk_io *io, int res)
+{
+	/* mark this cmd owned by ublksrv */
+	io->flags |= UBLK_IO_FLAG_OWNED_BY_SRV;
+
+	/*
+	 * clear ACTIVE since we are done with this sqe/cmd slot
+	 * We can only accept io cmd in case of being not active.
+	 */
+	io->flags &= ~UBLK_IO_FLAG_ACTIVE;
+
+	/* tell ublksrv one io request is coming */
+	io_uring_cmd_done(io->cmd, res, 0);
+}
+
 #define UBLK_REQUEUE_DELAY_MS	3
 
 static inline void __ublk_rq_task_work(struct request *req)
@@ -625,6 +658,30 @@ static inline void __ublk_rq_task_work(struct request *req)
 		return;
 	}
 
+	if (ublk_need_get_data(ubq) &&
+			(req_op(req) == REQ_OP_WRITE ||
+			req_op(req) == REQ_OP_FLUSH)) {
+		/*
+		 * We have not handled UBLK_IO_NEED_GET_DATA command yet,
+		 * so immediately pass UBLK_IO_RES_NEED_GET_DATA to ublksrv
+		 * and notify it.
+		 */
+		if (!(io->flags & UBLK_IO_FLAG_NEED_GET_DATA)) {
+			io->flags |= UBLK_IO_FLAG_NEED_GET_DATA;
+			pr_devel("%s: need get data. op %d, qid %d tag %d io_flags %x\n",
+					__func__, io->cmd->cmd_op, ubq->q_id,
+					req->tag, io->flags);
+			ubq_complete_io_cmd(io, UBLK_IO_RES_NEED_GET_DATA);
+			return;
+		}
+		/*
+		 * We have handled UBLK_IO_NEED_GET_DATA command,
+		 * so clear UBLK_IO_FLAG_NEED_GET_DATA now and just
+		 * do the copy work.
+		 */
+		io->flags &= ~UBLK_IO_FLAG_NEED_GET_DATA;
+	}
+
 	mapped_bytes = ublk_map_io(ubq, req, io);
 
 	/* partially mapped, update io descriptor */
@@ -647,17 +704,7 @@ static inline void __ublk_rq_task_work(struct request *req)
 			mapped_bytes >> 9;
 	}
 
-	/* mark this cmd owned by ublksrv */
-	io->flags |= UBLK_IO_FLAG_OWNED_BY_SRV;
-
-	/*
-	 * clear ACTIVE since we are done with this sqe/cmd slot
-	 * We can only accept io cmd in case of being not active.
-	 */
-	io->flags &= ~UBLK_IO_FLAG_ACTIVE;
-
-	/* tell ublksrv one io request is coming */
-	io_uring_cmd_done(io->cmd, UBLK_IO_RES_OK, 0);
+	ubq_complete_io_cmd(io, UBLK_IO_RES_OK);
 }
 
 static void ublk_rq_task_work_cb(struct io_uring_cmd *cmd)
@@ -946,6 +993,25 @@ static void ublk_mark_io_ready(struct ublk_device *ub, struct ublk_queue *ubq)
 	mutex_unlock(&ub->mutex);
 }
 
+static void ublk_handle_need_get_data(struct ublk_device *ub, int q_id,
+		int tag, struct io_uring_cmd *cmd)
+{
+	struct ublk_queue *ubq = ublk_get_queue(ub, q_id);
+	struct request *req = blk_mq_tag_to_rq(ub->tag_set.tags[q_id], tag);
+
+	if (ublk_can_use_task_work(ubq)) {
+		struct ublk_rq_data *data = blk_mq_rq_to_pdu(req);
+
+		/* should not fail since we call it just in ubq->ubq_daemon */
+		task_work_add(ubq->ubq_daemon, &data->work, TWA_SIGNAL_NO_IPI);
+	} else {
+		struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd);
+
+		pdu->req = req;
+		io_uring_cmd_complete_in_task(cmd, ublk_rq_task_work_cb);
+	}
+}
+
 static int ublk_ch_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
 {
 	struct ublksrv_io_cmd *ub_cmd = (struct ublksrv_io_cmd *)cmd->cmd;
@@ -984,6 +1050,14 @@ static int ublk_ch_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
 		goto out;
 	}
 
+	/*
+	 * ensure that the user issues UBLK_IO_NEED_GET_DATA
+	 * iff the driver has set UBLK_IO_FLAG_NEED_GET_DATA.
+	 */
+	if ((!!(io->flags & UBLK_IO_FLAG_NEED_GET_DATA))
+			^ (cmd_op == UBLK_IO_NEED_GET_DATA))
+		goto out;
+
 	switch (cmd_op) {
 	case UBLK_IO_FETCH_REQ:
 		/* UBLK_IO_FETCH_REQ is only allowed before queue is setup */
@@ -1017,6 +1091,14 @@ static int ublk_ch_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
 		io->cmd = cmd;
 		ublk_commit_completion(ub, ub_cmd);
 		break;
+	case UBLK_IO_NEED_GET_DATA:
+		if (!(io->flags & UBLK_IO_FLAG_OWNED_BY_SRV))
+			goto out;
+		io->addr = ub_cmd->addr;
+		io->cmd = cmd;
+		io->flags |= UBLK_IO_FLAG_ACTIVE;
+		ublk_handle_need_get_data(ub, ub_cmd->q_id, ub_cmd->tag, cmd);
+		break;
 	default:
 		goto out;
 	}
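
On the driver side, ublk_ch_uring_cmd() above reads the struct ublksrv_io_cmd carried inside an IORING_OP_URING_CMD SQE. The hypothetical queue_io_cmd() helper from the earlier sketch would therefore boil down to something like the liburing-based routine below. This is only a sketch under the assumption that the SQE's cmd_op field and its 16-byte inline cmd area are used the way ublksrv uses them; consult the io_uring and ublk UAPI headers for the authoritative layout, and note that a real server keeps one ring per queue and does fuller error handling.

#include <string.h>
#include <liburing.h>
#include <linux/ublk_cmd.h>

/*
 * Sketch: queue one ublk io command (e.g. UBLK_IO_NEED_GET_DATA) on the
 * io_uring associated with the /dev/ublkcN char device fd 'cdev_fd'.
 * Returns 0 on success, -1 on error.
 */
static int queue_ublk_cmd(struct io_uring *ring, int cdev_fd,
                          unsigned int q_id, unsigned int tag,
                          unsigned int cmd_op, __u64 buf_addr, int result)
{
        struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
        struct ublksrv_io_cmd *cmd;

        if (!sqe)
                return -1;              /* submission queue is full */

        memset(sqe, 0, sizeof(*sqe));   /* SQEs may contain stale data */
        sqe->opcode = IORING_OP_URING_CMD;
        sqe->fd = cdev_fd;
        sqe->cmd_op = cmd_op;
        sqe->user_data = ((__u64)q_id << 32) | tag;     /* arbitrary encoding */

        /* the inline command area holds struct ublksrv_io_cmd */
        cmd = (struct ublksrv_io_cmd *)sqe->cmd;
        cmd->q_id = q_id;
        cmd->tag = tag;
        cmd->addr = buf_addr;   /* buffer the driver copies bio data into */
        cmd->result = result;   /* only meaningful for COMMIT_* commands */

        return io_uring_submit(ring) > 0 ? 0 : -1;
}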
