
Commit 2f8f133

Ming Lei authored and Jens Axboe (axboe) committed
blk-mq: always free hctx after request queue is freed
In the normal queue-cleanup path, hctx is released after the request queue is freed; see blk_mq_release(). However, in __blk_mq_update_nr_hw_queues(), an hctx may be freed earlier because the hw queues are shrinking. That ordering easily causes a use-after-free: one implicit rule is that it is safe to call almost all block layer APIs while the request queue is alive, so an hctx may be retrieved by one such API and then freed underneath it by blk_mq_update_nr_hw_queues(), at which point the use-after-free is triggered.

Fix this issue by always freeing hctxs after the request queue is released. If some hctxs are removed in blk_mq_update_nr_hw_queues(), introduce a per-queue list to hold them, then try to reuse these hctxs when the NUMA node matches.

Cc: Dongli Zhang <[email protected]>
Cc: James Smart <[email protected]>
Cc: Bart Van Assche <[email protected]>
Cc: [email protected]
Cc: Martin K. Petersen <[email protected]>
Cc: Christoph Hellwig <[email protected]>
Cc: James E.J. Bottomley <[email protected]>
Reviewed-by: Hannes Reinecke <[email protected]>
Tested-by: James Smart <[email protected]>
Signed-off-by: Ming Lei <[email protected]>
Signed-off-by: Jens Axboe <[email protected]>
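
For readers who want the shape of the fix without the blk-mq plumbing: it is an object-parking pattern. A hardware context is never freed while the queue can still hand out references to it; a dead context is parked on a per-queue list, and the next allocation prefers a parked instance whose NUMA node matches before falling back to a fresh allocation. The userspace C sketch below illustrates only that pattern; hw_ctx, park_hw_ctx and get_hw_ctx are hypothetical names, not kernel API, and a pthread mutex stands in for the kernel spinlock.

#include <pthread.h>
#include <stdlib.h>

/* hypothetical stand-in for struct blk_mq_hw_ctx */
struct hw_ctx {
	int numa_node;
	struct hw_ctx *next;		/* links parked contexts */
};

static struct hw_ctx *unused_list;	/* parked, not freed */
static pthread_mutex_t unused_lock = PTHREAD_MUTEX_INITIALIZER;

/* park a dead context instead of freeing it (cf. blk_mq_exit_hctx) */
static void park_hw_ctx(struct hw_ctx *hctx)
{
	pthread_mutex_lock(&unused_lock);
	hctx->next = unused_list;
	unused_list = hctx;
	pthread_mutex_unlock(&unused_lock);
}

/* prefer a parked context on the requested node; allocate only as a
 * fallback (cf. blk_mq_alloc_and_init_hctx) */
static struct hw_ctx *get_hw_ctx(int node)
{
	struct hw_ctx **pp, *hctx = NULL;

	pthread_mutex_lock(&unused_lock);
	for (pp = &unused_list; *pp; pp = &(*pp)->next) {
		if ((*pp)->numa_node == node) {
			hctx = *pp;
			*pp = hctx->next;	/* unlink from the list */
			break;
		}
	}
	pthread_mutex_unlock(&unused_lock);

	if (!hctx) {
		hctx = calloc(1, sizeof(*hctx));
		if (hctx)
			hctx->numa_node = node;
	}
	return hctx;
}

int main(void)
{
	struct hw_ctx *a = get_hw_ctx(0);	/* fresh allocation */
	park_hw_ctx(a);				/* "queue shrinks": park, don't free */
	struct hw_ctx *b = get_hw_ctx(0);	/* reused: b == a */
	free(b);
	return 0;
}

In the kernel the final free additionally waits until the whole request queue dies (blk_mq_release() drains the list), which is exactly what closes the use-after-free window described above.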
1 parent 7c6c5b7 commit 2f8f133

File tree

3 files changed: +42 -13 lines changed

block/blk-mq.c

Lines changed: 33 additions & 13 deletions
@@ -2269,6 +2269,10 @@ static void blk_mq_exit_hctx(struct request_queue *q,
 		set->ops->exit_hctx(hctx, hctx_idx);
 
 	blk_mq_remove_cpuhp(hctx);
+
+	spin_lock(&q->unused_hctx_lock);
+	list_add(&hctx->hctx_list, &q->unused_hctx_list);
+	spin_unlock(&q->unused_hctx_lock);
 }
 
 static void blk_mq_exit_hw_queues(struct request_queue *q,
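
With this hunk, blk_mq_exit_hctx() parks the dead hctx on q->unused_hctx_list instead of leaving its kobject for the caller to put; the matching kobject_put() calls are removed from blk_mq_realloc_hw_ctxs() further down, so the final put happens only in blk_mq_release().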
@@ -2351,6 +2355,8 @@ blk_mq_alloc_hctx(struct request_queue *q, struct blk_mq_tag_set *set,
 	hctx->queue = q;
 	hctx->flags = set->flags & ~BLK_MQ_F_TAG_SHARED;
 
+	INIT_LIST_HEAD(&hctx->hctx_list);
+
 	/*
 	 * Allocate space for all possible cpus to avoid allocation at
 	 * runtime
@@ -2664,15 +2670,17 @@ static int blk_mq_alloc_ctxs(struct request_queue *q)
  */
 void blk_mq_release(struct request_queue *q)
 {
-	struct blk_mq_hw_ctx *hctx;
-	unsigned int i;
+	struct blk_mq_hw_ctx *hctx, *next;
+	int i;
 
 	cancel_delayed_work_sync(&q->requeue_work);
 
-	/* hctx kobj stays in hctx */
-	queue_for_each_hw_ctx(q, hctx, i) {
-		if (!hctx)
-			continue;
+	queue_for_each_hw_ctx(q, hctx, i)
+		WARN_ON_ONCE(hctx && list_empty(&hctx->hctx_list));
+
+	/* all hctx are in .unused_hctx_list now */
+	list_for_each_entry_safe(hctx, next, &q->unused_hctx_list, hctx_list) {
+		list_del_init(&hctx->hctx_list);
 		kobject_put(&hctx->kobj);
 	}
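
The WARN_ON_ONCE() documents the new invariant: by the time blk_mq_release() runs, every hctx still visible through queue_for_each_hw_ctx() must already have been parked on unused_hctx_list by blk_mq_exit_hctx(), so the list walk is now the single place where hctx kobjects receive their final kobject_put().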

@@ -2739,9 +2747,22 @@ static struct blk_mq_hw_ctx *blk_mq_alloc_and_init_hctx(
 		struct blk_mq_tag_set *set, struct request_queue *q,
 		int hctx_idx, int node)
 {
-	struct blk_mq_hw_ctx *hctx;
+	struct blk_mq_hw_ctx *hctx = NULL, *tmp;
 
-	hctx = blk_mq_alloc_hctx(q, set, node);
+	/* reuse dead hctx first */
+	spin_lock(&q->unused_hctx_lock);
+	list_for_each_entry(tmp, &q->unused_hctx_list, hctx_list) {
+		if (tmp->numa_node == node) {
+			hctx = tmp;
+			break;
+		}
+	}
+	if (hctx)
+		list_del_init(&hctx->hctx_list);
+	spin_unlock(&q->unused_hctx_lock);
+
+	if (!hctx)
+		hctx = blk_mq_alloc_hctx(q, set, node);
 	if (!hctx)
 		goto fail;
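
The reuse path is keyed on numa_node, so a recycled hctx keeps its memory locality; blk_mq_alloc_and_init_hctx() falls back to a fresh blk_mq_alloc_hctx() allocation only when no parked instance matches the requested node.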

@@ -2779,10 +2800,8 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
 
 		hctx = blk_mq_alloc_and_init_hctx(set, q, i, node);
 		if (hctx) {
-			if (hctxs[i]) {
+			if (hctxs[i])
 				blk_mq_exit_hctx(q, set, hctxs[i], i);
-				kobject_put(&hctxs[i]->kobj);
-			}
 			hctxs[i] = hctx;
 		} else {
 			if (hctxs[i])
@@ -2813,9 +2832,7 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
 			if (hctx->tags)
 				blk_mq_free_map_and_requests(set, j);
 			blk_mq_exit_hctx(q, set, hctx, j);
-			kobject_put(&hctx->kobj);
 			hctxs[j] = NULL;
-
 		}
 	}
 	mutex_unlock(&q->sysfs_lock);
@@ -2858,6 +2875,9 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
 	if (!q->queue_hw_ctx)
 		goto err_sys_init;
 
+	INIT_LIST_HEAD(&q->unused_hctx_list);
+	spin_lock_init(&q->unused_hctx_lock);
+
 	blk_mq_realloc_hw_ctxs(set, q);
 	if (!q->nr_hw_queues)
 		goto err_hctxs;

include/linux/blk-mq.h

Lines changed: 2 additions & 0 deletions
@@ -70,6 +70,8 @@ struct blk_mq_hw_ctx {
 	struct dentry		*sched_debugfs_dir;
 #endif
 
+	struct list_head	hctx_list;
+
 	/* Must be the last member - see also blk_mq_hw_ctx_size(). */
 	struct srcu_struct	srcu[0];
 };

include/linux/blkdev.h

Lines changed: 7 additions & 0 deletions
@@ -535,6 +535,13 @@ struct request_queue {
 
 	struct mutex		sysfs_lock;
 
+	/*
+	 * for reusing dead hctx instance in case of updating
+	 * nr_hw_queues
+	 */
+	struct list_head	unused_hctx_list;
+	spinlock_t		unused_hctx_lock;
+
 	atomic_t		mq_freeze_depth;
 
 #if defined(CONFIG_BLK_DEV_BSG)
