Skip to content

Commit 0a47d2b

Browse files
Ming Lei authored and axboe committed
block: don't acquire ->elevator_lock in blk_mq_map_swqueue and blk_mq_realloc_hw_ctxs
Both blk_mq_map_swqueue() and blk_mq_realloc_hw_ctxs() are called before the request queue is added to tagset list, so the two won't run concurrently with blk_mq_update_nr_hw_queues(). When the two functions are only called from queue initialization or blk_mq_update_nr_hw_queues(), elevator switch can't happen. So remove ->elevator_lock uses from the two functions. Reviewed-by: Hannes Reinecke <[email protected]> Reviewed-by: Nilay Shroff <[email protected]> Signed-off-by: Ming Lei <[email protected]> Reviewed-by: Christoph Hellwig <[email protected]> Link: https://lore.kernel.org/r/[email protected] Signed-off-by: Jens Axboe <[email protected]>
1 parent 9dc7a88 commit 0a47d2b

File tree

1 file changed

+4
-15
lines changed

1 file changed

+4
-15
lines changed

block/blk-mq.c

Lines changed: 4 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -4156,8 +4156,6 @@ static void blk_mq_map_swqueue(struct request_queue *q)
41564156
struct blk_mq_ctx *ctx;
41574157
struct blk_mq_tag_set *set = q->tag_set;
41584158

4159-
mutex_lock(&q->elevator_lock);
4160-
41614159
queue_for_each_hw_ctx(q, hctx, i) {
41624160
cpumask_clear(hctx->cpumask);
41634161
hctx->nr_ctx = 0;
@@ -4262,8 +4260,6 @@ static void blk_mq_map_swqueue(struct request_queue *q)
42624260
hctx->next_cpu = blk_mq_first_mapped_cpu(hctx);
42634261
hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
42644262
}
4265-
4266-
mutex_unlock(&q->elevator_lock);
42674263
}
42684264

42694265
/*
@@ -4567,16 +4563,9 @@ static void __blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
45674563
}
45684564

45694565
static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
4570-
struct request_queue *q, bool lock)
4566+
struct request_queue *q)
45714567
{
4572-
if (lock) {
4573-
/* protect against switching io scheduler */
4574-
mutex_lock(&q->elevator_lock);
4575-
__blk_mq_realloc_hw_ctxs(set, q);
4576-
mutex_unlock(&q->elevator_lock);
4577-
} else {
4578-
__blk_mq_realloc_hw_ctxs(set, q);
4579-
}
4568+
__blk_mq_realloc_hw_ctxs(set, q);
45804569

45814570
/* unregister cpuhp callbacks for exited hctxs */
45824571
blk_mq_remove_hw_queues_cpuhp(q);
@@ -4608,7 +4597,7 @@ int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
46084597

46094598
xa_init(&q->hctx_table);
46104599

4611-
blk_mq_realloc_hw_ctxs(set, q, false);
4600+
blk_mq_realloc_hw_ctxs(set, q);
46124601
if (!q->nr_hw_queues)
46134602
goto err_hctxs;
46144603

@@ -5019,7 +5008,7 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
50195008
fallback:
50205009
blk_mq_update_queue_map(set);
50215010
list_for_each_entry(q, &set->tag_list, tag_set_list) {
5022-
blk_mq_realloc_hw_ctxs(set, q, true);
5011+
blk_mq_realloc_hw_ctxs(set, q);
50235012

50245013
if (q->nr_hw_queues != set->nr_hw_queues) {
50255014
int i = prev_nr_hw_queues;

0 commit comments

Comments (0)