@@ -2643,16 +2643,49 @@ static void blk_mq_remove_cpuhp(struct blk_mq_hw_ctx *hctx)
                                             &hctx->cpuhp_dead);
 }
 
+/*
+ * Before freeing hw queue, clearing the flush request reference in
+ * tags->rqs[] for avoiding potential UAF.
+ */
+static void blk_mq_clear_flush_rq_mapping(struct blk_mq_tags *tags,
+		unsigned int queue_depth, struct request *flush_rq)
+{
+	int i;
+	unsigned long flags;
+
+	/* The hw queue may not be mapped yet */
+	if (!tags)
+		return;
+
+	WARN_ON_ONCE(refcount_read(&flush_rq->ref) != 0);
+
+	for (i = 0; i < queue_depth; i++)
+		cmpxchg(&tags->rqs[i], flush_rq, NULL);
+
+	/*
+	 * Wait until all pending iteration is done.
+	 *
+	 * Request reference is cleared and it is guaranteed to be observed
+	 * after the ->lock is released.
+	 */
+	spin_lock_irqsave(&tags->lock, flags);
+	spin_unlock_irqrestore(&tags->lock, flags);
+}
+
 /* hctx->ctxs will be freed in queue's release handler */
 static void blk_mq_exit_hctx(struct request_queue *q,
 		struct blk_mq_tag_set *set,
 		struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
 {
+	struct request *flush_rq = hctx->fq->flush_rq;
+
 	if (blk_mq_hw_queue_mapped(hctx))
 		blk_mq_tag_idle(hctx);
 
+	blk_mq_clear_flush_rq_mapping(set->tags[hctx_idx],
+			set->queue_depth, flush_rq);
 	if (set->ops->exit_request)
-		set->ops->exit_request(set, hctx->fq->flush_rq, hctx_idx);
+		set->ops->exit_request(set, flush_rq, hctx_idx);
 
 	if (set->ops->exit_hctx)
 		set->ops->exit_hctx(hctx, hctx_idx);
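
Why the empty spin_lock_irqsave()/spin_unlock_irqrestore() pair at the end of blk_mq_clear_flush_rq_mapping() is enough: the tag-iteration side only dereferences tags->rqs[] while holding tags->lock, and only keeps a request whose reference it can still take. Below is a minimal sketch of that reader side, purely for illustration; the helper name and the exact checks are assumptions, not part of this patch.

/*
 * Illustrative reader side (names are hypothetical): a tag iterator
 * looks up tags->rqs[] only under tags->lock and takes a reference
 * before using the request, so a slot cleared by the cmpxchg() loop
 * above is never handed out once the writer has cycled the lock.
 */
static struct request *tag_iter_get_req_sketch(struct blk_mq_tags *tags,
		unsigned int bitnr)
{
	struct request *rq;
	unsigned long flags;

	spin_lock_irqsave(&tags->lock, flags);
	rq = tags->rqs[bitnr];
	if (!rq || !refcount_inc_not_zero(&rq->ref))
		rq = NULL;
	spin_unlock_irqrestore(&tags->lock, flags);
	return rq;
}

With that pairing, once blk_mq_clear_flush_rq_mapping() has swapped every stale flush_rq pointer to NULL and then acquired and released tags->lock, any iterator that could still have observed the old pointer has already finished, so the flush request can be torn down without a use-after-free.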