@@ -95,9 +95,9 @@ static void blk_kick_flush(struct request_queue *q,
 			   struct blk_flush_queue *fq, blk_opf_t flags);
 
 static inline struct blk_flush_queue *
-blk_get_flush_queue(struct request_queue *q, struct blk_mq_ctx *ctx)
+blk_get_flush_queue(struct blk_mq_ctx *ctx)
 {
-	return blk_mq_map_queue(q, REQ_OP_FLUSH, ctx)->fq;
+	return blk_mq_map_queue(REQ_OP_FLUSH, ctx)->fq;
 }
 
 static unsigned int blk_flush_cur_seq(struct request *rq)
@@ -205,7 +205,7 @@ static enum rq_end_io_ret flush_end_io(struct request *flush_rq,
 	struct list_head *running;
 	struct request *rq, *n;
 	unsigned long flags = 0;
-	struct blk_flush_queue *fq = blk_get_flush_queue(q, flush_rq->mq_ctx);
+	struct blk_flush_queue *fq = blk_get_flush_queue(flush_rq->mq_ctx);
 
 	/* release the tag's ownership to the req cloned from */
 	spin_lock_irqsave(&fq->mq_flush_lock, flags);
@@ -341,7 +341,7 @@ static enum rq_end_io_ret mq_flush_data_end_io(struct request *rq,
 	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
 	struct blk_mq_ctx *ctx = rq->mq_ctx;
 	unsigned long flags;
-	struct blk_flush_queue *fq = blk_get_flush_queue(q, ctx);
+	struct blk_flush_queue *fq = blk_get_flush_queue(ctx);
 
 	if (q->elevator) {
 		WARN_ON(rq->tag < 0);
@@ -382,7 +382,7 @@ static void blk_rq_init_flush(struct request *rq)
 bool blk_insert_flush(struct request *rq)
 {
 	struct request_queue *q = rq->q;
-	struct blk_flush_queue *fq = blk_get_flush_queue(q, rq->mq_ctx);
+	struct blk_flush_queue *fq = blk_get_flush_queue(rq->mq_ctx);
 	bool supports_fua = q->limits.features & BLK_FEAT_FUA;
 	unsigned int policy = 0;
 
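The hunks above drop the request_queue argument from blk_get_flush_queue(): every caller already has a blk_mq_ctx, and the queue pointer was only being forwarded to blk_mq_map_queue(), which no longer needs it. Below is a minimal sketch of the resulting helper shape; only the signatures and the blk_get_flush_queue() body are confirmed by this diff, while the blk_mq_map_queue() body is an assumption based on the mainline per-ctx hctx lookup.

/*
 * Sketch only, not part of this commit: the hardware context is
 * resolved from the ctx's per-type table, so no request_queue is
 * required (assumed blk_mq_map_queue() internals).
 */
static inline struct blk_mq_hw_ctx *blk_mq_map_queue(blk_opf_t opf,
						     struct blk_mq_ctx *ctx)
{
	return ctx->hctxs[blk_mq_get_hctx_type(opf)];
}

/* Confirmed by the diff: the flush queue hangs off the mapped hctx. */
static inline struct blk_flush_queue *
blk_get_flush_queue(struct blk_mq_ctx *ctx)
{
	return blk_mq_map_queue(REQ_OP_FLUSH, ctx)->fq;
}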