@@ -159,16 +159,17 @@ bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx)
 EXPORT_SYMBOL(blk_mq_can_queue);
 
 static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
-                               struct request *rq, unsigned int rw_flags)
+                               struct request *rq, int op,
+                               unsigned int op_flags)
 {
         if (blk_queue_io_stat(q))
-                rw_flags |= REQ_IO_STAT;
+                op_flags |= REQ_IO_STAT;
 
         INIT_LIST_HEAD(&rq->queuelist);
         /* csd/requeue_work/fifo_time is initialized before use */
         rq->q = q;
         rq->mq_ctx = ctx;
-        rq->cmd_flags |= rw_flags;
+        req_set_op_attrs(rq, op, op_flags);
         /* do not touch atomic flags, it needs atomic ops against the timer */
         rq->cpu = -1;
         INIT_HLIST_NODE(&rq->hash);
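/*
 * Note on the hunk above: the request's operation (read/write) and its
 * attribute flags now arrive as separate arguments and are only combined
 * by the req_set_op_attrs() helper.  A minimal sketch of the idea; the
 * OP_MASK name and the exact cmd_flags layout below are hypothetical
 * illustrations, not the kernel's:
 *
 *      static inline void req_set_op_attrs(struct request *req, int op,
 *                                          int flags)
 *      {
 *              req->cmd_flags &= ~OP_MASK;   // clear any previous op
 *              req->cmd_flags |= op;         // direction: READ or WRITE
 *              req->cmd_flags |= flags;      // modifiers, e.g. REQ_SYNC
 *      }
 */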
@@ -203,11 +204,11 @@ static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
         rq->end_io_data = NULL;
         rq->next_rq = NULL;
 
-        ctx->rq_dispatched[rw_is_sync(rw_flags)]++;
+        ctx->rq_dispatched[rw_is_sync(op | op_flags)]++;
 }
 
 static struct request *
-__blk_mq_alloc_request(struct blk_mq_alloc_data *data, int rw)
+__blk_mq_alloc_request(struct blk_mq_alloc_data *data, int op, int op_flags)
 {
         struct request *rq;
         unsigned int tag;
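/*
 * rw_is_sync() still expects the old combined encoding, which is why op
 * and op_flags are OR'd back together for the dispatch accounting above.
 * For context, this era's definition in include/linux/blkdev.h read
 * roughly:
 *
 *      static inline bool rw_is_sync(unsigned int rw_flags)
 *      {
 *              return !(rw_flags & REQ_WRITE) || (rw_flags & REQ_SYNC);
 *      }
 *
 * i.e. reads count as synchronous, writes only when REQ_SYNC is set.
 */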
@@ -222,7 +223,7 @@ __blk_mq_alloc_request(struct blk_mq_alloc_data *data, int rw)
         }
 
         rq->tag = tag;
-        blk_mq_rq_ctx_init(data->q, data->ctx, rq, rw);
+        blk_mq_rq_ctx_init(data->q, data->ctx, rq, op, op_flags);
         return rq;
 }
 
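/*
 * Internal callers of __blk_mq_alloc_request() now pass the op and its
 * flags separately; hypothetical call sites for illustration:
 *
 *      rq = __blk_mq_alloc_request(&data, READ, 0);
 *      rq = __blk_mq_alloc_request(&data, WRITE, REQ_SYNC);
 */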
@@ -246,15 +247,15 @@ struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
         hctx = q->mq_ops->map_queue(q, ctx->cpu);
         blk_mq_set_alloc_data(&alloc_data, q, flags, ctx, hctx);
 
-        rq = __blk_mq_alloc_request(&alloc_data, rw);
+        rq = __blk_mq_alloc_request(&alloc_data, rw, 0);
         if (!rq && !(flags & BLK_MQ_REQ_NOWAIT)) {
                 __blk_mq_run_hw_queue(hctx);
                 blk_mq_put_ctx(ctx);
 
                 ctx = blk_mq_get_ctx(q);
                 hctx = q->mq_ops->map_queue(q, ctx->cpu);
                 blk_mq_set_alloc_data(&alloc_data, q, flags, ctx, hctx);
-                rq = __blk_mq_alloc_request(&alloc_data, rw);
+                rq = __blk_mq_alloc_request(&alloc_data, rw, 0);
                 ctx = alloc_data.ctx;
         }
         blk_mq_put_ctx(ctx);
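/*
 * blk_mq_alloc_request() keeps its legacy int rw parameter for external
 * callers, so at this stage of the conversion it passes 0 for op_flags;
 * e.g. a driver allocating a write request (hypothetical usage):
 *
 *      struct request *rq = blk_mq_alloc_request(q, WRITE, 0);
 */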
@@ -1169,28 +1170,29 @@ static struct request *blk_mq_map_request(struct request_queue *q,
         struct blk_mq_hw_ctx *hctx;
         struct blk_mq_ctx *ctx;
         struct request *rq;
-        int rw = bio_data_dir(bio);
+        int op = bio_data_dir(bio);
+        int op_flags = 0;
         struct blk_mq_alloc_data alloc_data;
 
         blk_queue_enter_live(q);
         ctx = blk_mq_get_ctx(q);
         hctx = q->mq_ops->map_queue(q, ctx->cpu);
 
         if (rw_is_sync(bio->bi_rw))
-                rw |= REQ_SYNC;
+                op_flags |= REQ_SYNC;
 
-        trace_block_getrq(q, bio, rw);
+        trace_block_getrq(q, bio, op);
         blk_mq_set_alloc_data(&alloc_data, q, BLK_MQ_REQ_NOWAIT, ctx, hctx);
-        rq = __blk_mq_alloc_request(&alloc_data, rw);
+        rq = __blk_mq_alloc_request(&alloc_data, op, op_flags);
         if (unlikely(!rq)) {
                 __blk_mq_run_hw_queue(hctx);
                 blk_mq_put_ctx(ctx);
-                trace_block_sleeprq(q, bio, rw);
+                trace_block_sleeprq(q, bio, op);
 
                 ctx = blk_mq_get_ctx(q);
                 hctx = q->mq_ops->map_queue(q, ctx->cpu);
                 blk_mq_set_alloc_data(&alloc_data, q, 0, ctx, hctx);
-                rq = __blk_mq_alloc_request(&alloc_data, rw);
+                rq = __blk_mq_alloc_request(&alloc_data, op, op_flags);
                 ctx = alloc_data.ctx;
                 hctx = alloc_data.hctx;
         }
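/*
 * Net effect in blk_mq_map_request(): the bio's data direction becomes
 * the op and the sync hint becomes an op flag, rather than both being
 * folded into a single rw integer.  Schematically (condensed from the
 * hunk above, not literal kernel code):
 *
 *      int op = bio_data_dir(bio);                    // READ or WRITE
 *      int op_flags = rw_is_sync(bio->bi_rw) ? REQ_SYNC : 0;
 *
 *      rq = __blk_mq_alloc_request(&alloc_data, op, op_flags);
 */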