@@ -959,10 +959,10 @@ static void __freed_request(struct request_list *rl, int sync)
  * A request has just been released. Account for it, update the full and
  * congestion status, wake up any waiters. Called under q->queue_lock.
  */
-static void freed_request(struct request_list *rl, unsigned int flags)
+static void freed_request(struct request_list *rl, int op, unsigned int flags)
 {
	struct request_queue *q = rl->q;
-	int sync = rw_is_sync(flags);
+	int sync = rw_is_sync(op | flags);
 
	q->nr_rqs[sync]--;
	rl->count[sync]--;
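
The pattern this patch establishes, already visible in freed_request() above, is to carry the operation (REQ_OP_READ/REQ_OP_WRITE) separately from the rq_flag_bits and to recombine them as op | flags only at the boundary of helpers such as rw_is_sync() that still expect the old packed word. The snippet below is a self-contained userspace sketch of that idea, not the kernel's definitions; the MODEL_* values and the "reads and sync-flagged writes count as synchronous" rule are assumptions made purely for illustration.

    /* Illustrative userspace model of the op/flags split; the values here
     * are assumptions, not the kernel's real REQ_* definitions. */
    #include <stdbool.h>
    #include <stdio.h>

    enum { MODEL_OP_READ = 0, MODEL_OP_WRITE = 1 };  /* hypothetical ops  */
    #define MODEL_REQ_SYNC (1U << 4)                 /* hypothetical flag */

    /* Legacy-style helper: one packed word, so callers pass op | flags. */
    static bool model_rw_is_sync(unsigned int packed)
    {
            /* assume reads are always sync; writes only with the sync bit */
            return !(packed & MODEL_OP_WRITE) || (packed & MODEL_REQ_SYNC);
    }

    int main(void)
    {
            int op = MODEL_OP_WRITE;             /* carried separately ... */
            unsigned int flags = MODEL_REQ_SYNC; /* ... from the flag bits */

            /* recombine only at the legacy helper, as the hunks above do */
            printf("sync write? %d\n", model_rw_is_sync(op | flags));
            printf("plain write sync? %d\n", model_rw_is_sync(MODEL_OP_WRITE));
            return 0;
    }
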
@@ -1054,7 +1054,8 @@ static struct io_context *rq_ioc(struct bio *bio)
 /**
  * __get_request - get a free request
  * @rl: request list to allocate from
- * @rw_flags: RW and SYNC flags
+ * @op: REQ_OP_READ/REQ_OP_WRITE
+ * @op_flags: rq_flag_bits
  * @bio: bio to allocate request for (can be %NULL)
  * @gfp_mask: allocation mask
  *
@@ -1065,21 +1066,22 @@ static struct io_context *rq_ioc(struct bio *bio)
  * Returns ERR_PTR on failure, with @q->queue_lock held.
  * Returns request pointer on success, with @q->queue_lock *not held*.
  */
-static struct request *__get_request(struct request_list *rl, int rw_flags,
-				     struct bio *bio, gfp_t gfp_mask)
+static struct request *__get_request(struct request_list *rl, int op,
+				     int op_flags, struct bio *bio,
+				     gfp_t gfp_mask)
 {
	struct request_queue *q = rl->q;
	struct request *rq;
	struct elevator_type *et = q->elevator->type;
	struct io_context *ioc = rq_ioc(bio);
	struct io_cq *icq = NULL;
-	const bool is_sync = rw_is_sync(rw_flags) != 0;
+	const bool is_sync = rw_is_sync(op | op_flags) != 0;
	int may_queue;
 
	if (unlikely(blk_queue_dying(q)))
		return ERR_PTR(-ENODEV);
 
-	may_queue = elv_may_queue(q, rw_flags);
+	may_queue = elv_may_queue(q, op | op_flags);
	if (may_queue == ELV_MQUEUE_NO)
		goto rq_starved;
 
@@ -1123,7 +1125,7 @@ static struct request *__get_request(struct request_list *rl, int rw_flags,
 
	/*
	 * Decide whether the new request will be managed by elevator. If
-	 * so, mark @rw_flags and increment elvpriv. Non-zero elvpriv will
+	 * so, mark @op_flags and increment elvpriv. Non-zero elvpriv will
	 * prevent the current elevator from being destroyed until the new
	 * request is freed. This guarantees icq's won't be destroyed and
	 * makes creating new ones safe.
@@ -1132,14 +1134,14 @@ static struct request *__get_request(struct request_list *rl, int rw_flags,
	 * it will be created after releasing queue_lock.
	 */
	if (blk_rq_should_init_elevator(bio) && !blk_queue_bypass(q)) {
-		rw_flags |= REQ_ELVPRIV;
+		op_flags |= REQ_ELVPRIV;
		q->nr_rqs_elvpriv++;
		if (et->icq_cache && ioc)
			icq = ioc_lookup_icq(ioc, q);
	}
 
	if (blk_queue_io_stat(q))
-		rw_flags |= REQ_IO_STAT;
+		op_flags |= REQ_IO_STAT;
	spin_unlock_irq(q->queue_lock);
 
	/* allocate and init request */
@@ -1149,10 +1151,10 @@ static struct request *__get_request(struct request_list *rl, int rw_flags,
 
	blk_rq_init(q, rq);
	blk_rq_set_rl(rq, rl);
-	rq->cmd_flags = rw_flags | REQ_ALLOCED;
+	req_set_op_attrs(rq, op, op_flags | REQ_ALLOCED);
 
	/* init elvpriv */
-	if (rw_flags & REQ_ELVPRIV) {
+	if (op_flags & REQ_ELVPRIV) {
		if (unlikely(et->icq_cache && !icq)) {
			if (ioc)
				icq = ioc_create_icq(ioc, q, gfp_mask);
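
The functional change in this hunk is that the request's direction is no longer ORed straight into cmd_flags; req_set_op_attrs() records the op and the remaining flag bits for the request. The exact storage layout is not visible in the hunk, so the following is only a hedged model with illustrative field names, not the kernel macro: it keeps the op and the modifier flags in separate storage so neither can clobber the other.

    /* Hedged model of what req_set_op_attrs() accomplishes; field names,
     * widths and flag values are illustrative, not the kernel's layout. */
    #include <stdio.h>

    struct model_request {
            unsigned int op;        /* REQ_OP_* style operation          */
            unsigned int cmd_flags; /* rq_flag_bits style modifier flags */
    };

    #define MODEL_REQ_ALLOCED (1U << 10) /* hypothetical flag value */

    static void model_set_op_attrs(struct model_request *rq, unsigned int op,
                                   unsigned int flags)
    {
            rq->op = op;            /* operation lives on its own   */
            rq->cmd_flags |= flags; /* flags are ORed in, as before */
    }

    int main(void)
    {
            struct model_request rq = { 0, 0 };

            model_set_op_attrs(&rq, 1 /* write */, MODEL_REQ_ALLOCED);
            printf("op=%u flags=0x%x\n", rq.op, rq.cmd_flags);
            return 0;
    }
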
@@ -1178,7 +1180,7 @@ static struct request *__get_request(struct request_list *rl, int rw_flags,
	if (ioc_batching(q, ioc))
		ioc->nr_batch_requests--;
 
-	trace_block_getrq(q, bio, rw_flags & 1);
+	trace_block_getrq(q, bio, op);
	return rq;
 
 fail_elvpriv:
@@ -1208,7 +1210,7 @@ static struct request *__get_request(struct request_list *rl, int rw_flags,
	 * queue, but this is pretty rare.
	 */
	spin_lock_irq(q->queue_lock);
-	freed_request(rl, rw_flags);
+	freed_request(rl, op, op_flags);
 
	/*
	 * in the very unlikely event that allocation failed and no
@@ -1226,7 +1228,8 @@ static struct request *__get_request(struct request_list *rl, int rw_flags,
 /**
  * get_request - get a free request
  * @q: request_queue to allocate request from
- * @rw_flags: RW and SYNC flags
+ * @op: REQ_OP_READ/REQ_OP_WRITE
+ * @op_flags: rq_flag_bits
  * @bio: bio to allocate request for (can be %NULL)
  * @gfp_mask: allocation mask
  *
@@ -1237,17 +1240,18 @@ static struct request *__get_request(struct request_list *rl, int rw_flags,
  * Returns ERR_PTR on failure, with @q->queue_lock held.
  * Returns request pointer on success, with @q->queue_lock *not held*.
  */
-static struct request *get_request(struct request_queue *q, int rw_flags,
-				   struct bio *bio, gfp_t gfp_mask)
+static struct request *get_request(struct request_queue *q, int op,
+				   int op_flags, struct bio *bio,
+				   gfp_t gfp_mask)
 {
-	const bool is_sync = rw_is_sync(rw_flags) != 0;
+	const bool is_sync = rw_is_sync(op | op_flags) != 0;
	DEFINE_WAIT(wait);
	struct request_list *rl;
	struct request *rq;
 
	rl = blk_get_rl(q, bio);	/* transferred to @rq on success */
 retry:
-	rq = __get_request(rl, rw_flags, bio, gfp_mask);
+	rq = __get_request(rl, op, op_flags, bio, gfp_mask);
	if (!IS_ERR(rq))
		return rq;
 
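
Both callers of get_request() updated later in this patch follow the new convention: the direction travels in the op argument and only the modifier bits stay in op_flags. Condensed from the hunks below (surrounding code elided):

    /* blk_old_get_request(): direction in 'rw', no extra flags yet */
    rq = get_request(q, rw, 0, NULL, gfp_mask);

    /* blk_queue_bio(): direction taken from the bio, sync hint in rw_flags */
    req = get_request(q, bio_data_dir(bio), rw_flags, bio, GFP_NOIO);
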
@@ -1260,7 +1264,7 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
	prepare_to_wait_exclusive(&rl->wait[is_sync], &wait,
				  TASK_UNINTERRUPTIBLE);
 
-	trace_block_sleeprq(q, bio, rw_flags & 1);
+	trace_block_sleeprq(q, bio, op);
 
	spin_unlock_irq(q->queue_lock);
	io_schedule();
@@ -1289,7 +1293,7 @@ static struct request *blk_old_get_request(struct request_queue *q, int rw,
	create_io_context(gfp_mask, q->node);
 
	spin_lock_irq(q->queue_lock);
-	rq = get_request(q, rw, NULL, gfp_mask);
+	rq = get_request(q, rw, 0, NULL, gfp_mask);
	if (IS_ERR(rq))
		spin_unlock_irq(q->queue_lock);
	/* q->queue_lock is unlocked at this point */
@@ -1491,13 +1495,14 @@ void __blk_put_request(struct request_queue *q, struct request *req)
	 */
	if (req->cmd_flags & REQ_ALLOCED) {
		unsigned int flags = req->cmd_flags;
+		int op = req_op(req);
		struct request_list *rl = blk_rq_rl(req);
 
		BUG_ON(!list_empty(&req->queuelist));
		BUG_ON(ELV_ON_HASH(req));
 
		blk_free_request(rl, req);
-		freed_request(rl, flags);
+		freed_request(rl, op, flags);
		blk_put_rl(rl);
	}
 }
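
On the release side the op is not passed down by the caller; __blk_put_request() recovers it from the request itself via req_op() and hands it to freed_request(), mirroring what __get_request() stored at allocation time. A minimal, self-contained model of that round trip is sketched below; the field names are illustrative, not the kernel's internal representation.

    /* Round-trip model: the op recorded at allocation is read back at free
     * time. Purely illustrative; only the shape of the API is the point. */
    #include <assert.h>

    struct model_request {
            unsigned int op;
            unsigned int cmd_flags;
    };

    static unsigned int model_req_op(const struct model_request *rq)
    {
            return rq->op; /* analogue of req_op(req) */
    }

    int main(void)
    {
            struct model_request rq = { .op = 1 /* write */, .cmd_flags = 0 };

            /* the free path asks the request, not the caller, for its op */
            assert(model_req_op(&rq) == 1);
            return 0;
    }
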
@@ -1712,7 +1717,7 @@ static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
 {
	const bool sync = !!(bio->bi_rw & REQ_SYNC);
	struct blk_plug *plug;
-	int el_ret, rw_flags, where = ELEVATOR_INSERT_SORT;
+	int el_ret, rw_flags = 0, where = ELEVATOR_INSERT_SORT;
	struct request *req;
	unsigned int request_count = 0;
 
@@ -1772,15 +1777,14 @@ static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
	 * but we need to set it earlier to expose the sync flag to the
	 * rq allocator and io schedulers.
	 */
-	rw_flags = bio_data_dir(bio);
	if (sync)
		rw_flags |= REQ_SYNC;
 
	/*
	 * Grab a free request. This is might sleep but can not fail.
	 * Returns with the queue unlocked.
	 */
-	req = get_request(q, rw_flags, bio, GFP_NOIO);
+	req = get_request(q, bio_data_dir(bio), rw_flags, bio, GFP_NOIO);
	if (IS_ERR(req)) {
		bio->bi_error = PTR_ERR(req);
		bio_endio(bio);
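
Net effect in blk_queue_bio(): rw_flags now starts at 0 and only accumulates modifier bits (REQ_SYNC here), while the data direction is passed to get_request() as its own argument instead of being folded into the flag word. The sketch below, with hypothetical values, shows why keeping the two apart avoids guessing the direction back out of a combined word.

    /* Sketch only: hypothetical values, not the kernel's REQ_* encoding. */
    #include <stdio.h>

    #define DIR_WRITE 0x1u  /* hypothetical direction encoding */
    #define FLAG_SYNC 0x10u /* hypothetical modifier flag      */

    static void submit(unsigned int dir, unsigned int flags)
    {
            /* direction and flags arrive separately; nothing is decoded
             * from a combined word */
            printf("dir=%s flags=0x%x\n",
                   (dir & DIR_WRITE) ? "write" : "read", flags);
    }

    int main(void)
    {
            unsigned int flags = 0;

            flags |= FLAG_SYNC;       /* only modifiers accumulate here */
            submit(DIR_WRITE, flags); /* direction passed on its own    */
            return 0;
    }
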