@@ -2911,8 +2911,12 @@ bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq,
 	struct bfq_iocq_bfqq_data *bfqq_data = &bic->bfqq_data[a_idx];
 
 	/* if a merge has already been setup, then proceed with that first */
-	if (bfqq->new_bfqq)
-		return bfqq->new_bfqq;
+	new_bfqq = bfqq->new_bfqq;
+	if (new_bfqq) {
+		while (new_bfqq->new_bfqq)
+			new_bfqq = new_bfqq->new_bfqq;
+		return new_bfqq;
+	}
 
 	/*
 	 * Check delayed stable merge for rotational or non-queueing
@@ -3093,8 +3097,8 @@ static void bfq_bfqq_save_state(struct bfq_queue *bfqq)
 }
 
 
-static void
-bfq_reassign_last_bfqq(struct bfq_queue *cur_bfqq, struct bfq_queue *new_bfqq)
+void bfq_reassign_last_bfqq(struct bfq_queue *cur_bfqq,
+			    struct bfq_queue *new_bfqq)
 {
 	if (cur_bfqq->entity.parent &&
 	    cur_bfqq->entity.parent->last_bfqq_created == cur_bfqq)
@@ -3125,10 +3129,12 @@ void bfq_release_process_ref(struct bfq_data *bfqd, struct bfq_queue *bfqq)
 	bfq_put_queue(bfqq);
 }
 
-static void
-bfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic,
-		struct bfq_queue *bfqq, struct bfq_queue *new_bfqq)
+static struct bfq_queue *bfq_merge_bfqqs(struct bfq_data *bfqd,
+					  struct bfq_io_cq *bic,
+					  struct bfq_queue *bfqq)
 {
+	struct bfq_queue *new_bfqq = bfqq->new_bfqq;
+
 	bfq_log_bfqq(bfqd, bfqq, "merging with queue %lu",
 		     (unsigned long)new_bfqq->pid);
 	/* Save weight raising and idle window of the merged queues */
@@ -3222,6 +3228,8 @@ bfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic,
 	bfq_reassign_last_bfqq(bfqq, new_bfqq);
 
 	bfq_release_process_ref(bfqd, bfqq);
+
+	return new_bfqq;
 }
 
 static bool bfq_allow_bio_merge(struct request_queue *q, struct request *rq,
@@ -3257,14 +3265,8 @@ static bool bfq_allow_bio_merge(struct request_queue *q, struct request *rq,
 		 * fulfilled, i.e., bic can be redirected to new_bfqq
 		 * and bfqq can be put.
 		 */
-		bfq_merge_bfqqs(bfqd, bfqd->bio_bic, bfqq,
-				new_bfqq);
-		/*
-		 * If we get here, bio will be queued into new_queue,
-		 * so use new_bfqq to decide whether bio and rq can be
-		 * merged.
-		 */
-		bfqq = new_bfqq;
+		while (bfqq != new_bfqq)
+			bfqq = bfq_merge_bfqqs(bfqd, bfqd->bio_bic, bfqq);
 
 		/*
 		 * Change also bqfd->bio_bfqq, as
@@ -5432,6 +5434,8 @@ void bfq_put_cooperator(struct bfq_queue *bfqq)
 		bfq_put_queue(__bfqq);
 		__bfqq = next;
 	}
+
+	bfq_release_process_ref(bfqq->bfqd, bfqq);
 }
 
 static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq)
@@ -5444,8 +5448,6 @@ static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq)
 	bfq_log_bfqq(bfqd, bfqq, "exit_bfqq: %p, %d", bfqq, bfqq->ref);
 
 	bfq_put_cooperator(bfqq);
-
-	bfq_release_process_ref(bfqd, bfqq);
 }
 
 static void bfq_exit_icq_bfqq(struct bfq_io_cq *bic, bool is_sync,
@@ -5701,9 +5703,7 @@ bfq_do_early_stable_merge(struct bfq_data *bfqd, struct bfq_queue *bfqq,
 	 * state before killing it.
 	 */
 	bfqq->bic = bic;
-	bfq_merge_bfqqs(bfqd, bic, bfqq, new_bfqq);
-
-	return new_bfqq;
+	return bfq_merge_bfqqs(bfqd, bic, bfqq);
 }
 
 /*
@@ -6158,6 +6158,7 @@ static bool __bfq_insert_request(struct bfq_data *bfqd, struct request *rq)
 	bool waiting, idle_timer_disabled = false;
 
 	if (new_bfqq) {
+		struct bfq_queue *old_bfqq = bfqq;
 		/*
 		 * Release the request's reference to the old bfqq
 		 * and make sure one is taken to the shared queue.
@@ -6174,18 +6175,18 @@ static bool __bfq_insert_request(struct bfq_data *bfqd, struct request *rq)
 		 * new_bfqq.
 		 */
 		if (bic_to_bfqq(RQ_BIC(rq), true,
-				bfq_actuator_index(bfqd, rq->bio)) == bfqq)
-			bfq_merge_bfqqs(bfqd, RQ_BIC(rq),
-					bfqq, new_bfqq);
+				bfq_actuator_index(bfqd, rq->bio)) == bfqq) {
+			while (bfqq != new_bfqq)
+				bfqq = bfq_merge_bfqqs(bfqd, RQ_BIC(rq), bfqq);
+		}
 
-		bfq_clear_bfqq_just_created(bfqq);
+		bfq_clear_bfqq_just_created(old_bfqq);
 		/*
 		 * rq is about to be enqueued into new_bfqq,
 		 * release rq reference on bfqq
 		 */
-		bfq_put_queue(bfqq);
+		bfq_put_queue(old_bfqq);
 		rq->elv.priv[1] = new_bfqq;
-		bfqq = new_bfqq;
 	}
 
 	bfq_update_io_thinktime(bfqd, bfqq);
@@ -6723,7 +6724,7 @@ bfq_split_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq)
 {
 	bfq_log_bfqq(bfqq->bfqd, bfqq, "splitting queue");
 
-	if (bfqq_process_refs(bfqq) == 1) {
+	if (bfqq_process_refs(bfqq) == 1 && !bfqq->new_bfqq) {
 		bfqq->pid = current->pid;
 		bfq_clear_bfqq_coop(bfqq);
 		bfq_clear_bfqq_split_coop(bfqq);
@@ -6733,16 +6734,13 @@ bfq_split_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq)
 	bic_set_bfqq(bic, NULL, true, bfqq->actuator_idx);
 
 	bfq_put_cooperator(bfqq);
-
-	bfq_release_process_ref(bfqq->bfqd, bfqq);
 	return NULL;
 }
 
-static struct bfq_queue *bfq_get_bfqq_handle_split(struct bfq_data *bfqd,
-						   struct bfq_io_cq *bic,
-						   struct bio *bio,
-						   bool split, bool is_sync,
-						   bool *new_queue)
+static struct bfq_queue *
+__bfq_get_bfqq_handle_split(struct bfq_data *bfqd, struct bfq_io_cq *bic,
+			    struct bio *bio, bool split, bool is_sync,
+			    bool *new_queue)
 {
 	unsigned int act_idx = bfq_actuator_index(bfqd, bio);
 	struct bfq_queue *bfqq = bic_to_bfqq(bic, is_sync, act_idx);
@@ -6821,6 +6819,84 @@ static void bfq_prepare_request(struct request *rq)
 	rq->elv.priv[0] = rq->elv.priv[1] = NULL;
 }
 
+static struct bfq_queue *bfq_waker_bfqq(struct bfq_queue *bfqq)
+{
+	struct bfq_queue *new_bfqq = bfqq->new_bfqq;
+	struct bfq_queue *waker_bfqq = bfqq->waker_bfqq;
+
+	if (!waker_bfqq)
+		return NULL;
+
+	while (new_bfqq) {
+		if (new_bfqq == waker_bfqq) {
+			/*
+			 * If waker_bfqq is in the merge chain, and current
+			 * is the only process, waker_bfqq can be freed.
+			 */
+			if (bfqq_process_refs(waker_bfqq) == 1)
+				return NULL;
+			break;
+		}
+
+		new_bfqq = new_bfqq->new_bfqq;
+	}
+
+	return waker_bfqq;
+}
+
+static struct bfq_queue *bfq_get_bfqq_handle_split(struct bfq_data *bfqd,
+						   struct bfq_io_cq *bic,
+						   struct bio *bio,
+						   unsigned int idx,
+						   bool is_sync)
+{
+	struct bfq_queue *waker_bfqq;
+	struct bfq_queue *bfqq;
+	bool new_queue = false;
+
+	bfqq = __bfq_get_bfqq_handle_split(bfqd, bic, bio, false, is_sync,
+					   &new_queue);
+	if (unlikely(new_queue))
+		return bfqq;
+
+	/* If the queue was seeky for too long, break it apart. */
+	if (!bfq_bfqq_coop(bfqq) || !bfq_bfqq_split_coop(bfqq) ||
+	    bic->bfqq_data[idx].stably_merged)
+		return bfqq;
+
+	waker_bfqq = bfq_waker_bfqq(bfqq);
+
+	/* Update bic before losing reference to bfqq */
+	if (bfq_bfqq_in_large_burst(bfqq))
+		bic->bfqq_data[idx].saved_in_large_burst = true;
+
+	bfqq = bfq_split_bfqq(bic, bfqq);
+	if (bfqq) {
+		bfq_bfqq_resume_state(bfqq, bfqd, bic, true);
+		return bfqq;
+	}
+
+	bfqq = __bfq_get_bfqq_handle_split(bfqd, bic, bio, true, is_sync, NULL);
+	if (unlikely(bfqq == &bfqd->oom_bfqq))
+		return bfqq;
+
+	bfq_bfqq_resume_state(bfqq, bfqd, bic, false);
+	bfqq->waker_bfqq = waker_bfqq;
+	bfqq->tentative_waker_bfqq = NULL;
+
+	/*
+	 * If the waker queue disappears, then new_bfqq->waker_bfqq must be
+	 * reset. So insert new_bfqq into the
+	 * woken_list of the waker. See
+	 * bfq_check_waker for details.
+	 */
+	if (waker_bfqq)
+		hlist_add_head(&bfqq->woken_list_node,
+			       &bfqq->waker_bfqq->woken_list);
+
+	return bfqq;
+}
+
 /*
  * If needed, init rq, allocate bfq data structures associated with
  * rq, and increment reference counters in the destination bfq_queue
@@ -6852,8 +6928,6 @@ static struct bfq_queue *bfq_init_rq(struct request *rq)
 	struct bfq_io_cq *bic;
 	const int is_sync = rq_is_sync(rq);
 	struct bfq_queue *bfqq;
-	bool new_queue = false;
-	bool bfqq_already_existing = false, split = false;
 	unsigned int a_idx = bfq_actuator_index(bfqd, bio);
 
 	if (unlikely(!rq->elv.icq))
@@ -6870,54 +6944,9 @@ static struct bfq_queue *bfq_init_rq(struct request *rq)
 		return RQ_BFQQ(rq);
 
 	bic = icq_to_bic(rq->elv.icq);
-
 	bfq_check_ioprio_change(bic, bio);
-
 	bfq_bic_update_cgroup(bic, bio);
-
-	bfqq = bfq_get_bfqq_handle_split(bfqd, bic, bio, false, is_sync,
-					 &new_queue);
-
-	if (likely(!new_queue)) {
-		/* If the queue was seeky for too long, break it apart. */
-		if (bfq_bfqq_coop(bfqq) && bfq_bfqq_split_coop(bfqq) &&
-		    !bic->bfqq_data[a_idx].stably_merged) {
-			struct bfq_queue *old_bfqq = bfqq;
-
-			/* Update bic before losing reference to bfqq */
-			if (bfq_bfqq_in_large_burst(bfqq))
-				bic->bfqq_data[a_idx].saved_in_large_burst =
-					true;
-
-			bfqq = bfq_split_bfqq(bic, bfqq);
-			split = true;
-
-			if (!bfqq) {
-				bfqq = bfq_get_bfqq_handle_split(bfqd, bic, bio,
-								 true, is_sync,
-								 NULL);
-				if (unlikely(bfqq == &bfqd->oom_bfqq))
-					bfqq_already_existing = true;
-			} else
-				bfqq_already_existing = true;
-
-			if (!bfqq_already_existing) {
-				bfqq->waker_bfqq = old_bfqq->waker_bfqq;
-				bfqq->tentative_waker_bfqq = NULL;
-
-				/*
-				 * If the waker queue disappears, then
-				 * new_bfqq->waker_bfqq must be
-				 * reset. So insert new_bfqq into the
-				 * woken_list of the waker. See
-				 * bfq_check_waker for details.
-				 */
-				if (bfqq->waker_bfqq)
-					hlist_add_head(&bfqq->woken_list_node,
-						       &bfqq->waker_bfqq->woken_list);
-			}
-		}
-	}
+	bfqq = bfq_get_bfqq_handle_split(bfqd, bic, bio, a_idx, is_sync);
 
 	bfqq_request_allocated(bfqq);
 	bfqq->ref++;
@@ -6934,18 +6963,9 @@ static struct bfq_queue *bfq_init_rq(struct request *rq)
 	 * addition, if the queue has also just been split, we have to
 	 * resume its state.
 	 */
-	if (likely(bfqq != &bfqd->oom_bfqq) && bfqq_process_refs(bfqq) == 1) {
+	if (likely(bfqq != &bfqd->oom_bfqq) && !bfqq->new_bfqq &&
+	    bfqq_process_refs(bfqq) == 1)
 		bfqq->bic = bic;
-		if (split) {
-			/*
-			 * The queue has just been split from a shared
-			 * queue: restore the idle window and the
-			 * possible weight raising period.
-			 */
-			bfq_bfqq_resume_state(bfqq, bfqd, bic,
-					      bfqq_already_existing);
-		}
-	}
 
 	/*
 	 * Consider bfqq as possibly belonging to a burst of newly