Skip to content

Commit c9aa889

Browse files
Vishal Verma authored and liu-song-6 committed
md: raid10 add nowait support
This adds nowait support to the RAID10 driver, very similar to the raid1 driver changes. It makes the RAID10 driver return EAGAIN in situations where it would otherwise have to wait, e.g.: - waiting for the barrier, - a reshape operation, - a discard operation. The wait_barrier() and regular_request_wait() functions are modified to return bool so that wait barriers can report an error. They return true when the wait completed (or no wait was required), and false when a wait was required but not performed, to support nowait. Reviewed-by: Jens Axboe <[email protected]> Signed-off-by: Vishal Verma <[email protected]> Signed-off-by: Song Liu <[email protected]>
1 parent 5aa7050 commit c9aa889

File tree

1 file changed

+67
-33
lines changed

1 file changed

+67
-33
lines changed

drivers/md/raid10.c

Lines changed: 67 additions & 33 deletions
Original file line number | Diff line number | Diff line change
@@ -952,8 +952,10 @@ static void lower_barrier(struct r10conf *conf)
952952
wake_up(&conf->wait_barrier);
953953
}
954954

955-
static void wait_barrier(struct r10conf *conf)
955+
static bool wait_barrier(struct r10conf *conf, bool nowait)
956956
{
957+
bool ret = true;
958+
957959
spin_lock_irq(&conf->resync_lock);
958960
if (conf->barrier) {
959961
struct bio_list *bio_list = current->bio_list;
@@ -967,27 +969,35 @@ static void wait_barrier(struct r10conf *conf)
967969
* that queue to get the nr_pending
968970
* count down.
969971
*/
970-
raid10_log(conf->mddev, "wait barrier");
971-
wait_event_lock_irq(conf->wait_barrier,
972-
!conf->barrier ||
973-
(atomic_read(&conf->nr_pending) &&
974-
bio_list &&
975-
(!bio_list_empty(&bio_list[0]) ||
976-
!bio_list_empty(&bio_list[1]))) ||
977-
/* move on if recovery thread is
978-
* blocked by us
979-
*/
980-
(conf->mddev->thread->tsk == current &&
981-
test_bit(MD_RECOVERY_RUNNING,
982-
&conf->mddev->recovery) &&
983-
conf->nr_queued > 0),
984-
conf->resync_lock);
972+
/* Return false when nowait flag is set */
973+
if (nowait) {
974+
ret = false;
975+
} else {
976+
raid10_log(conf->mddev, "wait barrier");
977+
wait_event_lock_irq(conf->wait_barrier,
978+
!conf->barrier ||
979+
(atomic_read(&conf->nr_pending) &&
980+
bio_list &&
981+
(!bio_list_empty(&bio_list[0]) ||
982+
!bio_list_empty(&bio_list[1]))) ||
983+
/* move on if recovery thread is
984+
* blocked by us
985+
*/
986+
(conf->mddev->thread->tsk == current &&
987+
test_bit(MD_RECOVERY_RUNNING,
988+
&conf->mddev->recovery) &&
989+
conf->nr_queued > 0),
990+
conf->resync_lock);
991+
}
985992
conf->nr_waiting--;
986993
if (!conf->nr_waiting)
987994
wake_up(&conf->wait_barrier);
988995
}
989-
atomic_inc(&conf->nr_pending);
996+
/* Only increment nr_pending when we wait */
997+
if (ret)
998+
atomic_inc(&conf->nr_pending);
990999
spin_unlock_irq(&conf->resync_lock);
1000+
return ret;
9911001
}
9921002

9931003
static void allow_barrier(struct r10conf *conf)
@@ -1098,21 +1108,30 @@ static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
10981108
* currently.
10991109
* 2. If IO spans the reshape position. Need to wait for reshape to pass.
11001110
*/
1101-
static void regular_request_wait(struct mddev *mddev, struct r10conf *conf,
1111+
static bool regular_request_wait(struct mddev *mddev, struct r10conf *conf,
11021112
struct bio *bio, sector_t sectors)
11031113
{
1104-
wait_barrier(conf);
1114+
/* Bail out if REQ_NOWAIT is set for the bio */
1115+
if (!wait_barrier(conf, bio->bi_opf & REQ_NOWAIT)) {
1116+
bio_wouldblock_error(bio);
1117+
return false;
1118+
}
11051119
while (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
11061120
bio->bi_iter.bi_sector < conf->reshape_progress &&
11071121
bio->bi_iter.bi_sector + sectors > conf->reshape_progress) {
1108-
raid10_log(conf->mddev, "wait reshape");
11091122
allow_barrier(conf);
1123+
if (bio->bi_opf & REQ_NOWAIT) {
1124+
bio_wouldblock_error(bio);
1125+
return false;
1126+
}
1127+
raid10_log(conf->mddev, "wait reshape");
11101128
wait_event(conf->wait_barrier,
11111129
conf->reshape_progress <= bio->bi_iter.bi_sector ||
11121130
conf->reshape_progress >= bio->bi_iter.bi_sector +
11131131
sectors);
1114-
wait_barrier(conf);
1132+
wait_barrier(conf, false);
11151133
}
1134+
return true;
11161135
}
11171136

11181137
static void raid10_read_request(struct mddev *mddev, struct bio *bio,
@@ -1157,7 +1176,8 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio,
11571176
rcu_read_unlock();
11581177
}
11591178

1160-
regular_request_wait(mddev, conf, bio, r10_bio->sectors);
1179+
if (!regular_request_wait(mddev, conf, bio, r10_bio->sectors))
1180+
return;
11611181
rdev = read_balance(conf, r10_bio, &max_sectors);
11621182
if (!rdev) {
11631183
if (err_rdev) {
@@ -1179,7 +1199,7 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio,
11791199
bio_chain(split, bio);
11801200
allow_barrier(conf);
11811201
submit_bio_noacct(bio);
1182-
wait_barrier(conf);
1202+
wait_barrier(conf, false);
11831203
bio = split;
11841204
r10_bio->master_bio = bio;
11851205
r10_bio->sectors = max_sectors;
@@ -1338,7 +1358,7 @@ static void wait_blocked_dev(struct mddev *mddev, struct r10bio *r10_bio)
13381358
raid10_log(conf->mddev, "%s wait rdev %d blocked",
13391359
__func__, blocked_rdev->raid_disk);
13401360
md_wait_for_blocked_rdev(blocked_rdev, mddev);
1341-
wait_barrier(conf);
1361+
wait_barrier(conf, false);
13421362
goto retry_wait;
13431363
}
13441364
}
@@ -1356,6 +1376,11 @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio,
13561376
bio->bi_iter.bi_sector,
13571377
bio_end_sector(bio)))) {
13581378
DEFINE_WAIT(w);
1379+
/* Bail out if REQ_NOWAIT is set for the bio */
1380+
if (bio->bi_opf & REQ_NOWAIT) {
1381+
bio_wouldblock_error(bio);
1382+
return;
1383+
}
13591384
for (;;) {
13601385
prepare_to_wait(&conf->wait_barrier,
13611386
&w, TASK_IDLE);
@@ -1368,7 +1393,8 @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio,
13681393
}
13691394

13701395
sectors = r10_bio->sectors;
1371-
regular_request_wait(mddev, conf, bio, sectors);
1396+
if (!regular_request_wait(mddev, conf, bio, sectors))
1397+
return;
13721398
if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
13731399
(mddev->reshape_backwards
13741400
? (bio->bi_iter.bi_sector < conf->reshape_safe &&
@@ -1380,6 +1406,11 @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio,
13801406
set_mask_bits(&mddev->sb_flags, 0,
13811407
BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING));
13821408
md_wakeup_thread(mddev->thread);
1409+
if (bio->bi_opf & REQ_NOWAIT) {
1410+
allow_barrier(conf);
1411+
bio_wouldblock_error(bio);
1412+
return;
1413+
}
13831414
raid10_log(conf->mddev, "wait reshape metadata");
13841415
wait_event(mddev->sb_wait,
13851416
!test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
@@ -1476,7 +1507,7 @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio,
14761507
bio_chain(split, bio);
14771508
allow_barrier(conf);
14781509
submit_bio_noacct(bio);
1479-
wait_barrier(conf);
1510+
wait_barrier(conf, false);
14801511
bio = split;
14811512
r10_bio->master_bio = bio;
14821513
}
@@ -1601,7 +1632,11 @@ static int raid10_handle_discard(struct mddev *mddev, struct bio *bio)
16011632
if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
16021633
return -EAGAIN;
16031634

1604-
wait_barrier(conf);
1635+
if (WARN_ON_ONCE(bio->bi_opf & REQ_NOWAIT)) {
1636+
bio_wouldblock_error(bio);
1637+
return 0;
1638+
}
1639+
wait_barrier(conf, false);
16051640

16061641
/*
16071642
* Check reshape again to avoid reshape happens after checking
@@ -1643,7 +1678,7 @@ static int raid10_handle_discard(struct mddev *mddev, struct bio *bio)
16431678
allow_barrier(conf);
16441679
/* Resend the fist split part */
16451680
submit_bio_noacct(split);
1646-
wait_barrier(conf);
1681+
wait_barrier(conf, false);
16471682
}
16481683
div_u64_rem(bio_end, stripe_size, &remainder);
16491684
if (remainder) {
@@ -1654,7 +1689,7 @@ static int raid10_handle_discard(struct mddev *mddev, struct bio *bio)
16541689
/* Resend the second split part */
16551690
submit_bio_noacct(bio);
16561691
bio = split;
1657-
wait_barrier(conf);
1692+
wait_barrier(conf, false);
16581693
}
16591694

16601695
bio_start = bio->bi_iter.bi_sector;
@@ -1810,7 +1845,7 @@ static int raid10_handle_discard(struct mddev *mddev, struct bio *bio)
18101845
end_disk_offset += geo->stride;
18111846
atomic_inc(&first_r10bio->remaining);
18121847
raid_end_discard_bio(r10_bio);
1813-
wait_barrier(conf);
1848+
wait_barrier(conf, false);
18141849
goto retry_discard;
18151850
}
18161851

@@ -2005,7 +2040,7 @@ static void print_conf(struct r10conf *conf)
20052040

20062041
static void close_sync(struct r10conf *conf)
20072042
{
2008-
wait_barrier(conf);
2043+
wait_barrier(conf, false);
20092044
allow_barrier(conf);
20102045

20112046
mempool_exit(&conf->r10buf_pool);
@@ -4813,7 +4848,7 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
48134848
if (need_flush ||
48144849
time_after(jiffies, conf->reshape_checkpoint + 10*HZ)) {
48154850
/* Need to update reshape_position in metadata */
4816-
wait_barrier(conf);
4851+
wait_barrier(conf, false);
48174852
mddev->reshape_position = conf->reshape_progress;
48184853
if (mddev->reshape_backwards)
48194854
mddev->curr_resync_completed = raid10_size(mddev, 0, 0)
@@ -5236,4 +5271,3 @@ MODULE_DESCRIPTION("RAID10 (striped mirror) personality for MD");
52365271
MODULE_ALIAS("md-personality-9"); /* RAID10 */
52375272
MODULE_ALIAS("md-raid10");
52385273
MODULE_ALIAS("md-level-10");
5239-

0 commit comments

Comments
 (0)