Commit cdd4d78

dm writecache: split up writecache_map() to improve code readability
writecache_map() has grown too large and can be confusing to read given all the goto statements.

Signed-off-by: Mike Snitzer <[email protected]>
1 parent 99d26de commit cdd4d78
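As a rough illustration of the pattern this commit adopts, here is a minimal standalone sketch (illustrative only, not the actual dm-writecache code; map_read(), map_write() and do_map() are hypothetical stand-ins for the real helpers): classification helpers return an enum wc_map_op value, and a single switch performs the exit action that the goto labels previously reached directly.

/*
 * Minimal sketch of the refactoring pattern (assumptions: hypothetical
 * stand-ins, printf in place of real bio completion/remap/error calls).
 */
#include <stdio.h>

enum wc_map_op {
	WC_MAP_SUBMIT,		/* complete the I/O here */
	WC_MAP_REMAP,		/* send the I/O on to another device */
	WC_MAP_ERROR,		/* fail the I/O */
};

/* Hypothetical stand-in for writecache_map_read(): hit vs. miss. */
static enum wc_map_op map_read(int cache_hit)
{
	return cache_hit ? WC_MAP_SUBMIT : WC_MAP_REMAP;
}

/* Hypothetical stand-in for writecache_map_write(). */
static enum wc_map_op map_write(int has_error)
{
	return has_error ? WC_MAP_ERROR : WC_MAP_REMAP;
}

/*
 * Hypothetical stand-in for writecache_map(): dispatch on the enum so
 * each exit path (unlock + return in the real code) appears only once.
 */
static int do_map(int is_read, int flag)
{
	enum wc_map_op op = is_read ? map_read(flag) : map_write(flag);

	switch (op) {
	case WC_MAP_SUBMIT:
		printf("submit\n");
		return 0;
	case WC_MAP_REMAP:
		printf("remap\n");
		return 0;
	case WC_MAP_ERROR:
		printf("error\n");
		return -1;
	}
	return -1;
}

int main(void)
{
	do_map(1, 1);	/* read, cache hit  -> submit */
	do_map(1, 0);	/* read, cache miss -> remap  */
	do_map(0, 1);	/* write error      -> error  */
	return 0;
}

The effect, visible in the diff below, is that every unlock-and-return path appears exactly once, as a case of the switch at the tail of writecache_map().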

File tree

1 file changed: +187 -151 lines changed

drivers/md/dm-writecache.c

Lines changed: 187 additions & 151 deletions
@@ -1293,10 +1293,164 @@ static void writecache_offload_bio(struct dm_writecache *wc, struct bio *bio)
 		bio_list_add(&wc->flush_list, bio);
 }
 
-static int writecache_map(struct dm_target *ti, struct bio *bio)
+enum wc_map_op {
+	WC_MAP_SUBMIT,
+	WC_MAP_REMAP,
+	WC_MAP_REMAP_ORIGIN,
+	WC_MAP_RETURN,
+	WC_MAP_ERROR,
+};
+
+static enum wc_map_op writecache_map_read(struct dm_writecache *wc, struct bio *bio)
 {
+	enum wc_map_op map_op;
 	struct wc_entry *e;
+
+read_next_block:
+	e = writecache_find_entry(wc, bio->bi_iter.bi_sector, WFE_RETURN_FOLLOWING);
+	if (e && read_original_sector(wc, e) == bio->bi_iter.bi_sector) {
+		if (WC_MODE_PMEM(wc)) {
+			bio_copy_block(wc, bio, memory_data(wc, e));
+			if (bio->bi_iter.bi_size)
+				goto read_next_block;
+			map_op = WC_MAP_SUBMIT;
+		} else {
+			dm_accept_partial_bio(bio, wc->block_size >> SECTOR_SHIFT);
+			bio_set_dev(bio, wc->ssd_dev->bdev);
+			bio->bi_iter.bi_sector = cache_sector(wc, e);
+			if (!writecache_entry_is_committed(wc, e))
+				writecache_wait_for_ios(wc, WRITE);
+			map_op = WC_MAP_REMAP;
+		}
+	} else {
+		if (e) {
+			sector_t next_boundary =
+				read_original_sector(wc, e) - bio->bi_iter.bi_sector;
+			if (next_boundary < bio->bi_iter.bi_size >> SECTOR_SHIFT)
+				dm_accept_partial_bio(bio, next_boundary);
+		}
+		map_op = WC_MAP_REMAP_ORIGIN;
+	}
+
+	return map_op;
+}
+
+static enum wc_map_op writecache_bio_copy_ssd(struct dm_writecache *wc, struct bio *bio,
+					      struct wc_entry *e, bool search_used)
+{
+	unsigned bio_size = wc->block_size;
+	sector_t start_cache_sec = cache_sector(wc, e);
+	sector_t current_cache_sec = start_cache_sec + (bio_size >> SECTOR_SHIFT);
+
+	while (bio_size < bio->bi_iter.bi_size) {
+		if (!search_used) {
+			struct wc_entry *f = writecache_pop_from_freelist(wc, current_cache_sec);
+			if (!f)
+				break;
+			write_original_sector_seq_count(wc, f, bio->bi_iter.bi_sector +
+							(bio_size >> SECTOR_SHIFT), wc->seq_count);
+			writecache_insert_entry(wc, f);
+			wc->uncommitted_blocks++;
+		} else {
+			struct wc_entry *f;
+			struct rb_node *next = rb_next(&e->rb_node);
+			if (!next)
+				break;
+			f = container_of(next, struct wc_entry, rb_node);
+			if (f != e + 1)
+				break;
+			if (read_original_sector(wc, f) !=
+			    read_original_sector(wc, e) + (wc->block_size >> SECTOR_SHIFT))
+				break;
+			if (unlikely(f->write_in_progress))
+				break;
+			if (writecache_entry_is_committed(wc, f))
+				wc->overwrote_committed = true;
+			e = f;
+		}
+		bio_size += wc->block_size;
+		current_cache_sec += wc->block_size >> SECTOR_SHIFT;
+	}
+
+	bio_set_dev(bio, wc->ssd_dev->bdev);
+	bio->bi_iter.bi_sector = start_cache_sec;
+	dm_accept_partial_bio(bio, bio_size >> SECTOR_SHIFT);
+
+	if (unlikely(wc->uncommitted_blocks >= wc->autocommit_blocks)) {
+		wc->uncommitted_blocks = 0;
+		queue_work(wc->writeback_wq, &wc->flush_work);
+	} else {
+		writecache_schedule_autocommit(wc);
+	}
+
+	return WC_MAP_REMAP;
+}
+
+static enum wc_map_op writecache_map_write(struct dm_writecache *wc, struct bio *bio)
+{
+	struct wc_entry *e;
+
+	do {
+		bool found_entry = false;
+		bool search_used = false;
+		if (writecache_has_error(wc))
+			return WC_MAP_ERROR;
+		e = writecache_find_entry(wc, bio->bi_iter.bi_sector, 0);
+		if (e) {
+			if (!writecache_entry_is_committed(wc, e)) {
+				search_used = true;
+				goto bio_copy;
+			}
+			if (!WC_MODE_PMEM(wc) && !e->write_in_progress) {
+				wc->overwrote_committed = true;
+				search_used = true;
+				goto bio_copy;
+			}
+			found_entry = true;
+		} else {
+			if (unlikely(wc->cleaner) ||
+			    (wc->metadata_only && !(bio->bi_opf & REQ_META)))
+				goto direct_write;
+		}
+		e = writecache_pop_from_freelist(wc, (sector_t)-1);
+		if (unlikely(!e)) {
+			if (!WC_MODE_PMEM(wc) && !found_entry) {
+direct_write:
+				e = writecache_find_entry(wc, bio->bi_iter.bi_sector, WFE_RETURN_FOLLOWING);
+				if (e) {
+					sector_t next_boundary = read_original_sector(wc, e) - bio->bi_iter.bi_sector;
+					BUG_ON(!next_boundary);
+					if (next_boundary < bio->bi_iter.bi_size >> SECTOR_SHIFT) {
+						dm_accept_partial_bio(bio, next_boundary);
+					}
+				}
+				return WC_MAP_REMAP_ORIGIN;
+			}
+			writecache_wait_on_freelist(wc);
+			continue;
+		}
+		write_original_sector_seq_count(wc, e, bio->bi_iter.bi_sector, wc->seq_count);
+		writecache_insert_entry(wc, e);
+		wc->uncommitted_blocks++;
+bio_copy:
+		if (WC_MODE_PMEM(wc))
+			bio_copy_block(wc, bio, memory_data(wc, e));
+		else
+			return writecache_bio_copy_ssd(wc, bio, e, search_used);
+	} while (bio->bi_iter.bi_size);
+
+	if (unlikely(bio->bi_opf & REQ_FUA || wc->uncommitted_blocks >= wc->autocommit_blocks))
+		writecache_flush(wc);
+	else
+		writecache_schedule_autocommit(wc);
+
+	return WC_MAP_SUBMIT;
+}
+
+static int writecache_map(struct dm_target *ti, struct bio *bio)
+{
 	struct dm_writecache *wc = ti->private;
+	enum wc_map_op map_op = WC_MAP_ERROR;
 
 	bio->bi_private = NULL;
 
@@ -1342,167 +1496,49 @@ static int writecache_map(struct dm_target *ti, struct bio *bio)
 		}
 	}
 
-	if (bio_data_dir(bio) == READ) {
-read_next_block:
-		e = writecache_find_entry(wc, bio->bi_iter.bi_sector, WFE_RETURN_FOLLOWING);
-		if (e && read_original_sector(wc, e) == bio->bi_iter.bi_sector) {
-			if (WC_MODE_PMEM(wc)) {
-				bio_copy_block(wc, bio, memory_data(wc, e));
-				if (bio->bi_iter.bi_size)
-					goto read_next_block;
-				goto unlock_submit;
-			} else {
-				dm_accept_partial_bio(bio, wc->block_size >> SECTOR_SHIFT);
-				bio_set_dev(bio, wc->ssd_dev->bdev);
-				bio->bi_iter.bi_sector = cache_sector(wc, e);
-				if (!writecache_entry_is_committed(wc, e))
-					writecache_wait_for_ios(wc, WRITE);
-				goto unlock_remap;
-			}
-		} else {
-			if (e) {
-				sector_t next_boundary =
-					read_original_sector(wc, e) - bio->bi_iter.bi_sector;
-				if (next_boundary < bio->bi_iter.bi_size >> SECTOR_SHIFT) {
-					dm_accept_partial_bio(bio, next_boundary);
-				}
-			}
-			goto unlock_remap_origin;
-		}
-	} else {
-		do {
-			bool found_entry = false;
-			bool search_used = false;
-			if (writecache_has_error(wc))
-				goto unlock_error;
-			e = writecache_find_entry(wc, bio->bi_iter.bi_sector, 0);
-			if (e) {
-				if (!writecache_entry_is_committed(wc, e)) {
-					search_used = true;
-					goto bio_copy;
-				}
-				if (!WC_MODE_PMEM(wc) && !e->write_in_progress) {
-					wc->overwrote_committed = true;
-					search_used = true;
-					goto bio_copy;
-				}
-				found_entry = true;
-			} else {
-				if (unlikely(wc->cleaner) ||
-				    (wc->metadata_only && !(bio->bi_opf & REQ_META)))
-					goto direct_write;
-			}
-			e = writecache_pop_from_freelist(wc, (sector_t)-1);
-			if (unlikely(!e)) {
-				if (!WC_MODE_PMEM(wc) && !found_entry) {
-direct_write:
-					e = writecache_find_entry(wc, bio->bi_iter.bi_sector, WFE_RETURN_FOLLOWING);
-					if (e) {
-						sector_t next_boundary = read_original_sector(wc, e) - bio->bi_iter.bi_sector;
-						BUG_ON(!next_boundary);
-						if (next_boundary < bio->bi_iter.bi_size >> SECTOR_SHIFT) {
-							dm_accept_partial_bio(bio, next_boundary);
-						}
-					}
-					goto unlock_remap_origin;
-				}
-				writecache_wait_on_freelist(wc);
-				continue;
-			}
-			write_original_sector_seq_count(wc, e, bio->bi_iter.bi_sector, wc->seq_count);
-			writecache_insert_entry(wc, e);
-			wc->uncommitted_blocks++;
-bio_copy:
-			if (WC_MODE_PMEM(wc)) {
-				bio_copy_block(wc, bio, memory_data(wc, e));
-			} else {
-				unsigned bio_size = wc->block_size;
-				sector_t start_cache_sec = cache_sector(wc, e);
-				sector_t current_cache_sec = start_cache_sec + (bio_size >> SECTOR_SHIFT);
-
-				while (bio_size < bio->bi_iter.bi_size) {
-					if (!search_used) {
-						struct wc_entry *f = writecache_pop_from_freelist(wc, current_cache_sec);
-						if (!f)
-							break;
-						write_original_sector_seq_count(wc, f, bio->bi_iter.bi_sector +
-										(bio_size >> SECTOR_SHIFT), wc->seq_count);
-						writecache_insert_entry(wc, f);
-						wc->uncommitted_blocks++;
-					} else {
-						struct wc_entry *f;
-						struct rb_node *next = rb_next(&e->rb_node);
-						if (!next)
-							break;
-						f = container_of(next, struct wc_entry, rb_node);
-						if (f != e + 1)
-							break;
-						if (read_original_sector(wc, f) !=
-						    read_original_sector(wc, e) + (wc->block_size >> SECTOR_SHIFT))
-							break;
-						if (unlikely(f->write_in_progress))
-							break;
-						if (writecache_entry_is_committed(wc, f))
-							wc->overwrote_committed = true;
-						e = f;
-					}
-					bio_size += wc->block_size;
-					current_cache_sec += wc->block_size >> SECTOR_SHIFT;
-				}
-
-				bio_set_dev(bio, wc->ssd_dev->bdev);
-				bio->bi_iter.bi_sector = start_cache_sec;
-				dm_accept_partial_bio(bio, bio_size >> SECTOR_SHIFT);
-
-				if (unlikely(wc->uncommitted_blocks >= wc->autocommit_blocks)) {
-					wc->uncommitted_blocks = 0;
-					queue_work(wc->writeback_wq, &wc->flush_work);
-				} else {
-					writecache_schedule_autocommit(wc);
-				}
-				goto unlock_remap;
-			}
-		} while (bio->bi_iter.bi_size);
-
-		if (unlikely(bio->bi_opf & REQ_FUA ||
-			     wc->uncommitted_blocks >= wc->autocommit_blocks))
-			writecache_flush(wc);
-		else
-			writecache_schedule_autocommit(wc);
-		goto unlock_submit;
-	}
+	if (bio_data_dir(bio) == READ)
+		map_op = writecache_map_read(wc, bio);
+	else
+		map_op = writecache_map_write(wc, bio);
 
+	switch (map_op) {
+	case WC_MAP_REMAP_ORIGIN:
 unlock_remap_origin:
-	if (likely(wc->pause != 0)) {
-		if (bio_op(bio) == REQ_OP_WRITE) {
-			dm_iot_io_begin(&wc->iot, 1);
-			bio->bi_private = (void *)2;
+		if (likely(wc->pause != 0)) {
+			if (bio_op(bio) == REQ_OP_WRITE) {
+				dm_iot_io_begin(&wc->iot, 1);
+				bio->bi_private = (void *)2;
+			}
 		}
-	}
-	bio_set_dev(bio, wc->dev->bdev);
-	wc_unlock(wc);
-	return DM_MAPIO_REMAPPED;
+		bio_set_dev(bio, wc->dev->bdev);
+		wc_unlock(wc);
+		return DM_MAPIO_REMAPPED;
 
+	case WC_MAP_REMAP:
 unlock_remap:
-	/* make sure that writecache_end_io decrements bio_in_progress: */
-	bio->bi_private = (void *)1;
-	atomic_inc(&wc->bio_in_progress[bio_data_dir(bio)]);
-	wc_unlock(wc);
-	return DM_MAPIO_REMAPPED;
+		/* make sure that writecache_end_io decrements bio_in_progress: */
+		bio->bi_private = (void *)1;
+		atomic_inc(&wc->bio_in_progress[bio_data_dir(bio)]);
+		wc_unlock(wc);
+		return DM_MAPIO_REMAPPED;
 
+	case WC_MAP_SUBMIT:
 unlock_submit:
-	wc_unlock(wc);
-	bio_endio(bio);
-	return DM_MAPIO_SUBMITTED;
+		wc_unlock(wc);
+		bio_endio(bio);
+		return DM_MAPIO_SUBMITTED;
 
+	case WC_MAP_RETURN:
 unlock_return:
-	wc_unlock(wc);
-	return DM_MAPIO_SUBMITTED;
+		wc_unlock(wc);
+		return DM_MAPIO_SUBMITTED;
 
+	case WC_MAP_ERROR:
 unlock_error:
-	wc_unlock(wc);
-	bio_io_error(bio);
-	return DM_MAPIO_SUBMITTED;
+		wc_unlock(wc);
+		bio_io_error(bio);
+		return DM_MAPIO_SUBMITTED;
+	}
 }
 
 static int writecache_end_io(struct dm_target *ti, struct bio *bio, blk_status_t *status)
