
Commit b18c3ab

adam900710 authored and kdave committed
btrfs: defrag: introduce helper to defrag one cluster
This new helper, defrag_one_cluster(), will defrag one cluster (at most 256K):

- Collect all initial targets
- Kick in readahead when possible
- Call defrag_one_range() on each initial target, with some extra range clamping
- Update the @sectors_defragged parameter

This involves one behavior change: the defragged sectors accounting is no longer as accurate as the old behavior, as the initial targets are not consistent. We can have new holes punched inside the initial target, and we will skip such holes later. But the defragged sectors accounting doesn't need to be that accurate anyway, thus I don't want to push that extra accounting burden into defrag_one_range().

Signed-off-by: Qu Wenruo <[email protected]>
Reviewed-by: David Sterba <[email protected]>
Signed-off-by: David Sterba <[email protected]>
1 parent e9eec72 commit b18c3ab
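For context, defrag_collect_targets() fills @target_list with small {start, len} nodes that this helper then walks. A sketch of the node type matching how it is used in the diff below; the actual definition comes from an earlier patch in this series, and the field comments are mine:

struct defrag_target_range {
        struct list_head list;  /* Linked into the per-cluster target_list */
        u64 start;              /* File offset of the range to defrag, in bytes */
        u64 len;                /* Length of the range, in bytes */
};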

File tree: 1 file changed, +56 -0 lines changed


fs/btrfs/ioctl.c

Lines changed: 56 additions & 0 deletions
@@ -1689,6 +1689,62 @@ static int defrag_one_range(struct btrfs_inode *inode, u64 start, u32 len,
 	return ret;
 }
 
+static int defrag_one_cluster(struct btrfs_inode *inode,
+			      struct file_ra_state *ra,
+			      u64 start, u32 len, u32 extent_thresh,
+			      u64 newer_than, bool do_compress,
+			      unsigned long *sectors_defragged,
+			      unsigned long max_sectors)
+{
+	const u32 sectorsize = inode->root->fs_info->sectorsize;
+	struct defrag_target_range *entry;
+	struct defrag_target_range *tmp;
+	LIST_HEAD(target_list);
+	int ret;
+
+	BUILD_BUG_ON(!IS_ALIGNED(CLUSTER_SIZE, PAGE_SIZE));
+	ret = defrag_collect_targets(inode, start, len, extent_thresh,
+				     newer_than, do_compress, false,
+				     &target_list);
+	if (ret < 0)
+		goto out;
+
+	list_for_each_entry(entry, &target_list, list) {
+		u32 range_len = entry->len;
+
+		/* Reached the limit */
+		if (max_sectors && max_sectors == *sectors_defragged)
+			break;
+
+		if (max_sectors)
+			range_len = min_t(u32, range_len,
+				(max_sectors - *sectors_defragged) * sectorsize);
+
+		if (ra)
+			page_cache_sync_readahead(inode->vfs_inode.i_mapping,
+				ra, NULL, entry->start >> PAGE_SHIFT,
+				((entry->start + range_len - 1) >> PAGE_SHIFT) -
+				(entry->start >> PAGE_SHIFT) + 1);
+		/*
+		 * Here we may not defrag any range if holes are punched before
+		 * we locked the pages.
+		 * But that's fine, it only affects the @sectors_defragged
+		 * accounting.
+		 */
+		ret = defrag_one_range(inode, entry->start, range_len,
+				       extent_thresh, newer_than, do_compress);
+		if (ret < 0)
+			break;
+		*sectors_defragged += range_len;
+	}
+out:
+	list_for_each_entry_safe(entry, tmp, &target_list, list) {
+		list_del_init(&entry->list);
+		kfree(entry);
+	}
+	return ret;
+}
+
 /*
  * Entry point to file defragmentation.
  *
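The caller side is reworked in a later patch of this series: roughly, btrfs_defrag_file() walks the file in 256K steps and hands each cluster to this helper. A simplified sketch of that loop, with cur, last_byte, ra, and the other arguments assumed from the caller's context (illustrative, not the exact committed code):

/* Hypothetical sketch of the cluster walk; not part of this commit. */
while (cur < last_byte) {
	u64 cluster_end;

	/* Aim for a page-aligned cluster end (CLUSTER_SIZE is 256K). */
	cluster_end = (((cur >> PAGE_SHIFT) +
			(SZ_256K >> PAGE_SHIFT)) << PAGE_SHIFT) - 1;
	cluster_end = min(cluster_end, last_byte);

	ret = defrag_one_cluster(BTRFS_I(inode), ra, cur,
				 cluster_end + 1 - cur, extent_thresh,
				 newer_than, do_compress,
				 &sectors_defragged, max_to_defrag);
	if (ret < 0)
		break;
	cur = cluster_end + 1;
}

Because defrag_one_cluster() re-collects targets for each 256K window, holes punched after collection are simply skipped and only affect the @sectors_defragged accounting, as the commit message notes.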
