@@ -1431,6 +1431,126 @@ static int cluster_pages_for_defrag(struct inode *inode,
 
 }
 
+struct defrag_target_range {
+	struct list_head list;
+	u64 start;
+	u64 len;
+};
+
+/*
+ * Collect all valid target extents.
+ *
+ * @start:         file offset to lookup
+ * @len:           length to lookup
+ * @extent_thresh: file extent size threshold, any extent size >= this value
+ *                 will be ignored
+ * @newer_than:    only defrag extents newer than this value
+ * @do_compress:   whether the defrag is doing compression
+ *                 if true, @extent_thresh will be ignored and all regular
+ *                 file extents meeting @newer_than will be targets.
+ * @target_list:   list of target file extents
+ */
+static int defrag_collect_targets(struct btrfs_inode *inode,
+				  u64 start, u64 len, u32 extent_thresh,
+				  u64 newer_than, bool do_compress,
+				  struct list_head *target_list)
+{
+	u64 cur = start;
+	int ret = 0;
+
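+	/*
+	 * Walk the extent maps covering [start, start + len), one extent
+	 * per iteration, and queue every extent worth defragging.
+	 */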
+	while (cur < start + len) {
+		struct extent_map *em;
+		struct defrag_target_range *new;
+		bool next_mergeable = true;
+		u64 range_len;
+
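+		/* Look up the extent map at @cur; stop if there is none. */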
+		em = defrag_lookup_extent(&inode->vfs_inode, cur);
+		if (!em)
+			break;
+
+		/* Skip hole/inline/preallocated extents */
+		if (em->block_start >= EXTENT_MAP_LAST_BYTE ||
+		    test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
+			goto next;
+
+		/* Skip older extent */
+		if (em->generation < newer_than)
+			goto next;
+
+		/*
+		 * For the do_compress case, we want to compress all valid file
+		 * extents, thus no @extent_thresh or mergeable check.
+		 */
+		if (do_compress)
+			goto add;
+
+		/* Skip too large extent */
+		if (em->len >= extent_thresh)
+			goto next;
+
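+		/*
+		 * A small extent is only worth defragging if it can merge
+		 * with a neighbour: either the next extent on disk or the
+		 * last range already collected below.
+		 */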
+		next_mergeable = defrag_check_next_extent(&inode->vfs_inode, em);
+		if (!next_mergeable) {
+			struct defrag_target_range *last;
+
+			/* Empty target list, no way to merge with last entry */
+			if (list_empty(target_list))
+				goto next;
+			last = list_entry(target_list->prev,
+					  struct defrag_target_range, list);
+			/* Not mergeable with last entry */
+			if (last->start + last->len != cur)
+				goto next;
+
+			/* Mergeable, fall through to add it to @target_list. */
+		}
+
+add:
+		range_len = min(extent_map_end(em), start + len) - cur;
+		/*
+		 * This one is a good target, check if it can be merged into
+		 * the last range of the target list.
+		 */
+		if (!list_empty(target_list)) {
+			struct defrag_target_range *last;
+
+			last = list_entry(target_list->prev,
+					  struct defrag_target_range, list);
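+			/*
+			 * Targets are collected in ascending order, so the
+			 * last entry never extends past @cur.
+			 */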
+			ASSERT(last->start + last->len <= cur);
+			if (last->start + last->len == cur) {
+				/* Mergeable, enlarge the last entry */
+				last->len += range_len;
+				goto next;
+			}
+			/* Fall through to allocate a new entry */
+		}
+
+		/* Allocate new defrag_target_range */
+		new = kmalloc(sizeof(*new), GFP_NOFS);
+		if (!new) {
+			free_extent_map(em);
+			ret = -ENOMEM;
+			break;
+		}
+		new->start = cur;
+		new->len = range_len;
+		list_add_tail(&new->list, target_list);
+
+next:
+		cur = extent_map_end(em);
+		free_extent_map(em);
+	}
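+	/*
+	 * On error, free every range collected so far, so the caller
+	 * always gets back an empty @target_list.
+	 */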
+	if (ret < 0) {
+		struct defrag_target_range *entry;
+		struct defrag_target_range *tmp;
+
+		list_for_each_entry_safe(entry, tmp, target_list, list) {
+			list_del_init(&entry->list);
+			kfree(entry);
+		}
+	}
+	return ret;
+}
+
 /*
  * Entry point to file defragmentation.
  *
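
For context, here is a minimal sketch (not part of this commit) of how a
caller might consume the @target_list that defrag_collect_targets() builds:
process each collected range, then free every entry, mirroring the error
cleanup inside the helper itself. defrag_process_range() is a hypothetical
placeholder for the per-range defrag work, not a function in this patch.

static int defrag_one_range_list(struct btrfs_inode *inode, u64 start,
				 u64 len, u32 extent_thresh, u64 newer_than,
				 bool do_compress)
{
	struct defrag_target_range *entry;
	struct defrag_target_range *tmp;
	LIST_HEAD(target_list);
	int ret;

	ret = defrag_collect_targets(inode, start, len, extent_thresh,
				     newer_than, do_compress, &target_list);
	if (ret < 0)
		return ret;	/* the helper already emptied the list */

	list_for_each_entry_safe(entry, tmp, &target_list, list) {
		/* Stop processing after the first error, but keep freeing */
		if (ret >= 0)
			ret = defrag_process_range(inode, entry->start,
						   entry->len);
		list_del_init(&entry->list);
		kfree(entry);
	}
	return ret;
}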