@@ -460,35 +460,6 @@ int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index)
         return ret;
 }
 
-/*
- * Invalidate exceptional DAX entry if easily possible. This handles DAX
- * entries for invalidate_inode_pages() so we evict the entry only if we can
- * do so without blocking.
- */
-int dax_invalidate_mapping_entry(struct address_space *mapping, pgoff_t index)
-{
-        int ret = 0;
-        void *entry, **slot;
-        struct radix_tree_root *page_tree = &mapping->page_tree;
-
-        spin_lock_irq(&mapping->tree_lock);
-        entry = __radix_tree_lookup(page_tree, index, NULL, &slot);
-        if (!entry || !radix_tree_exceptional_entry(entry) ||
-            slot_locked(mapping, slot))
-                goto out;
-        if (radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_DIRTY) ||
-            radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_TOWRITE))
-                goto out;
-        radix_tree_delete(page_tree, index);
-        mapping->nrexceptional--;
-        ret = 1;
-out:
-        spin_unlock_irq(&mapping->tree_lock);
-        if (ret)
-                dax_wake_mapping_entry_waiter(mapping, index, entry, true);
-        return ret;
-}
-
 /*
  * Invalidate exceptional DAX entry if it is clean.
  */
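
For reference, the helper deleted above implemented a strictly non-blocking eviction: it gave up whenever the entry was locked or still dirty, and only dropped entries it could remove immediately. A toy user-space model of that decision logic, with all names invented for illustration (none of this is kernel API):

#include <stdbool.h>
#include <stdio.h>

/*
 * Toy model of the removed dax_invalidate_mapping_entry() logic:
 * evict a cache entry only when that cannot block, i.e. the entry
 * exists, nobody holds its lock bit, and writeback has nothing left
 * to flush. Returns 1 if the entry was evicted, 0 otherwise.
 */
struct toy_entry {
        bool present;
        bool locked;    /* another thread is operating on it */
        bool dirty;     /* writeback still needs this entry */
};

static int invalidate_if_easy(struct toy_entry *e)
{
        if (!e->present || e->locked)
                return 0;       /* waiting for the lock could block */
        if (e->dirty)
                return 0;       /* flushing it out could block */
        e->present = false;     /* clean and unlocked: drop it now */
        return 1;
}

int main(void)
{
        struct toy_entry clean = { .present = true };
        struct toy_entry dirty = { .present = true, .dirty = true };

        printf("clean evicted: %d\n", invalidate_if_easy(&clean)); /* 1 */
        printf("dirty evicted: %d\n", invalidate_if_easy(&dirty)); /* 0 */
        return 0;
}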
@@ -1044,7 +1015,7 @@ dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
          * into page tables. We have to tear down these mappings so that data
          * written by write(2) is visible in mmap.
          */
-        if ((iomap->flags & IOMAP_F_NEW) && inode->i_mapping->nrpages) {
+        if (iomap->flags & IOMAP_F_NEW) {
                 invalidate_inode_pages2_range(inode->i_mapping,
                                               pos >> PAGE_SHIFT,
                                               (end - 1) >> PAGE_SHIFT);
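
The dropped `&& inode->i_mapping->nrpages` guard looks like a harmless optimization, but for DAX it is unsafe, as I read it: DAX radix tree entries are accounted in `nrexceptional`, not `nrpages`, so a mapping can hold stale entries to shoot down while `nrpages` is still zero. A minimal sketch of that hazard, using an invented toy struct:

#include <stdio.h>

/*
 * Invented toy struct: a mapping that, like the kernel's
 * struct address_space, counts regular pages and exceptional
 * (DAX/shadow) entries separately.
 */
struct toy_mapping {
        unsigned long nrpages;          /* regular page cache pages */
        unsigned long nrexceptional;    /* DAX radix tree entries */
};

int main(void)
{
        /* freshly written DAX file: no pages, but three DAX entries */
        struct toy_mapping m = { .nrpages = 0, .nrexceptional = 3 };

        if (m.nrpages)
                puts("old guard: invalidating range");
        else
                printf("old guard: skipped, %lu stale DAX entries survive\n",
                       m.nrexceptional);

        puts("new code: invalidates whenever IOMAP_F_NEW is set");
        return 0;
}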
@@ -1177,6 +1148,12 @@ static int dax_iomap_pte_fault(struct vm_fault *vmf,
         if ((vmf->flags & FAULT_FLAG_WRITE) && !vmf->cow_page)
                 flags |= IOMAP_WRITE;
 
+        entry = grab_mapping_entry(mapping, vmf->pgoff, 0);
+        if (IS_ERR(entry)) {
+                vmf_ret = dax_fault_return(PTR_ERR(entry));
+                goto out;
+        }
+
         /*
          * Note that we don't bother to use iomap_apply here: DAX required
          * the file system block size to be equal the page size, which means
@@ -1185,17 +1162,11 @@ static int dax_iomap_pte_fault(struct vm_fault *vmf,
         error = ops->iomap_begin(inode, pos, PAGE_SIZE, flags, &iomap);
         if (error) {
                 vmf_ret = dax_fault_return(error);
-                goto out;
+                goto unlock_entry;
         }
         if (WARN_ON_ONCE(iomap.offset + iomap.length < pos + PAGE_SIZE)) {
-                vmf_ret = dax_fault_return(-EIO);       /* fs corruption? */
-                goto finish_iomap;
-        }
-
-        entry = grab_mapping_entry(mapping, vmf->pgoff, 0);
-        if (IS_ERR(entry)) {
-                vmf_ret = dax_fault_return(PTR_ERR(entry));
-                goto finish_iomap;
+                error = -EIO;   /* fs corruption? */
+                goto error_finish_iomap;
         }
 
         sector = dax_iomap_sector(&iomap, pos);
@@ -1217,13 +1188,13 @@ static int dax_iomap_pte_fault(struct vm_fault *vmf,
                 }
 
                 if (error)
-                        goto error_unlock_entry;
+                        goto error_finish_iomap;
 
                 __SetPageUptodate(vmf->cow_page);
                 vmf_ret = finish_fault(vmf);
                 if (!vmf_ret)
                         vmf_ret = VM_FAULT_DONE_COW;
-                goto unlock_entry;
+                goto finish_iomap;
         }
@@ -1243,7 +1214,7 @@ static int dax_iomap_pte_fault(struct vm_fault *vmf,
         case IOMAP_HOLE:
                 if (!(vmf->flags & FAULT_FLAG_WRITE)) {
                         vmf_ret = dax_load_hole(mapping, &entry, vmf);
-                        goto unlock_entry;
+                        goto finish_iomap;
                 }
                 /*FALLTHRU*/
         default:
@@ -1252,10 +1223,8 @@ static int dax_iomap_pte_fault(struct vm_fault *vmf,
                 break;
         }
 
- error_unlock_entry:
+ error_finish_iomap:
         vmf_ret = dax_fault_return(error) | major;
- unlock_entry:
-        put_locked_mapping_entry(mapping, vmf->pgoff, entry);
  finish_iomap:
         if (ops->iomap_end) {
                 int copied = PAGE_SIZE;
@@ -1270,7 +1239,9 @@ static int dax_iomap_pte_fault(struct vm_fault *vmf,
          */
                 ops->iomap_end(inode, pos, PAGE_SIZE, copied, flags, &iomap);
         }
- out:
+ unlock_entry:
+        put_locked_mapping_entry(mapping, vmf->pgoff, entry);
+ out:
         trace_dax_pte_fault_done(inode, vmf, vmf_ret);
         return vmf_ret;
 }
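
After this reordering, the PTE fault path acquires the entry lock first and starts the iomap operation second, and the labels release them strictly in reverse: error_finish_iomap, then finish_iomap, then unlock_entry, then out. A compact, compilable sketch of that goto-unwind shape, with invented user-space stand-ins for the two resources:

#include <stdbool.h>
#include <stdio.h>

/* Invented stand-ins for the two resources the fault handler holds. */
static bool grab_entry(void) { puts("grab entry lock"); return true; }
static void put_entry(void)  { puts("put entry lock");  }
static bool begin_io(void)   { puts("iomap_begin");     return true; }
static void end_io(void)     { puts("iomap_end");       }

/*
 * Mirrors the control flow set up above: acquire the entry lock, then
 * the iomap operation; tear them down in reverse order, one label per
 * step, so every exit path releases exactly what it acquired.
 */
static int handle_fault(bool fail_midway)
{
        int ret = -1;

        if (!grab_entry())
                goto out;
        if (!begin_io())
                goto unlock_entry;      /* only the entry lock is held */

        if (fail_midway)
                goto finish_iomap;      /* both resources are held */
        ret = 0;

finish_iomap:
        end_io();                       /* released before ... */
unlock_entry:
        put_entry();                    /* ... the entry lock */
out:
        return ret;
}

int main(void)
{
        handle_fault(true);
        return 0;
}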
@@ -1416,6 +1387,16 @@ static int dax_iomap_pmd_fault(struct vm_fault *vmf,
         if ((pgoff | PG_PMD_COLOUR) > max_pgoff)
                 goto fallback;
 
+        /*
+         * grab_mapping_entry() will make sure we get a 2M empty entry, a DAX
+         * PMD or a HZP entry. If it can't (because a 4k page is already in
+         * the tree, for instance), it will return -EEXIST and we just fall
+         * back to 4k entries.
+         */
+        entry = grab_mapping_entry(mapping, pgoff, RADIX_DAX_PMD);
+        if (IS_ERR(entry))
+                goto fallback;
+
         /*
          * Note that we don't use iomap_apply here. We aren't doing I/O, only
          * setting up a mapping, so really we're using iomap_begin() as a way
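
The PMD path gets the same treatment as the PTE path: the entry is grabbed (and locked) before iomap_begin(), and a conflicting 4k entry simply produces -EEXIST and a fallback. grab_mapping_entry() reports that through the kernel's ERR_PTR()/IS_ERR() convention, which packs a small negative errno into the pointer value. A self-contained user-space re-creation of that idiom (grab_pmd_entry() and the globals are invented for the demo):

#include <errno.h>
#include <stdio.h>

/*
 * User-space re-creation of the kernel's ERR_PTR()/IS_ERR()/PTR_ERR()
 * idiom: errno values -1..-4095 are encoded in the top page of the
 * address space, so one pointer return carries either a valid entry
 * or an error such as -EEXIST.
 */
#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static int dummy_entry;                 /* stands in for a radix tree entry */
static int pte_entry_present = 1;       /* pretend a 4k entry already exists */

/* Invented stand-in for grab_mapping_entry(..., RADIX_DAX_PMD). */
static void *grab_pmd_entry(void)
{
        if (pte_entry_present)
                return ERR_PTR(-EEXIST); /* caller falls back to 4k faults */
        return &dummy_entry;
}

int main(void)
{
        void *entry = grab_pmd_entry();

        if (IS_ERR(entry))
                printf("fallback to PTEs, err=%ld\n", PTR_ERR(entry));
        return 0;
}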
@@ -1424,38 +1405,26 @@ static int dax_iomap_pmd_fault(struct vm_fault *vmf,
         pos = (loff_t)pgoff << PAGE_SHIFT;
         error = ops->iomap_begin(inode, pos, PMD_SIZE, iomap_flags, &iomap);
         if (error)
-                goto fallback;
+                goto unlock_entry;
 
         if (iomap.offset + iomap.length < pos + PMD_SIZE)
                 goto finish_iomap;
 
-        /*
-         * grab_mapping_entry() will make sure we get a 2M empty entry, a DAX
-         * PMD or a HZP entry. If it can't (because a 4k page is already in
-         * the tree, for instance), it will return -EEXIST and we just fall
-         * back to 4k entries.
-         */
-        entry = grab_mapping_entry(mapping, pgoff, RADIX_DAX_PMD);
-        if (IS_ERR(entry))
-                goto finish_iomap;
-
         switch (iomap.type) {
         case IOMAP_MAPPED:
                 result = dax_pmd_insert_mapping(vmf, &iomap, pos, &entry);
                 break;
         case IOMAP_UNWRITTEN:
         case IOMAP_HOLE:
                 if (WARN_ON_ONCE(write))
-                        goto unlock_entry;
+                        break;
                 result = dax_pmd_load_hole(vmf, &iomap, &entry);
                 break;
         default:
                 WARN_ON_ONCE(1);
                 break;
         }
 
- unlock_entry:
-        put_locked_mapping_entry(mapping, pgoff, entry);
  finish_iomap:
         if (ops->iomap_end) {
                 int copied = PMD_SIZE;
@@ -1471,6 +1440,8 @@ static int dax_iomap_pmd_fault(struct vm_fault *vmf,
                 ops->iomap_end(inode, pos, PMD_SIZE, copied, iomap_flags,
                                 &iomap);
         }
+ unlock_entry:
+        put_locked_mapping_entry(mapping, pgoff, entry);
  fallback:
         if (result == VM_FAULT_FALLBACK) {
                 split_huge_pmd(vma, vmf->pmd, vmf->address);
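
One subtlety in the PMD hunks above: with unlock_entry now placed after finish_iomap, the WARN_ON_ONCE(write) arm can no longer jump straight to the unlock label without skipping iomap_end(), so it breaks out of the switch instead, leaving result as VM_FAULT_FALLBACK and flowing through the shared teardown. A sketch of that post-switch flow, simplified and with invented names:

#include <stdio.h>

enum toy_result { TOY_FALLBACK, TOY_NOPAGE };

/*
 * Sketch of the reordered PMD teardown: break leaves the switch with
 * result still TOY_FALLBACK, and every exit then passes finish_iomap
 * and unlock_entry in order before the fallback check runs.
 */
static enum toy_result pmd_fault(int is_hole, int is_write)
{
        enum toy_result result = TOY_FALLBACK;

        switch (is_hole) {
        case 1:
                if (is_write)
                        break;          /* unexpected write fault on a hole */
                result = TOY_NOPAGE;    /* e.g. huge zero page installed */
                break;
        default:
                break;
        }

        puts("iomap_end");              /* finish_iomap: */
        puts("put entry lock");         /* unlock_entry: */
        if (result == TOY_FALLBACK)
                puts("split PMD, retry with PTE faults"); /* fallback: */
        return result;
}

int main(void)
{
        pmd_fault(1, 1);
        return 0;
}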