@@ -556,23 +556,22 @@ const struct address_space_operations nfs_file_aops = {
  */
 static vm_fault_t nfs_vm_page_mkwrite(struct vm_fault *vmf)
 {
-	struct page *page = vmf->page;
 	struct file *filp = vmf->vma->vm_file;
 	struct inode *inode = file_inode(filp);
 	unsigned pagelen;
 	vm_fault_t ret = VM_FAULT_NOPAGE;
 	struct address_space *mapping;
-	struct folio *folio = page_folio(page);
+	struct folio *folio = page_folio(vmf->page);
 
 	dfprintk(PAGECACHE, "NFS: vm_page_mkwrite(%pD2(%lu), offset %lld)\n",
-		filp, filp->f_mapping->host->i_ino,
-		(long long)page_offset(page));
+		 filp, filp->f_mapping->host->i_ino,
+		 (long long)folio_file_pos(folio));
 
 	sb_start_pagefault(inode->i_sb);
 
 	/* make sure the cache has finished storing the page */
-	if (PageFsCache(page) &&
-	    wait_on_page_fscache_killable(vmf->page) < 0) {
+	if (folio_test_fscache(folio) &&
+	    folio_wait_fscache_killable(folio) < 0) {
 		ret = VM_FAULT_RETRY;
 		goto out;
 	}
@@ -581,14 +580,14 @@ static vm_fault_t nfs_vm_page_mkwrite(struct vm_fault *vmf)
 			nfs_wait_bit_killable,
 			TASK_KILLABLE|TASK_FREEZABLE_UNSAFE);
 
-	lock_page(page);
-	mapping = page_file_mapping(page);
+	folio_lock(folio);
+	mapping = folio_file_mapping(folio);
 	if (mapping != inode->i_mapping)
 		goto out_unlock;
 
-	wait_on_page_writeback(page);
+	folio_wait_writeback(folio);
 
-	pagelen = nfs_page_length(page);
+	pagelen = nfs_folio_length(folio);
 	if (pagelen == 0)
 		goto out_unlock;
 
@@ -599,7 +598,7 @@ static vm_fault_t nfs_vm_page_mkwrite(struct vm_fault *vmf)
 
 	ret = VM_FAULT_SIGBUS;
 out_unlock:
-	unlock_page(page);
+	folio_unlock(folio);
 out:
 	sb_end_pagefault(inode->i_sb);
 	return ret;