@@ -14,14 +14,28 @@
 #include <linux/netfs.h>
 #include "internal.h"
 
+static void afs_write_to_cache(struct afs_vnode *vnode, loff_t start, size_t len,
+                               loff_t i_size, bool caching);
+
+#ifdef CONFIG_AFS_FSCACHE
 /*
- * mark a page as having been made dirty and thus needing writeback
+ * Mark a page as having been made dirty and thus needing writeback.  We also
+ * need to pin the cache object to write back to.
  */
 int afs_set_page_dirty(struct page *page)
 {
-        _enter("");
-        return __set_page_dirty_nobuffers(page);
+        return fscache_set_page_dirty(page, afs_vnode_cache(AFS_FS_I(page->mapping->host)));
+}
+static void afs_folio_start_fscache(bool caching, struct folio *folio)
+{
+        if (caching)
+                folio_start_fscache(folio);
+}
+#else
+static void afs_folio_start_fscache(bool caching, struct folio *folio)
+{
 }
+#endif
 
 /*
  * prepare to perform part of a write to a page
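For context: PG_fscache is used here the way PG_writeback is used for server I/O. The bit is held on a folio while its data is being copied to the local cache, and anyone wanting to modify or reclaim the folio must wait for it to clear first. A minimal illustration of that wait-before-modify rule, assuming CONFIG_AFS_FSCACHE; the helper name my_wait_for_folio_io() is hypothetical, not part of the patch:

/* Illustrative sketch only.  Both bits must be waited on, since a folio
 * can be under a cache write without being under server writeback and
 * vice versa.
 */
static void my_wait_for_folio_io(struct folio *folio)
{
        folio_wait_writeback(folio);    /* wait for PG_writeback to clear */
        folio_wait_fscache(folio);      /* wait for PG_fscache to clear */
}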
@@ -113,7 +127,7 @@ int afs_write_end(struct file *file, struct address_space *mapping,
         unsigned long priv;
         unsigned int f, from = offset_in_folio(folio, pos);
         unsigned int t, to = from + copied;
-        loff_t i_size, maybe_i_size;
+        loff_t i_size, write_end_pos;
 
         _enter("{%llx:%llu},{%lx}",
                vnode->fid.vid, vnode->fid.vnode, folio_index(folio));
@@ -130,15 +144,16 @@ int afs_write_end(struct file *file, struct address_space *mapping,
         if (copied == 0)
                 goto out;
 
-        maybe_i_size = pos + copied;
+        write_end_pos = pos + copied;
 
         i_size = i_size_read(&vnode->vfs_inode);
-        if (maybe_i_size > i_size) {
+        if (write_end_pos > i_size) {
                 write_seqlock(&vnode->cb_lock);
                 i_size = i_size_read(&vnode->vfs_inode);
-                if (maybe_i_size > i_size)
-                        afs_set_i_size(vnode, maybe_i_size);
+                if (write_end_pos > i_size)
+                        afs_set_i_size(vnode, write_end_pos);
                 write_sequnlock(&vnode->cb_lock);
+                fscache_update_cookie(afs_vnode_cache(vnode), NULL, &write_end_pos);
         }
 
         if (folio_test_private(folio)) {
@@ -417,6 +432,7 @@ static void afs_extend_writeback(struct address_space *mapping,
                                  loff_t start,
                                  loff_t max_len,
                                  bool new_content,
+                                 bool caching,
                                  unsigned int *_len)
 {
         struct pagevec pvec;
@@ -463,7 +479,9 @@ static void afs_extend_writeback(struct address_space *mapping,
                         folio_put(folio);
                         break;
                 }
-                if (!folio_test_dirty(folio) || folio_test_writeback(folio)) {
+                if (!folio_test_dirty(folio) ||
+                    folio_test_writeback(folio) ||
+                    folio_test_fscache(folio)) {
                         folio_unlock(folio);
                         folio_put(folio);
                         break;
@@ -511,6 +529,7 @@ static void afs_extend_writeback(struct address_space *mapping,
                         BUG();
                 if (folio_start_writeback(folio))
                         BUG();
+                afs_folio_start_fscache(caching, folio);
 
                 *_count -= folio_nr_pages(folio);
                 folio_unlock(folio);
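The pattern in this hunk, setting PG_writeback and then conditionally PG_fscache before unlocking, is what prevents anyone else racing to start another write on the same folio. A hedged restatement of that claim step (my_claim_folio() is a hypothetical name, not AFS code):

/* Claim a locked, dirty folio for a combined server + cache write.  Both
 * I/O bits are set while the folio lock is still held, so no other
 * writeback can begin on it once we unlock.
 */
static void my_claim_folio(struct folio *folio, bool caching)
{
        if (folio_start_writeback(folio))
                BUG();          /* caller verified it wasn't under writeback */
        if (caching)
                folio_start_fscache(folio);
        folio_unlock(folio);
}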
@@ -538,13 +557,15 @@ static ssize_t afs_write_back_from_locked_folio(struct address_space *mapping,
         unsigned int offset, to, len, max_len;
         loff_t i_size = i_size_read(&vnode->vfs_inode);
         bool new_content = test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags);
+        bool caching = fscache_cookie_enabled(afs_vnode_cache(vnode));
         long count = wbc->nr_to_write;
         int ret;
 
         _enter(",%lx,%llx-%llx", folio_index(folio), start, end);
 
         if (folio_start_writeback(folio))
                 BUG();
+        afs_folio_start_fscache(caching, folio);
 
         count -= folio_nr_pages(folio);
@@ -571,7 +592,8 @@ static ssize_t afs_write_back_from_locked_folio(struct address_space *mapping,
                 if (len < max_len &&
                     (to == folio_size(folio) || new_content))
                         afs_extend_writeback(mapping, vnode, &count,
-                                             start, max_len, new_content, &len);
+                                             start, max_len, new_content,
+                                             caching, &len);
                 len = min_t(loff_t, len, max_len);
         }
 
@@ -584,12 +606,19 @@ static ssize_t afs_write_back_from_locked_folio(struct address_space *mapping,
         if (start < i_size) {
                 _debug("write back %x @%llx [%llx]", len, start, i_size);
 
+                /* Speculatively write to the cache.  We have to fix this up
+                 * later if the store fails.
+                 */
+                afs_write_to_cache(vnode, start, len, i_size, caching);
+
                 iov_iter_xarray(&iter, WRITE, &mapping->i_pages, start, len);
                 ret = afs_store_data(vnode, &iter, start, false);
         } else {
                 _debug("write discard %x @%llx [%llx]", len, start, i_size);
 
                 /* The dirty region was entirely beyond the EOF. */
+                fscache_clear_page_bits(afs_vnode_cache(vnode),
+                                        mapping, start, len, caching);
                 afs_pages_written_back(vnode, start, len);
                 ret = 0;
         }
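These two branches pair up the PG_fscache lifecycle: every folio in the range had PG_fscache set when it was claimed, and the bit must be cleared exactly once — by fscache_write_to_cache() as the cache I/O completes on the store path, or by fscache_clear_page_bits() on the discard path where nothing is written. A hedged sketch of that pairing (my_store_or_discard() is a hypothetical helper, not part of the patch):

/* Either write the folios to the cache (which clears PG_fscache as the
 * I/O completes) or clear the bits explicitly when the range is dropped.
 */
static void my_store_or_discard(struct fscache_cookie *cookie,
                                struct address_space *mapping,
                                loff_t start, size_t len, loff_t i_size,
                                netfs_io_terminated_t done, void *priv,
                                bool caching, bool discard)
{
        if (discard)
                fscache_clear_page_bits(cookie, mapping, start, len, caching);
        else
                fscache_write_to_cache(cookie, mapping, start, len, i_size,
                                       done, priv, caching);
}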
@@ -648,6 +677,10 @@ int afs_writepage(struct page *subpage, struct writeback_control *wbc)
 
         _enter("{%lx},", folio_index(folio));
 
+#ifdef CONFIG_AFS_FSCACHE
+        folio_wait_fscache(folio);
+#endif
+
         start = folio_index(folio) * PAGE_SIZE;
         ret = afs_write_back_from_locked_folio(folio_mapping(folio), wbc,
                                                folio, start, LLONG_MAX - start);
@@ -713,10 +746,15 @@ static int afs_writepages_region(struct address_space *mapping,
                         continue;
                 }
 
-                if (folio_test_writeback(folio)) {
+                if (folio_test_writeback(folio) ||
+                    folio_test_fscache(folio)) {
                         folio_unlock(folio);
-                        if (wbc->sync_mode != WB_SYNC_NONE)
+                        if (wbc->sync_mode != WB_SYNC_NONE) {
                                 folio_wait_writeback(folio);
+#ifdef CONFIG_AFS_FSCACHE
+                                folio_wait_fscache(folio);
+#endif
+                        }
                         folio_put(folio);
                         continue;
                 }
@@ -969,3 +1007,28 @@ int afs_launder_page(struct page *subpage)
         folio_wait_fscache(folio);
         return ret;
 }
+
+/*
+ * Deal with the completion of writing the data to the cache.
+ */
+static void afs_write_to_cache_done(void *priv, ssize_t transferred_or_error,
+                                    bool was_async)
+{
+        struct afs_vnode *vnode = priv;
+
+        if (IS_ERR_VALUE(transferred_or_error) &&
+            transferred_or_error != -ENOBUFS)
+                afs_invalidate_cache(vnode, 0);
+}
+
+/*
+ * Save the write to the cache also.
+ */
+static void afs_write_to_cache(struct afs_vnode *vnode,
+                               loff_t start, size_t len, loff_t i_size,
+                               bool caching)
+{
+        fscache_write_to_cache(afs_vnode_cache(vnode),
+                               vnode->vfs_inode.i_mapping, start, len, i_size,
+                               afs_write_to_cache_done, vnode, caching);
+}
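A note on the termination callback contract as used here: transferred_or_error carries either the number of bytes written or a negative errno. -ENOBUFS is special-cased above because it only means the cache declined the write (for instance, no space or the cookie was withdrawn), leaving the cache merely incomplete; any other error leaves the cached data potentially inconsistent with the server, hence the afs_invalidate_cache() call. A minimal sketch of a conforming callback for a hypothetical netfs (my_cache_write_done() is not AFS code):

static void my_cache_write_done(void *priv, ssize_t transferred_or_error,
                                bool was_async)
{
        if (!IS_ERR_VALUE(transferred_or_error)) {
                /* transferred_or_error is a byte count on success */
                return;
        }
        if (transferred_or_error != -ENOBUFS)
                pr_warn("cache write failed: %zd\n", transferred_or_error);
}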