 #include <linux/fscache.h>
 #include "internal.h"
 
+static struct netfs_io_request *erofs_fscache_alloc_request(struct address_space *mapping,
+					loff_t start, size_t len)
+{
+	struct netfs_io_request *rreq;
+
+	rreq = kzalloc(sizeof(struct netfs_io_request), GFP_KERNEL);
+	if (!rreq)
+		return ERR_PTR(-ENOMEM);
+
+	rreq->start = start;
+	rreq->len = len;
+	rreq->mapping = mapping;
+	INIT_LIST_HEAD(&rreq->subrequests);
+	refcount_set(&rreq->ref, 1);
+	return rreq;
+}
+
+static void erofs_fscache_put_request(struct netfs_io_request *rreq)
+{
+	if (!refcount_dec_and_test(&rreq->ref))
+		return;
+	if (rreq->cache_resources.ops)
+		rreq->cache_resources.ops->end_operation(&rreq->cache_resources);
+	kfree(rreq);
+}
+
+static void erofs_fscache_put_subrequest(struct netfs_io_subrequest *subreq)
+{
+	if (!refcount_dec_and_test(&subreq->ref))
+		return;
+	erofs_fscache_put_request(subreq->rreq);
+	kfree(subreq);
+}
+
+static void erofs_fscache_clear_subrequests(struct netfs_io_request *rreq)
+{
+	struct netfs_io_subrequest *subreq;
+
+	while (!list_empty(&rreq->subrequests)) {
+		subreq = list_first_entry(&rreq->subrequests,
+				struct netfs_io_subrequest, rreq_link);
+		list_del(&subreq->rreq_link);
+		erofs_fscache_put_subrequest(subreq);
+	}
+}
+
+static void erofs_fscache_rreq_unlock_folios(struct netfs_io_request *rreq)
+{
+	struct netfs_io_subrequest *subreq;
+	struct folio *folio;
+	unsigned int iopos = 0;
+	pgoff_t start_page = rreq->start / PAGE_SIZE;
+	pgoff_t last_page = ((rreq->start + rreq->len) / PAGE_SIZE) - 1;
+	bool subreq_failed = false;
+
+	XA_STATE(xas, &rreq->mapping->i_pages, start_page);
+
+	subreq = list_first_entry(&rreq->subrequests,
+			struct netfs_io_subrequest, rreq_link);
+	subreq_failed = (subreq->error < 0);
+
+	rcu_read_lock();
+	xas_for_each(&xas, folio, last_page) {
+		unsigned int pgpos =
+			(folio_index(folio) - start_page) * PAGE_SIZE;
+		unsigned int pgend = pgpos + folio_size(folio);
+		bool pg_failed = false;
+
+		for (;;) {
+			if (!subreq) {
+				pg_failed = true;
+				break;
+			}
+
+			pg_failed |= subreq_failed;
+			if (pgend < iopos + subreq->len)
+				break;
+
+			iopos += subreq->len;
+			if (!list_is_last(&subreq->rreq_link,
+					  &rreq->subrequests)) {
+				subreq = list_next_entry(subreq, rreq_link);
+				subreq_failed = (subreq->error < 0);
+			} else {
+				subreq = NULL;
+				subreq_failed = false;
+			}
+			if (pgend == iopos)
+				break;
+		}
+
+		if (!pg_failed)
+			folio_mark_uptodate(folio);
+
+		folio_unlock(folio);
+	}
+	rcu_read_unlock();
+}
+
+static void erofs_fscache_rreq_complete(struct netfs_io_request *rreq)
+{
+	erofs_fscache_rreq_unlock_folios(rreq);
+	erofs_fscache_clear_subrequests(rreq);
+	erofs_fscache_put_request(rreq);
+}
+
+static void erofs_fscache_subreq_complete(void *priv,
+		ssize_t transferred_or_error, bool was_async)
+{
+	struct netfs_io_subrequest *subreq = priv;
+	struct netfs_io_request *rreq = subreq->rreq;
+
+	if (IS_ERR_VALUE(transferred_or_error))
+		subreq->error = transferred_or_error;
+
+	if (atomic_dec_and_test(&rreq->nr_outstanding))
+		erofs_fscache_rreq_complete(rreq);
+
+	erofs_fscache_put_subrequest(subreq);
+}
+
 /*
  * Read data from fscache and fill the read data into page cache described by
- * @start/len, which shall be both aligned with PAGE_SIZE. @pstart describes
+ * @rreq, whose start/len shall both be aligned with PAGE_SIZE. @pstart describes
  * the start physical address in the cache file.
  */
-static int erofs_fscache_read_folios(struct fscache_cookie *cookie,
-				     struct address_space *mapping,
-				     loff_t start, size_t len,
-				     loff_t pstart)
+static int erofs_fscache_read_folios_async(struct fscache_cookie *cookie,
+		struct netfs_io_request *rreq, loff_t pstart)
 {
 	enum netfs_io_source source;
-	struct netfs_io_request rreq = {};
-	struct netfs_io_subrequest subreq = { .rreq = &rreq, };
-	struct netfs_cache_resources *cres = &rreq.cache_resources;
-	struct super_block *sb = mapping->host->i_sb;
+	struct super_block *sb = rreq->mapping->host->i_sb;
+	struct netfs_io_subrequest *subreq;
+	struct netfs_cache_resources *cres = &rreq->cache_resources;
 	struct iov_iter iter;
+	loff_t start = rreq->start;
+	size_t len = rreq->len;
 	size_t done = 0;
 	int ret;
 
+	atomic_set(&rreq->nr_outstanding, 1);
+
 	ret = fscache_begin_read_operation(cres, cookie);
 	if (ret)
-		return ret;
+		goto out;
 
 	while (done < len) {
-		subreq.start = pstart + done;
-		subreq.len = len - done;
-		subreq.flags = 1 << NETFS_SREQ_ONDEMAND;
+		subreq = kzalloc(sizeof(struct netfs_io_subrequest),
+				 GFP_KERNEL);
+		if (subreq) {
+			INIT_LIST_HEAD(&subreq->rreq_link);
+			refcount_set(&subreq->ref, 2);
+			subreq->rreq = rreq;
+			refcount_inc(&rreq->ref);
+		} else {
+			ret = -ENOMEM;
+			goto out;
+		}
+
+		subreq->start = pstart + done;
+		subreq->len = len - done;
+		subreq->flags = 1 << NETFS_SREQ_ONDEMAND;
 
-		source = cres->ops->prepare_read(&subreq, LLONG_MAX);
-		if (WARN_ON(subreq.len == 0))
+		list_add_tail(&subreq->rreq_link, &rreq->subrequests);
+
+		source = cres->ops->prepare_read(subreq, LLONG_MAX);
+		if (WARN_ON(subreq->len == 0))
 			source = NETFS_INVALID_READ;
 		if (source != NETFS_READ_FROM_CACHE) {
 			erofs_err(sb, "failed to fscache prepare_read (source %d)",
 				  source);
 			ret = -EIO;
+			subreq->error = ret;
+			erofs_fscache_put_subrequest(subreq);
 			goto out;
 		}
 
-		iov_iter_xarray(&iter, READ, &mapping->i_pages,
-				start + done, subreq.len);
-		ret = fscache_read(cres, subreq.start, &iter,
-				   NETFS_READ_HOLE_FAIL, NULL, NULL);
+		atomic_inc(&rreq->nr_outstanding);
+
+		iov_iter_xarray(&iter, READ, &rreq->mapping->i_pages,
+				start + done, subreq->len);
+
+		ret = fscache_read(cres, subreq->start, &iter,
+				   NETFS_READ_HOLE_FAIL,
+				   erofs_fscache_subreq_complete, subreq);
+		if (ret == -EIOCBQUEUED)
+			ret = 0;
 		if (ret) {
 			erofs_err(sb, "failed to fscache_read (ret %d)", ret);
 			goto out;
 		}
 
-		done += subreq.len;
+		done += subreq->len;
 	}
 out:
-	fscache_end_operation(cres);
+	if (atomic_dec_and_test(&rreq->nr_outstanding))
+		erofs_fscache_rreq_complete(rreq);
+
 	return ret;
 }
 
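The hunk above replaces the old synchronous, on-stack request with heap-allocated netfs_io_request/netfs_io_subrequest objects and a "biased counter" completion scheme: nr_outstanding starts at 1 so the request cannot complete while the submission loop is still queueing subrequests, each fscache_read() adds one count that its completion callback drops, and the submitter drops the initial count at the out: label, so whichever side decrements last runs erofs_fscache_rreq_complete(). A minimal userspace sketch of that pattern, assuming C11 <stdatomic.h> (the names below are illustrative, not kernel APIs):

#include <stdatomic.h>
#include <stdio.h>

struct request {
	atomic_int nr_outstanding;
};

static void complete_request(struct request *req)
{
	/* Stands in for erofs_fscache_rreq_complete(): unlock the folios,
	 * tear down the subrequest list, drop the last request ref. */
	printf("request complete\n");
}

/* Drop one count; whoever drops the last one runs completion. */
static void put_outstanding(struct request *req)
{
	if (atomic_fetch_sub(&req->nr_outstanding, 1) == 1)
		complete_request(req);
}

static void submit_all(struct request *req, int nr_subreqs)
{
	/* Bias the counter so completion cannot fire mid-submission. */
	atomic_store(&req->nr_outstanding, 1);

	for (int i = 0; i < nr_subreqs; i++) {
		atomic_fetch_add(&req->nr_outstanding, 1);
		/* An async read would be queued here; its completion
		 * callback ends with put_outstanding(), just as
		 * erofs_fscache_subreq_complete() decrements the counter.
		 * Simulate an immediate completion instead: */
		put_outstanding(req);
	}

	put_outstanding(req);	/* drop the submitter's bias count */
}

int main(void)
{
	struct request req;

	submit_all(&req, 3);	/* prints "request complete" exactly once */
	return 0;
}

The same bias keeps the error paths uniform: whether kzalloc(), prepare_read(), or fscache_read() fails partway through, the submitter's final decrement still guarantees exactly one completion.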
@@ -64,6 +210,7 @@ static int erofs_fscache_meta_readpage(struct file *data, struct page *page)
 	int ret;
 	struct folio *folio = page_folio(page);
 	struct super_block *sb = folio_mapping(folio)->host->i_sb;
+	struct netfs_io_request *rreq;
 	struct erofs_map_dev mdev = {
 		.m_deviceid = 0,
 		.m_pa = folio_pos(folio),
@@ -73,11 +220,15 @@ static int erofs_fscache_meta_readpage(struct file *data, struct page *page)
 	if (ret)
 		goto out;
 
-	ret = erofs_fscache_read_folios(mdev.m_fscache->cookie,
-			folio_mapping(folio), folio_pos(folio),
-			folio_size(folio), mdev.m_pa);
-	if (!ret)
-		folio_mark_uptodate(folio);
+	rreq = erofs_fscache_alloc_request(folio_mapping(folio),
+			folio_pos(folio), folio_size(folio));
+	if (IS_ERR(rreq)) {
+		ret = PTR_ERR(rreq);
+		goto out;
+	}
+
+	return erofs_fscache_read_folios_async(mdev.m_fscache->cookie,
+			rreq, mdev.m_pa);
 out:
 	folio_unlock(folio);
 	return ret;
@@ -117,6 +268,7 @@ static int erofs_fscache_readpage(struct file *file, struct page *page)
 	struct super_block *sb = inode->i_sb;
 	struct erofs_map_blocks map;
 	struct erofs_map_dev mdev;
+	struct netfs_io_request *rreq;
 	erofs_off_t pos;
 	loff_t pstart;
 	int ret;
@@ -149,10 +301,16 @@ static int erofs_fscache_readpage(struct file *file, struct page *page)
 	if (ret)
 		goto out_unlock;
 
+	rreq = erofs_fscache_alloc_request(folio_mapping(folio),
+			folio_pos(folio), folio_size(folio));
+	if (IS_ERR(rreq)) {
+		ret = PTR_ERR(rreq);
+		goto out_unlock;
+	}
+
 	pstart = mdev.m_pa + (pos - map.m_la);
-	ret = erofs_fscache_read_folios(mdev.m_fscache->cookie,
-			folio_mapping(folio), folio_pos(folio),
-			folio_size(folio), pstart);
+	return erofs_fscache_read_folios_async(mdev.m_fscache->cookie,
+			rreq, pstart);
 
 out_uptodate:
 	if (!ret)
@@ -162,15 +320,16 @@ static int erofs_fscache_readpage(struct file *file, struct page *page)
 	return ret;
 }
 
-static void erofs_fscache_unlock_folios(struct readahead_control *rac,
-					size_t len)
+static void erofs_fscache_advance_folios(struct readahead_control *rac,
+					 size_t len, bool unlock)
 {
 	while (len) {
 		struct folio *folio = readahead_folio(rac);
-
 		len -= folio_size(folio);
-		folio_mark_uptodate(folio);
-		folio_unlock(folio);
+		if (unlock) {
+			folio_mark_uptodate(folio);
+			folio_unlock(folio);
+		}
 	}
 }
 
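The new unlock flag splits what used to be a single behavior across the two situations the readahead loop below ends up in. Paraphrasing the two call sites that follow (the comments are editorial, not from the patch):

	/* Data was synthesized in place (a hole zeroed via iov_iter_zero()):
	 * the folios are already valid, so mark them uptodate and unlock now. */
	erofs_fscache_advance_folios(rac, count, true);

	/* An async fscache read is about to be queued for these folios: only
	 * consume them from @rac here; erofs_fscache_rreq_unlock_folios()
	 * marks them uptodate and unlocks them when the request completes. */
	erofs_fscache_advance_folios(rac, count, false);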
@@ -192,6 +351,7 @@ static void erofs_fscache_readahead(struct readahead_control *rac)
 	do {
 		struct erofs_map_blocks map;
 		struct erofs_map_dev mdev;
+		struct netfs_io_request *rreq;
 
 		pos = start + done;
 		map.m_la = pos;
@@ -211,7 +371,7 @@ static void erofs_fscache_readahead(struct readahead_control *rac)
 				offset, count);
 		iov_iter_zero(count, &iter);
 
-		erofs_fscache_unlock_folios(rac, count);
+		erofs_fscache_advance_folios(rac, count, true);
 		ret = count;
 		continue;
 	}
@@ -237,17 +397,18 @@ static void erofs_fscache_readahead(struct readahead_control *rac)
 		if (ret)
 			return;
 
-		ret = erofs_fscache_read_folios(mdev.m_fscache->cookie,
-				rac->mapping, offset, count,
-				mdev.m_pa + (pos - map.m_la));
+		rreq = erofs_fscache_alloc_request(rac->mapping, offset, count);
+		if (IS_ERR(rreq))
+			return;
 		/*
-		 * For the error cases, the folios will be unlocked when
-		 * .readahead() returns.
+		 * Drop the ref of the folios here. They are unlocked in
+		 * erofs_fscache_rreq_unlock_folios() once the rreq completes.
 		 */
-		if (!ret) {
-			erofs_fscache_unlock_folios(rac, count);
+		erofs_fscache_advance_folios(rac, count, false);
+		ret = erofs_fscache_read_folios_async(mdev.m_fscache->cookie,
+				rreq, mdev.m_pa + (pos - map.m_la));
+		if (!ret)
 			ret = count;
-		}
 	} while (ret > 0 && ((done += ret) < len));
 }
 
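One consequence worth spelling out: once the folios have been consumed with unlock == false they are no longer reachable through @rac, so the only code that can unlock them is erofs_fscache_rreq_unlock_folios() at request completion. That completion is guaranteed even when erofs_fscache_read_folios_async() fails partway through, because the submitter always drops its own nr_outstanding count at the out: label, which is why the old "folios will be unlocked when .readahead() returns" comment no longer applies and had to be rewritten.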