@@ -116,7 +116,7 @@ __nfs_iocounter_wait(struct nfs_io_counter *c)
116116 if (atomic_read (& c -> io_count ) == 0 )
117117 break ;
118118 ret = nfs_wait_bit_killable (& q .key );
119- } while (atomic_read (& c -> io_count ) != 0 );
119+ } while (atomic_read (& c -> io_count ) != 0 && ! ret );
120120 finish_wait (wq , & q .wait );
121121 return ret ;
122122}
@@ -139,26 +139,49 @@ nfs_iocounter_wait(struct nfs_io_counter *c)
139139/*
140140 * nfs_page_group_lock - lock the head of the page group
141141 * @req - request in group that is to be locked
142+ * @nonblock - if true don't block waiting for lock
142143 *
143144 * this lock must be held if modifying the page group list
144145 *
145- * returns result from wait_on_bit_lock: 0 on success, < 0 on error
146+ * return 0 on success, < 0 on error: -EAGAIN if nonblocking or the
147+ * result from wait_on_bit_lock
148+ *
149+ * NOTE: calling with nonblock=false should always have set the
150+ * lock bit (see fs/buffer.c and other uses of wait_on_bit_lock
151+ * with TASK_UNINTERRUPTIBLE), so there is no need to check the result.
146152 */
147153int
148- nfs_page_group_lock (struct nfs_page * req , bool wait )
154+ nfs_page_group_lock (struct nfs_page * req , bool nonblock )
149155{
150156 struct nfs_page * head = req -> wb_head ;
151- int ret ;
152157
153158 WARN_ON_ONCE (head != head -> wb_head );
154159
155- do {
156- ret = wait_on_bit_lock (& head -> wb_flags , PG_HEADLOCK ,
157- TASK_UNINTERRUPTIBLE );
158- } while (wait && ret != 0 );
160+ if (!test_and_set_bit (PG_HEADLOCK , & head -> wb_flags ))
161+ return 0 ;
159162
160- WARN_ON_ONCE (ret > 0 );
161- return ret ;
163+ if (!nonblock )
164+ return wait_on_bit_lock (& head -> wb_flags , PG_HEADLOCK ,
165+ TASK_UNINTERRUPTIBLE );
166+
167+ return - EAGAIN ;
168+ }
169+
170+ /*
171+ * nfs_page_group_lock_wait - wait for the lock to clear, but don't grab it
172+ * @req - a request in the group
173+ *
174+ * This is a blocking call to wait for the group lock to be cleared.
175+ */
176+ void
177+ nfs_page_group_lock_wait (struct nfs_page * req )
178+ {
179+ struct nfs_page * head = req -> wb_head ;
180+
181+ WARN_ON_ONCE (head != head -> wb_head );
182+
183+ wait_on_bit (& head -> wb_flags , PG_HEADLOCK ,
184+ TASK_UNINTERRUPTIBLE );
162185}
163186
164187/*
@@ -219,7 +242,7 @@ bool nfs_page_group_sync_on_bit(struct nfs_page *req, unsigned int bit)
219242{
220243 bool ret ;
221244
222- nfs_page_group_lock (req , true );
245+ nfs_page_group_lock (req , false );
223246 ret = nfs_page_group_sync_on_bit_locked (req , bit );
224247 nfs_page_group_unlock (req );
225248
@@ -701,23 +724,35 @@ int nfs_generic_pgio(struct nfs_pageio_descriptor *desc,
701724 struct nfs_pgio_header * hdr )
702725{
703726 struct nfs_page * req ;
704- struct page * * pages ;
727+ struct page * * pages ,
728+ * last_page ;
705729 struct list_head * head = & desc -> pg_list ;
706730 struct nfs_commit_info cinfo ;
707- unsigned int pagecount ;
731+ unsigned int pagecount , pageused ;
708732
709733 pagecount = nfs_page_array_len (desc -> pg_base , desc -> pg_count );
710734 if (!nfs_pgarray_set (& hdr -> page_array , pagecount ))
711735 return nfs_pgio_error (desc , hdr );
712736
713737 nfs_init_cinfo (& cinfo , desc -> pg_inode , desc -> pg_dreq );
714738 pages = hdr -> page_array .pagevec ;
739+ last_page = NULL ;
740+ pageused = 0 ;
715741 while (!list_empty (head )) {
716742 req = nfs_list_entry (head -> next );
717743 nfs_list_remove_request (req );
718744 nfs_list_add_request (req , & hdr -> pages );
719- * pages ++ = req -> wb_page ;
745+
746+ if (WARN_ON_ONCE (pageused >= pagecount ))
747+ return nfs_pgio_error (desc , hdr );
748+
749+ if (!last_page || last_page != req -> wb_page ) {
750+ * pages ++ = last_page = req -> wb_page ;
751+ pageused ++ ;
752+ }
720753 }
754+ if (WARN_ON_ONCE (pageused != pagecount ))
755+ return nfs_pgio_error (desc , hdr );
721756
722757 if ((desc -> pg_ioflags & FLUSH_COND_STABLE ) &&
723758 (desc -> pg_moreio || nfs_reqs_to_commit (& cinfo )))
@@ -788,6 +823,14 @@ static bool nfs_can_coalesce_requests(struct nfs_page *prev,
788823 return false;
789824 if (req_offset (req ) != req_offset (prev ) + prev -> wb_bytes )
790825 return false;
826+ if (req -> wb_page == prev -> wb_page ) {
827+ if (req -> wb_pgbase != prev -> wb_pgbase + prev -> wb_bytes )
828+ return false;
829+ } else {
830+ if (req -> wb_pgbase != 0 ||
831+ prev -> wb_pgbase + prev -> wb_bytes != PAGE_CACHE_SIZE )
832+ return false;
833+ }
791834 }
792835 size = pgio -> pg_ops -> pg_test (pgio , prev , req );
793836 WARN_ON_ONCE (size > req -> wb_bytes );
@@ -858,13 +901,8 @@ static int __nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
858901 struct nfs_page * subreq ;
859902 unsigned int bytes_left = 0 ;
860903 unsigned int offset , pgbase ;
861- int ret ;
862904
863- ret = nfs_page_group_lock (req , false);
864- if (ret < 0 ) {
865- desc -> pg_error = ret ;
866- return 0 ;
867- }
905+ nfs_page_group_lock (req , false);
868906
869907 subreq = req ;
870908 bytes_left = subreq -> wb_bytes ;
@@ -886,11 +924,7 @@ static int __nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
886924 if (desc -> pg_recoalesce )
887925 return 0 ;
888926 /* retry add_request for this subreq */
889- ret = nfs_page_group_lock (req , false);
890- if (ret < 0 ) {
891- desc -> pg_error = ret ;
892- return 0 ;
893- }
927+ nfs_page_group_lock (req , false);
894928 continue ;
895929 }
896930
0 commit comments