
Commit db37648

Nick Piggin authored and Linus Torvalds committed
[PATCH] mm: non syncing lock_page()
lock_page needs the caller to have a reference on the page->mapping inode due to sync_page, ergo set_page_dirty_lock is obviously buggy according to its comments.

Solve it by introducing a new lock_page_nosync which does not do a sync_page.

akpm: unpleasant solution to an unpleasant problem.  If it goes wrong it could cause great slowdowns while the lock_page() caller waits for kblockd to perform the unplug.  And if a filesystem has special sync_page() requirements (none presently do), permanent hangs are possible.

otoh, set_page_dirty_lock() is usually (always?) called against userspace pages.  They are always up-to-date, so there shouldn't be any pending read I/O against these pages.

Signed-off-by: Nick Piggin <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
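The sync_page wait action the message refers to lives in mm/filemap.c and is untouched by this diff. A rough sketch of its 2.6.x-era shape (reconstructed for illustration, approximate) shows why a lock_page() caller must keep the inode pinned: the wait action follows page->mapping while the locker sleeps.

/*
 * Sketch of the pre-existing sync_page wait action (approximate,
 * not part of this commit). It dereferences page->mapping to reach
 * a_ops->sync_page, so the mapping must stay alive while a
 * lock_page() caller sleeps here.
 */
static int sync_page(void *word)
{
	struct address_space *mapping;
	struct page *page;

	page = container_of((unsigned long *)word, struct page, flags);

	smp_mb();
	mapping = page_mapping(page);
	if (mapping && mapping->a_ops && mapping->a_ops->sync_page)
		mapping->a_ops->sync_page(page);	/* typically unplugs the block queue */
	io_schedule();
	return 0;
}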
1 parent: 28e4d96

File tree: 3 files changed (+33 −1 lines)


include/linux/pagemap.h

Lines changed: 15 additions & 0 deletions
@@ -130,14 +130,29 @@ static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
 }
 
 extern void FASTCALL(__lock_page(struct page *page));
+extern void FASTCALL(__lock_page_nosync(struct page *page));
 extern void FASTCALL(unlock_page(struct page *page));
 
+/*
+ * lock_page may only be called if we have the page's inode pinned.
+ */
 static inline void lock_page(struct page *page)
 {
 	might_sleep();
 	if (TestSetPageLocked(page))
 		__lock_page(page);
 }
+
+/*
+ * lock_page_nosync should only be used if we can't pin the page's inode.
+ * Doesn't play quite so well with block device plugging.
+ */
+static inline void lock_page_nosync(struct page *page)
+{
+	might_sleep();
+	if (TestSetPageLocked(page))
+		__lock_page_nosync(page);
+}
 
 /*
  * This is exported only for wait_on_page_locked/wait_on_page_writeback.

mm/filemap.c

Lines changed: 17 additions & 0 deletions
@@ -488,6 +488,12 @@ struct page *page_cache_alloc_cold(struct address_space *x)
 EXPORT_SYMBOL(page_cache_alloc_cold);
 #endif
 
+static int __sleep_on_page_lock(void *word)
+{
+	io_schedule();
+	return 0;
+}
+
 /*
  * In order to wait for pages to become available there must be
  * waitqueues associated with pages. By using a hash table of

@@ -577,6 +583,17 @@ void fastcall __lock_page(struct page *page)
 }
 EXPORT_SYMBOL(__lock_page);
 
+/*
+ * Variant of lock_page that does not require the caller to hold a reference
+ * on the page's mapping.
+ */
+void fastcall __lock_page_nosync(struct page *page)
+{
+	DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);
+	__wait_on_bit_lock(page_waitqueue(page), &wait, __sleep_on_page_lock,
+							TASK_UNINTERRUPTIBLE);
+}
+
 /**
  * find_get_page - find and get a page reference
 * @mapping: the address_space to search
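For contrast, the pre-existing __lock_page() (visible in the hunk header above but unchanged by this commit) differs from the new variant only in the wait action it passes to __wait_on_bit_lock(). A sketch of its same-era body, reconstructed for illustration and therefore approximate:

void fastcall __lock_page(struct page *page)
{
	DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);

	/* sync_page may dereference page->mapping; __sleep_on_page_lock does not */
	__wait_on_bit_lock(page_waitqueue(page), &wait, sync_page,
							TASK_UNINTERRUPTIBLE);
}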

mm/page-writeback.c

Lines changed: 1 addition & 1 deletion
@@ -701,7 +701,7 @@ int set_page_dirty_lock(struct page *page)
 {
 	int ret;
 
-	lock_page(page);
+	lock_page_nosync(page);
 	ret = set_page_dirty(page);
 	unlock_page(page);
 	return ret;
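To see why set_page_dirty_lock() cannot assume a pinned mapping, consider the kind of caller the commit message alludes to: code releasing user pages that were pinned with get_user_pages(). The sketch below is hypothetical; the helper name and shape are invented for illustration, while set_page_dirty_lock() and page_cache_release() are real 2.6-era APIs.

/* Hypothetical caller, for illustration only (not from this commit). */
static void release_pinned_pages(struct page **pages, int nr, int dirtied)
{
	int i;

	for (i = 0; i < nr; i++) {
		if (dirtied)
			/* We hold a page reference, but nothing pins page->mapping. */
			set_page_dirty_lock(pages[i]);
		page_cache_release(pages[i]);
	}
}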
