 #include <asm/uaccess.h>
 #include <linux/gfp.h>
 #include <linux/bitops.h>
+#include <linux/hardirq.h>	/* for in_interrupt() */
 
 /*
  * Bits in mapping->flags. The lower __GFP_BITS_SHIFT bits are the page
@@ -62,6 +63,98 @@ static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask) |
 #define page_cache_release(page)	put_page(page)
 void release_pages(struct page **pages, int nr, int cold);
 
+/*
+ * Speculatively take a reference to a page.
+ * If the page is free (_count == 0), then _count is untouched and 0
+ * is returned. Otherwise, _count is incremented by 1 and 1 is returned.
+ *
+ * This function must be called inside the same rcu_read_lock() section as has
+ * been used to look up the page in the pagecache radix-tree (or page table):
+ * this allows allocators to use a synchronize_rcu() to stabilize _count.
+ *
+ * Unless an RCU grace period has passed, the count of all pages coming out
+ * of the allocator must be considered unstable. page_count may return higher
+ * than expected, and put_page must be able to do the right thing when the
+ * page has been finished with, no matter what it is subsequently allocated
+ * for (because put_page is what is used here to drop an invalid speculative
+ * reference).
+ *
+ * This is the interesting part of the lockless pagecache (and lockless
+ * get_user_pages) locking protocol, where the lookup side (eg. find_get_page)
+ * has the following pattern:
+ * 1. find page in radix tree
+ * 2. conditionally increment refcount
+ * 3. check the page is still in pagecache (if no, goto 1)
+ *
+ * The remove side that cares about stability of _count (eg. reclaim) has the
+ * following pattern (with tree_lock held for write):
+ * A. atomically check refcount is correct and set it to 0 (atomic_cmpxchg)
+ * B. remove page from pagecache
+ * C. free the page
+ *
+ * There are 2 critical interleavings that matter:
+ * - 2 runs before A: in this case, A sees the elevated refcount and bails out
+ * - A runs before 2: in this case, 2 sees a zero refcount and retries;
+ *   subsequently, B will complete and 1 will find no page, causing the
+ *   lookup to return NULL.
+ *
+ * It is possible that between 1 and 2, the page is removed and then the exact
+ * same page is inserted into the same position in pagecache. That's OK: the
+ * old find_get_page using tree_lock could equally have run before or after
+ * such a re-insertion, depending on the order that locks are granted.
+ *
+ * Lookups racing against pagecache insertion aren't a big problem: either 1
+ * will find the page or it will not. Likewise, the old find_get_page could run
+ * either before the insertion or afterwards, depending on timing.
+ */
+static inline int page_cache_get_speculative(struct page *page)
+{
+	VM_BUG_ON(in_interrupt());
+
+#if !defined(CONFIG_SMP) && defined(CONFIG_CLASSIC_RCU)
+# ifdef CONFIG_PREEMPT
+	VM_BUG_ON(!in_atomic());
+# endif
+	/*
+	 * Preempt must be disabled here - we rely on rcu_read_lock doing
+	 * this for us.
+	 *
+	 * Pagecache won't be truncated from interrupt context, so if we have
+	 * found a page in the radix tree here, we have pinned its refcount by
+	 * disabling preempt, and hence no need for the "speculative get" that
+	 * SMP requires.
+	 */
+	VM_BUG_ON(page_count(page) == 0);
+	atomic_inc(&page->_count);
+
+#else
+	if (unlikely(!get_page_unless_zero(page))) {
+		/*
+		 * Either the page has been freed, or will be freed.
+		 * In either case, retry here and the caller should
+		 * do the right thing (see comments above).
+		 */
+		return 0;
+	}
+#endif
+	VM_BUG_ON(PageTail(page));
+
+	return 1;
+}
+
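To make the 1-2-3 lookup pattern above concrete, here is a minimal sketch of
how a lookup-side caller such as find_get_page can use
page_cache_get_speculative(). It is illustrative only (the real find_get_page
in mm/filemap.c differs in detail), and the name lockless_lookup_sketch is
hypothetical; radix_tree_lookup() and the RCU primitives are stock kernel APIs:

static struct page *lockless_lookup_sketch(struct address_space *mapping,
					   pgoff_t index)
{
	struct page *page;

	rcu_read_lock();
repeat:
	page = radix_tree_lookup(&mapping->page_tree, index);	/* step 1 */
	if (page) {
		if (!page_cache_get_speculative(page))		/* step 2 */
			goto repeat;
		/* step 3: recheck the page is still in pagecache */
		if (unlikely(page != radix_tree_lookup(&mapping->page_tree,
						       index))) {
			page_cache_release(page);
			goto repeat;
		}
	}
	rcu_read_unlock();
	return page;
}

The reference obtained remains valid after rcu_read_unlock(); the RCU section
is needed only while the speculative get is attempted, so that the allocator
can use synchronize_rcu() to stabilize _count, as the comment above describes.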
+static inline int page_freeze_refs(struct page *page, int count)
+{
+	return likely(atomic_cmpxchg(&page->_count, count, 0) == count);
+}
+
+static inline void page_unfreeze_refs(struct page *page, int count)
+{
+	VM_BUG_ON(page_count(page) != 0);
+	VM_BUG_ON(count == 0);
+
+	atomic_set(&page->_count, count);
+}
+
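The remove side (steps A-C above) pairs with these helpers. Below is a
hypothetical reclaim-style caller modeled on what __remove_mapping() and
remove_mapping() in mm/vmscan.c do with them; the name remove_page_sketch is
made up, tree_lock is assumed to be held for write as the comment above
requires, and the expected count of 2 means exactly one pagecache reference
plus the caller's own:

static int remove_page_sketch(struct address_space *mapping, struct page *page)
{
	write_lock_irq(&mapping->tree_lock);
	if (!page_freeze_refs(page, 2)) {	/* step A */
		/* a speculative reference is in flight; bail out */
		write_unlock_irq(&mapping->tree_lock);
		return 0;
	}
	__remove_from_page_cache(page);		/* step B */
	write_unlock_irq(&mapping->tree_lock);
	/*
	 * Unfreeze to 1 rather than 2: this drops the pagecache reference
	 * without another atomic operation. The caller frees the page
	 * (step C) when it releases its remaining reference.
	 */
	page_unfreeze_refs(page, 1);
	return 1;
}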
 #ifdef CONFIG_NUMA
 extern struct page *__page_cache_alloc(gfp_t gfp);
 #else
@@ -133,13 +226,29 @@ static inline struct page *read_mapping_page(struct address_space *mapping, |
 	return read_cache_page(mapping, index, filler, data);
 }
 
-int add_to_page_cache(struct page *page, struct address_space *mapping,
+int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
 		pgoff_t index, gfp_t gfp_mask);
 int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
 		pgoff_t index, gfp_t gfp_mask);
 extern void remove_from_page_cache(struct page *page);
 extern void __remove_from_page_cache(struct page *page);
 
+/*
+ * Like add_to_page_cache_locked, but used to add newly allocated pages:
+ * the page is new, so we can just run SetPageLocked() against it.
+ */
+static inline int add_to_page_cache(struct page *page,
+		struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
+{
+	int error;
+
+	SetPageLocked(page);
+	error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
+	if (unlikely(error))
+		ClearPageLocked(page);
+	return error;
+}
+
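For illustration, a typical caller with a freshly allocated page might look
like the following sketch. The name alloc_and_add_sketch is made up;
page_cache_alloc_cold() and page_cache_release() are the existing helpers from
this header:

static struct page *alloc_and_add_sketch(struct address_space *mapping,
					 pgoff_t index)
{
	struct page *page = page_cache_alloc_cold(mapping);

	if (!page)
		return NULL;
	/*
	 * The page is brand new and invisible to anyone else, so
	 * SetPageLocked() is safe without lock_page(): nobody can hold
	 * or be waiting on the page lock yet.
	 */
	if (add_to_page_cache(page, mapping, index, GFP_KERNEL)) {
		page_cache_release(page);
		return NULL;
	}
	return page;	/* locked; caller and pagecache each hold a reference */
}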
 /*
  * Return byte-offset into filesystem object for page.
  */