Skip to content

Commit 36090de

Browse files
arndb authored and torvalds committed
mm: move tlb_flush_pending inline helpers to mm_inline.h
linux/mm_types.h should only define structure definitions, to make it cheap to include elsewhere. The atomic_t helper function definitions are particularly large, so it's better to move the helpers using those into the existing linux/mm_inline.h and only include that where needed. As a follow-up, we may want to go through all the indirect includes in mm_types.h and reduce them as much as possible. Link: https://lkml.kernel.org/r/[email protected] Signed-off-by: Arnd Bergmann <[email protected]> Cc: Al Viro <[email protected]> Cc: Stephen Rothwell <[email protected]> Cc: Suren Baghdasaryan <[email protected]> Cc: Colin Cross <[email protected]> Cc: Kees Cook <[email protected]> Cc: Peter Xu <[email protected]> Cc: Peter Zijlstra (Intel) <[email protected]> Cc: Yu Zhao <[email protected]> Cc: Vlastimil Babka <[email protected]> Cc: Matthew Wilcox (Oracle) <[email protected]> Cc: Eric Biederman <[email protected]> Signed-off-by: Andrew Morton <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
1 parent 17fca13 commit 36090de

File tree

9 files changed

+137
-130
lines changed

9 files changed

+137
-130
lines changed

arch/x86/include/asm/pgtable.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -752,7 +752,7 @@ static inline bool pte_accessible(struct mm_struct *mm, pte_t a)
752752
return true;
753753

754754
if ((pte_flags(a) & _PAGE_PROTNONE) &&
755-
mm_tlb_flush_pending(mm))
755+
atomic_read(&mm->tlb_flush_pending))
756756
return true;
757757

758758
return false;

include/linux/mm.h

Lines changed: 0 additions & 45 deletions
Original file line numberDiff line numberDiff line change
@@ -424,51 +424,6 @@ extern unsigned int kobjsize(const void *objp);
424424
*/
425425
extern pgprot_t protection_map[16];
426426

427-
/**
428-
* enum fault_flag - Fault flag definitions.
429-
* @FAULT_FLAG_WRITE: Fault was a write fault.
430-
* @FAULT_FLAG_MKWRITE: Fault was mkwrite of existing PTE.
431-
* @FAULT_FLAG_ALLOW_RETRY: Allow to retry the fault if blocked.
432-
* @FAULT_FLAG_RETRY_NOWAIT: Don't drop mmap_lock and wait when retrying.
433-
* @FAULT_FLAG_KILLABLE: The fault task is in SIGKILL killable region.
434-
* @FAULT_FLAG_TRIED: The fault has been tried once.
435-
* @FAULT_FLAG_USER: The fault originated in userspace.
436-
* @FAULT_FLAG_REMOTE: The fault is not for current task/mm.
437-
* @FAULT_FLAG_INSTRUCTION: The fault was during an instruction fetch.
438-
* @FAULT_FLAG_INTERRUPTIBLE: The fault can be interrupted by non-fatal signals.
439-
*
440-
* About @FAULT_FLAG_ALLOW_RETRY and @FAULT_FLAG_TRIED: we can specify
441-
* whether we would allow page faults to retry by specifying these two
442-
* fault flags correctly. Currently there can be three legal combinations:
443-
*
444-
* (a) ALLOW_RETRY and !TRIED: this means the page fault allows retry, and
445-
* this is the first try
446-
*
447-
* (b) ALLOW_RETRY and TRIED: this means the page fault allows retry, and
448-
* we've already tried at least once
449-
*
450-
* (c) !ALLOW_RETRY and !TRIED: this means the page fault does not allow retry
451-
*
452-
* The unlisted combination (!ALLOW_RETRY && TRIED) is illegal and should never
453-
* be used. Note that page faults can be allowed to retry for multiple times,
454-
* in which case we'll have an initial fault with flags (a) then later on
455-
* continuous faults with flags (b). We should always try to detect pending
456-
* signals before a retry to make sure the continuous page faults can still be
457-
* interrupted if necessary.
458-
*/
459-
enum fault_flag {
460-
FAULT_FLAG_WRITE = 1 << 0,
461-
FAULT_FLAG_MKWRITE = 1 << 1,
462-
FAULT_FLAG_ALLOW_RETRY = 1 << 2,
463-
FAULT_FLAG_RETRY_NOWAIT = 1 << 3,
464-
FAULT_FLAG_KILLABLE = 1 << 4,
465-
FAULT_FLAG_TRIED = 1 << 5,
466-
FAULT_FLAG_USER = 1 << 6,
467-
FAULT_FLAG_REMOTE = 1 << 7,
468-
FAULT_FLAG_INSTRUCTION = 1 << 8,
469-
FAULT_FLAG_INTERRUPTIBLE = 1 << 9,
470-
};
471-
472427
/*
473428
* The default fault flags that should be used by most of the
474429
* arch-specific page fault handlers.

include/linux/mm_inline.h

Lines changed: 86 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -2,6 +2,7 @@
22
#ifndef LINUX_MM_INLINE_H
33
#define LINUX_MM_INLINE_H
44

5+
#include <linux/atomic.h>
56
#include <linux/huge_mm.h>
67
#include <linux/swap.h>
78
#include <linux/string.h>
@@ -185,4 +186,89 @@ static inline bool is_same_vma_anon_name(struct vm_area_struct *vma,
185186
}
186187
#endif /* CONFIG_ANON_VMA_NAME */
187188

189+
static inline void init_tlb_flush_pending(struct mm_struct *mm)
190+
{
191+
atomic_set(&mm->tlb_flush_pending, 0);
192+
}
193+
194+
static inline void inc_tlb_flush_pending(struct mm_struct *mm)
195+
{
196+
atomic_inc(&mm->tlb_flush_pending);
197+
/*
198+
* The only time this value is relevant is when there are indeed pages
199+
* to flush. And we'll only flush pages after changing them, which
200+
* requires the PTL.
201+
*
202+
* So the ordering here is:
203+
*
204+
* atomic_inc(&mm->tlb_flush_pending);
205+
* spin_lock(&ptl);
206+
* ...
207+
* set_pte_at();
208+
* spin_unlock(&ptl);
209+
*
210+
* spin_lock(&ptl)
211+
* mm_tlb_flush_pending();
212+
* ....
213+
* spin_unlock(&ptl);
214+
*
215+
* flush_tlb_range();
216+
* atomic_dec(&mm->tlb_flush_pending);
217+
*
218+
* Where the increment if constrained by the PTL unlock, it thus
219+
* ensures that the increment is visible if the PTE modification is
220+
* visible. After all, if there is no PTE modification, nobody cares
221+
* about TLB flushes either.
222+
*
223+
* This very much relies on users (mm_tlb_flush_pending() and
224+
* mm_tlb_flush_nested()) only caring about _specific_ PTEs (and
225+
* therefore specific PTLs), because with SPLIT_PTE_PTLOCKS and RCpc
226+
* locks (PPC) the unlock of one doesn't order against the lock of
227+
* another PTL.
228+
*
229+
* The decrement is ordered by the flush_tlb_range(), such that
230+
* mm_tlb_flush_pending() will not return false unless all flushes have
231+
* completed.
232+
*/
233+
}
234+
235+
static inline void dec_tlb_flush_pending(struct mm_struct *mm)
236+
{
237+
/*
238+
* See inc_tlb_flush_pending().
239+
*
240+
* This cannot be smp_mb__before_atomic() because smp_mb() simply does
241+
* not order against TLB invalidate completion, which is what we need.
242+
*
243+
* Therefore we must rely on tlb_flush_*() to guarantee order.
244+
*/
245+
atomic_dec(&mm->tlb_flush_pending);
246+
}
247+
248+
static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
249+
{
250+
/*
251+
* Must be called after having acquired the PTL; orders against that
252+
* PTLs release and therefore ensures that if we observe the modified
253+
* PTE we must also observe the increment from inc_tlb_flush_pending().
254+
*
255+
* That is, it only guarantees to return true if there is a flush
256+
* pending for _this_ PTL.
257+
*/
258+
return atomic_read(&mm->tlb_flush_pending);
259+
}
260+
261+
static inline bool mm_tlb_flush_nested(struct mm_struct *mm)
262+
{
263+
/*
264+
* Similar to mm_tlb_flush_pending(), we must have acquired the PTL
265+
* for which there is a TLB flush pending in order to guarantee
266+
* we've seen both that PTE modification and the increment.
267+
*
268+
* (no requirement on actually still holding the PTL, that is irrelevant)
269+
*/
270+
return atomic_read(&mm->tlb_flush_pending) > 1;
271+
}
272+
273+
188274
#endif

include/linux/mm_types.h

Lines changed: 45 additions & 84 deletions
Original file line numberDiff line numberDiff line change
@@ -692,90 +692,6 @@ extern void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm);
692692
extern void tlb_gather_mmu_fullmm(struct mmu_gather *tlb, struct mm_struct *mm);
693693
extern void tlb_finish_mmu(struct mmu_gather *tlb);
694694

695-
static inline void init_tlb_flush_pending(struct mm_struct *mm)
696-
{
697-
atomic_set(&mm->tlb_flush_pending, 0);
698-
}
699-
700-
static inline void inc_tlb_flush_pending(struct mm_struct *mm)
701-
{
702-
atomic_inc(&mm->tlb_flush_pending);
703-
/*
704-
* The only time this value is relevant is when there are indeed pages
705-
* to flush. And we'll only flush pages after changing them, which
706-
* requires the PTL.
707-
*
708-
* So the ordering here is:
709-
*
710-
* atomic_inc(&mm->tlb_flush_pending);
711-
* spin_lock(&ptl);
712-
* ...
713-
* set_pte_at();
714-
* spin_unlock(&ptl);
715-
*
716-
* spin_lock(&ptl)
717-
* mm_tlb_flush_pending();
718-
* ....
719-
* spin_unlock(&ptl);
720-
*
721-
* flush_tlb_range();
722-
* atomic_dec(&mm->tlb_flush_pending);
723-
*
724-
* Where the increment if constrained by the PTL unlock, it thus
725-
* ensures that the increment is visible if the PTE modification is
726-
* visible. After all, if there is no PTE modification, nobody cares
727-
* about TLB flushes either.
728-
*
729-
* This very much relies on users (mm_tlb_flush_pending() and
730-
* mm_tlb_flush_nested()) only caring about _specific_ PTEs (and
731-
* therefore specific PTLs), because with SPLIT_PTE_PTLOCKS and RCpc
732-
* locks (PPC) the unlock of one doesn't order against the lock of
733-
* another PTL.
734-
*
735-
* The decrement is ordered by the flush_tlb_range(), such that
736-
* mm_tlb_flush_pending() will not return false unless all flushes have
737-
* completed.
738-
*/
739-
}
740-
741-
static inline void dec_tlb_flush_pending(struct mm_struct *mm)
742-
{
743-
/*
744-
* See inc_tlb_flush_pending().
745-
*
746-
* This cannot be smp_mb__before_atomic() because smp_mb() simply does
747-
* not order against TLB invalidate completion, which is what we need.
748-
*
749-
* Therefore we must rely on tlb_flush_*() to guarantee order.
750-
*/
751-
atomic_dec(&mm->tlb_flush_pending);
752-
}
753-
754-
static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
755-
{
756-
/*
757-
* Must be called after having acquired the PTL; orders against that
758-
* PTLs release and therefore ensures that if we observe the modified
759-
* PTE we must also observe the increment from inc_tlb_flush_pending().
760-
*
761-
* That is, it only guarantees to return true if there is a flush
762-
* pending for _this_ PTL.
763-
*/
764-
return atomic_read(&mm->tlb_flush_pending);
765-
}
766-
767-
static inline bool mm_tlb_flush_nested(struct mm_struct *mm)
768-
{
769-
/*
770-
* Similar to mm_tlb_flush_pending(), we must have acquired the PTL
771-
* for which there is a TLB flush pending in order to guarantee
772-
* we've seen both that PTE modification and the increment.
773-
*
774-
* (no requirement on actually still holding the PTL, that is irrelevant)
775-
*/
776-
return atomic_read(&mm->tlb_flush_pending) > 1;
777-
}
778-
779695
struct vm_fault;
780696

781697
/**
@@ -890,4 +806,49 @@ typedef struct {
890806
unsigned long val;
891807
} swp_entry_t;
892808

809+
/**
810+
* enum fault_flag - Fault flag definitions.
811+
* @FAULT_FLAG_WRITE: Fault was a write fault.
812+
* @FAULT_FLAG_MKWRITE: Fault was mkwrite of existing PTE.
813+
* @FAULT_FLAG_ALLOW_RETRY: Allow to retry the fault if blocked.
814+
* @FAULT_FLAG_RETRY_NOWAIT: Don't drop mmap_lock and wait when retrying.
815+
* @FAULT_FLAG_KILLABLE: The fault task is in SIGKILL killable region.
816+
* @FAULT_FLAG_TRIED: The fault has been tried once.
817+
* @FAULT_FLAG_USER: The fault originated in userspace.
818+
* @FAULT_FLAG_REMOTE: The fault is not for current task/mm.
819+
* @FAULT_FLAG_INSTRUCTION: The fault was during an instruction fetch.
820+
* @FAULT_FLAG_INTERRUPTIBLE: The fault can be interrupted by non-fatal signals.
821+
*
822+
* About @FAULT_FLAG_ALLOW_RETRY and @FAULT_FLAG_TRIED: we can specify
823+
* whether we would allow page faults to retry by specifying these two
824+
* fault flags correctly. Currently there can be three legal combinations:
825+
*
826+
* (a) ALLOW_RETRY and !TRIED: this means the page fault allows retry, and
827+
* this is the first try
828+
*
829+
* (b) ALLOW_RETRY and TRIED: this means the page fault allows retry, and
830+
* we've already tried at least once
831+
*
832+
* (c) !ALLOW_RETRY and !TRIED: this means the page fault does not allow retry
833+
*
834+
* The unlisted combination (!ALLOW_RETRY && TRIED) is illegal and should never
835+
* be used. Note that page faults can be allowed to retry for multiple times,
836+
* in which case we'll have an initial fault with flags (a) then later on
837+
* continuous faults with flags (b). We should always try to detect pending
838+
* signals before a retry to make sure the continuous page faults can still be
839+
* interrupted if necessary.
840+
*/
841+
enum fault_flag {
842+
FAULT_FLAG_WRITE = 1 << 0,
843+
FAULT_FLAG_MKWRITE = 1 << 1,
844+
FAULT_FLAG_ALLOW_RETRY = 1 << 2,
845+
FAULT_FLAG_RETRY_NOWAIT = 1 << 3,
846+
FAULT_FLAG_KILLABLE = 1 << 4,
847+
FAULT_FLAG_TRIED = 1 << 5,
848+
FAULT_FLAG_USER = 1 << 6,
849+
FAULT_FLAG_REMOTE = 1 << 7,
850+
FAULT_FLAG_INSTRUCTION = 1 << 8,
851+
FAULT_FLAG_INTERRUPTIBLE = 1 << 9,
852+
};
853+
893854
#endif /* _LINUX_MM_TYPES_H */

mm/ksm.c

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -15,6 +15,7 @@
1515

1616
#include <linux/errno.h>
1717
#include <linux/mm.h>
18+
#include <linux/mm_inline.h>
1819
#include <linux/fs.h>
1920
#include <linux/mman.h>
2021
#include <linux/sched.h>

mm/mapping_dirty_helpers.c

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -3,6 +3,7 @@
33
#include <linux/hugetlb.h>
44
#include <linux/bitops.h>
55
#include <linux/mmu_notifier.h>
6+
#include <linux/mm_inline.h>
67
#include <asm/cacheflush.h>
78
#include <asm/tlbflush.h>
89

mm/memory.c

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -41,6 +41,7 @@
4141

4242
#include <linux/kernel_stat.h>
4343
#include <linux/mm.h>
44+
#include <linux/mm_inline.h>
4445
#include <linux/sched/mm.h>
4546
#include <linux/sched/coredump.h>
4647
#include <linux/sched/numa_balancing.h>

mm/mmu_gather.c

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -3,6 +3,7 @@
33
#include <linux/kernel.h>
44
#include <linux/mmdebug.h>
55
#include <linux/mm_types.h>
6+
#include <linux/mm_inline.h>
67
#include <linux/pagemap.h>
78
#include <linux/rcupdate.h>
89
#include <linux/smp.h>

mm/pgtable-generic.c

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -10,6 +10,7 @@
1010
#include <linux/pagemap.h>
1111
#include <linux/hugetlb.h>
1212
#include <linux/pgtable.h>
13+
#include <linux/mm_inline.h>
1314
#include <asm/tlb.h>
1415

1516
/*

0 commit comments

Comments (0)