@@ -692,90 +692,6 @@ extern void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm);
692692extern void tlb_gather_mmu_fullmm (struct mmu_gather * tlb , struct mm_struct * mm );
693693extern void tlb_finish_mmu (struct mmu_gather * tlb );
694694
/*
 * Reset the pending-TLB-flush counter for @mm to zero.
 * Intended for mm setup, before any flushes can be outstanding;
 * see inc_tlb_flush_pending() for the counter's ordering contract.
 */
static inline void init_tlb_flush_pending(struct mm_struct *mm)
{
	atomic_set(&mm->tlb_flush_pending, 0);
}
699-
/*
 * Account one pending TLB flush against @mm. Must be paired with
 * dec_tlb_flush_pending() after the flush completes, and must be
 * called before taking the PTL guarding the PTEs about to change
 * (see the ordering argument below).
 */
static inline void inc_tlb_flush_pending(struct mm_struct *mm)
{
	atomic_inc(&mm->tlb_flush_pending);
	/*
	 * The only time this value is relevant is when there are indeed pages
	 * to flush. And we'll only flush pages after changing them, which
	 * requires the PTL.
	 *
	 * So the ordering here is:
	 *
	 *	atomic_inc(&mm->tlb_flush_pending);
	 *	spin_lock(&ptl);
	 *	...
	 *	set_pte_at();
	 *	spin_unlock(&ptl);
	 *
	 *				spin_lock(&ptl)
	 *				mm_tlb_flush_pending();
	 *				....
	 *				spin_unlock(&ptl);
	 *
	 *	flush_tlb_range();
	 *	atomic_dec(&mm->tlb_flush_pending);
	 *
	 * Where the increment is constrained by the PTL unlock, it thus
	 * ensures that the increment is visible if the PTE modification is
	 * visible. After all, if there is no PTE modification, nobody cares
	 * about TLB flushes either.
	 *
	 * This very much relies on users (mm_tlb_flush_pending() and
	 * mm_tlb_flush_nested()) only caring about _specific_ PTEs (and
	 * therefore specific PTLs), because with SPLIT_PTE_PTLOCKS and RCpc
	 * locks (PPC) the unlock of one doesn't order against the lock of
	 * another PTL.
	 *
	 * The decrement is ordered by the flush_tlb_range(), such that
	 * mm_tlb_flush_pending() will not return false unless all flushes have
	 * completed.
	 */
}
740-
/*
 * Retire one pending TLB flush for @mm, after the corresponding
 * flush_tlb_*() call has completed. Pairs with inc_tlb_flush_pending().
 */
static inline void dec_tlb_flush_pending(struct mm_struct *mm)
{
	/*
	 * See inc_tlb_flush_pending().
	 *
	 * This cannot be smp_mb__before_atomic() because smp_mb() simply does
	 * not order against TLB invalidate completion, which is what we need.
	 *
	 * Therefore we must rely on tlb_flush_*() to guarantee order.
	 */
	atomic_dec(&mm->tlb_flush_pending);
}
753-
/*
 * Returns true if a TLB flush is pending for @mm. Only meaningful
 * while holding the PTL that covers the PTEs of interest (see below).
 */
static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
{
	/*
	 * Must be called after having acquired the PTL; orders against that
	 * PTL's release and therefore ensures that if we observe the modified
	 * PTE we must also observe the increment from inc_tlb_flush_pending().
	 *
	 * That is, it only guarantees to return true if there is a flush
	 * pending for _this_ PTL.
	 */
	return atomic_read(&mm->tlb_flush_pending);
}
766-
/*
 * Returns true if more than one TLB flush is pending for @mm, i.e.
 * another flusher is concurrently active. Requires having acquired
 * the relevant PTL at some point (see below).
 */
static inline bool mm_tlb_flush_nested(struct mm_struct *mm)
{
	/*
	 * Similar to mm_tlb_flush_pending(), we must have acquired the PTL
	 * for which there is a TLB flush pending in order to guarantee
	 * we've seen both that PTE modification and the increment.
	 *
	 * (no requirement on actually still holding the PTL, that is irrelevant)
	 */
	return atomic_read(&mm->tlb_flush_pending) > 1;
}
778-
779695struct vm_fault ;
780696
781697/**
@@ -890,4 +806,49 @@ typedef struct {
890806 unsigned long val ;
891807} swp_entry_t ;
892808
/**
 * enum fault_flag - Fault flag definitions.
 * @FAULT_FLAG_WRITE: Fault was a write fault.
 * @FAULT_FLAG_MKWRITE: Fault was mkwrite of existing PTE.
 * @FAULT_FLAG_ALLOW_RETRY: Allow to retry the fault if blocked.
 * @FAULT_FLAG_RETRY_NOWAIT: Don't drop mmap_lock and wait when retrying.
 * @FAULT_FLAG_KILLABLE: The fault task is in SIGKILL killable region.
 * @FAULT_FLAG_TRIED: The fault has been tried once.
 * @FAULT_FLAG_USER: The fault originated in userspace.
 * @FAULT_FLAG_REMOTE: The fault is not for current task/mm.
 * @FAULT_FLAG_INSTRUCTION: The fault was during an instruction fetch.
 * @FAULT_FLAG_INTERRUPTIBLE: The fault can be interrupted by non-fatal signals.
 *
 * About @FAULT_FLAG_ALLOW_RETRY and @FAULT_FLAG_TRIED: together these two
 * flags encode the retry policy of a page fault, and only three
 * combinations are legal:
 *
 * (a) ALLOW_RETRY and !TRIED: the page fault allows retry, and this is
 *     the first try
 *
 * (b) ALLOW_RETRY and TRIED: the page fault allows retry, and it has
 *     already been tried at least once
 *
 * (c) !ALLOW_RETRY and !TRIED: the page fault does not allow retry
 *
 * The remaining combination (!ALLOW_RETRY && TRIED) is illegal and must
 * never be used. A fault may be retried multiple times: the initial
 * fault carries flags (a), every subsequent one carries flags (b).
 * Pending signals should always be checked before a retry so that such
 * continuous page faults stay interruptible when necessary.
 */
enum fault_flag {
	FAULT_FLAG_WRITE		= 0x001,
	FAULT_FLAG_MKWRITE		= 0x002,
	FAULT_FLAG_ALLOW_RETRY		= 0x004,
	FAULT_FLAG_RETRY_NOWAIT		= 0x008,
	FAULT_FLAG_KILLABLE		= 0x010,
	FAULT_FLAG_TRIED		= 0x020,
	FAULT_FLAG_USER			= 0x040,
	FAULT_FLAG_REMOTE		= 0x080,
	FAULT_FLAG_INSTRUCTION		= 0x100,
	FAULT_FLAG_INTERRUPTIBLE	= 0x200,
};
853+
893854#endif /* _LINUX_MM_TYPES_H */