 
 #ifdef HAVE_GENERIC_MMU_GATHER
 
+#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER
+
 static bool tlb_next_batch(struct mmu_gather *tlb)
 {
 	struct mmu_gather_batch *batch;
@@ -41,19 +43,72 @@ static bool tlb_next_batch(struct mmu_gather *tlb)
 	return true;
 }
 
+static void tlb_batch_pages_flush(struct mmu_gather *tlb)
+{
+	struct mmu_gather_batch *batch;
+
+	for (batch = &tlb->local; batch && batch->nr; batch = batch->next) {
+		free_pages_and_swap_cache(batch->pages, batch->nr);
+		batch->nr = 0;
+	}
+	tlb->active = &tlb->local;
+}
+
+static void tlb_batch_list_free(struct mmu_gather *tlb)
+{
+	struct mmu_gather_batch *batch, *next;
+
+	for (batch = tlb->local.next; batch; batch = next) {
+		next = batch->next;
+		free_pages((unsigned long)batch, 0);
+	}
+	tlb->local.next = NULL;
+}
+
+bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, int page_size)
+{
+	struct mmu_gather_batch *batch;
+
+	VM_BUG_ON(!tlb->end);
+
+#ifdef CONFIG_HAVE_MMU_GATHER_PAGE_SIZE
+	VM_WARN_ON(tlb->page_size != page_size);
+#endif
+
+	batch = tlb->active;
+	/*
+	 * Add the page and check if we are full. If so
+	 * force a flush.
+	 */
+	batch->pages[batch->nr++] = page;
+	if (batch->nr == batch->max) {
+		if (!tlb_next_batch(tlb))
+			return true;
+		batch = tlb->active;
+	}
+	VM_BUG_ON_PAGE(batch->nr > batch->max, page);
+
+	return false;
+}
+
+#endif /* HAVE_MMU_GATHER_NO_GATHER */
+
 void arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
 			unsigned long start, unsigned long end)
 {
 	tlb->mm = mm;
 
 	/* Is it from 0 to ~0? */
 	tlb->fullmm = !(start | (end + 1));
+
+#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER
 	tlb->need_flush_all = 0;
 	tlb->local.next = NULL;
 	tlb->local.nr   = 0;
 	tlb->local.max  = ARRAY_SIZE(tlb->__pages);
 	tlb->active     = &tlb->local;
 	tlb->batch_count = 0;
+#endif
 
 #ifdef CONFIG_HAVE_RCU_TABLE_FREE
 	tlb->batch = NULL;
@@ -67,16 +122,12 @@ void arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
 
 void tlb_flush_mmu_free(struct mmu_gather *tlb)
 {
-	struct mmu_gather_batch *batch;
-
 #ifdef CONFIG_HAVE_RCU_TABLE_FREE
 	tlb_table_flush(tlb);
 #endif
-	for (batch = &tlb->local; batch && batch->nr; batch = batch->next) {
-		free_pages_and_swap_cache(batch->pages, batch->nr);
-		batch->nr = 0;
-	}
-	tlb->active = &tlb->local;
+#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER
+	tlb_batch_pages_flush(tlb);
+#endif
 }
 
 void tlb_flush_mmu(struct mmu_gather *tlb)
@@ -92,8 +143,6 @@ void tlb_flush_mmu(struct mmu_gather *tlb)
 void arch_tlb_finish_mmu(struct mmu_gather *tlb,
 		unsigned long start, unsigned long end, bool force)
 {
-	struct mmu_gather_batch *batch, *next;
-
 	if (force) {
 		__tlb_reset_range(tlb);
 		__tlb_adjust_range(tlb, start, end - start);
@@ -103,45 +152,9 @@ void arch_tlb_finish_mmu(struct mmu_gather *tlb,
 
 	/* keep the page table cache within bounds */
 	check_pgt_cache();
-
-	for (batch = tlb->local.next; batch; batch = next) {
-		next = batch->next;
-		free_pages((unsigned long)batch, 0);
-	}
-	tlb->local.next = NULL;
-}
-
-/* __tlb_remove_page
- *	Must perform the equivalent to __free_pte(pte_get_and_clear(ptep)), while
- *	handling the additional races in SMP caused by other CPUs caching valid
- *	mappings in their TLBs. Returns the number of free page slots left.
- *	When out of page slots we must call tlb_flush_mmu().
- *	Returns true if the caller should flush.
- */
-bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, int page_size)
-{
-	struct mmu_gather_batch *batch;
-
-	VM_BUG_ON(!tlb->end);
-
-#ifdef CONFIG_HAVE_MMU_GATHER_PAGE_SIZE
-	VM_WARN_ON(tlb->page_size != page_size);
+#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER
+	tlb_batch_list_free(tlb);
 #endif
-
-	batch = tlb->active;
-	/*
-	 * Add the page and check if we are full. If so
-	 * force a flush.
-	 */
-	batch->pages[batch->nr++] = page;
-	if (batch->nr == batch->max) {
-		if (!tlb_next_batch(tlb))
-			return true;
-		batch = tlb->active;
-	}
-	VM_BUG_ON_PAGE(batch->nr > batch->max, page);
-
-	return false;
-}
 
 #endif /* HAVE_GENERIC_MMU_GATHER */
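
Context note (not part of the patch): __tlb_remove_page_size() only queues a page in the current batch and reports, via its return value, whether the gather is full and the caller must flush. The sketch below is a paraphrase of how the inline wrappers in asm-generic/tlb.h of this era typically drive that API; exact names and signatures should be checked against the tree in question rather than taken from this note.

static inline void tlb_remove_page_size(struct mmu_gather *tlb,
					struct page *page, int page_size)
{
	/* A "true" return means the batch is full: flush (and free) now. */
	if (__tlb_remove_page_size(tlb, page, page_size))
		tlb_flush_mmu(tlb);
}

static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	/* Common case: base-size pages. */
	tlb_remove_page_size(tlb, page, PAGE_SIZE);
}

With CONFIG_HAVE_MMU_GATHER_NO_GATHER selected, an architecture (s390 being the motivating case) supplies its own __tlb_remove_page_size() and flush helpers, so the generic batch bookkeeping guarded by the new #ifndef blocks above is compiled out.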