@@ -54,6 +54,70 @@ static void *real_vmalloc_addr(void *x)
5454 return __va (addr );
5555}
5656
/*
 * Add this HPTE into the reverse-map chain for the real page.
 *
 * Must be called with the rmap chain lock held; the final store to
 * *rmap below releases that lock as a side effect.
 *
 * @kvm:       the VM whose arch.revmap array holds the chain entries
 * @rev:       the revmap entry for the HPTE being inserted
 * @rmap:      pointer to the (locked) rmap word for the guest real page
 * @pte_index: index of the HPTE (and its revmap entry) being inserted
 * @realmode:  nonzero when running in real mode, in which case pointers
 *             into the vmalloc'd revmap array must be translated with
 *             real_vmalloc_addr() before being dereferenced
 */
static void kvmppc_add_revmap_chain (struct kvm * kvm , struct revmap_entry * rev ,
	unsigned long * rmap , long pte_index , int realmode )
{
	struct revmap_entry * head , * tail ;
	unsigned long i ;

	if (* rmap & KVMPPC_RMAP_PRESENT ) {
		/*
		 * A chain already exists: splice the new entry in at the
		 * tail of the circular doubly-linked list (head->back is
		 * the current tail).
		 */
		i = * rmap & KVMPPC_RMAP_INDEX ;
		head = & kvm -> arch .revmap [i ];
		if (realmode )
			head = real_vmalloc_addr (head );
		tail = & kvm -> arch .revmap [head -> back ];
		if (realmode )
			tail = real_vmalloc_addr (tail );
		rev -> forw = i ;
		rev -> back = head -> back ;
		tail -> forw = pte_index ;
		head -> back = pte_index ;
	} else {
		/* No chain yet: this entry becomes a singleton list. */
		rev -> forw = rev -> back = pte_index ;
		i = pte_index ;
	}
	/*
	 * Order the list-pointer updates above before the store that
	 * publishes the head index and drops the chain lock, so other
	 * CPUs that acquire the lock see a consistent chain.
	 */
	smp_wmb ();
	* rmap = i | KVMPPC_RMAP_REFERENCED | KVMPPC_RMAP_PRESENT ; /* unlock */
}
86+
/*
 * Remove this HPTE from the reverse-map chain for its real page.
 *
 * Looks up the guest real page from the saved guest view of the HPTE
 * (rev->guest_rpte), locates its memslot and rmap word, and unlinks the
 * entry from the circular doubly-linked revmap chain under lock_rmap().
 * Always called via real_vmalloc_addr() translations, i.e. this path
 * appears to run in real mode — NOTE(review): confirm against callers.
 *
 * @kvm:       the VM whose arch.revmap array holds the chain entries
 * @pte_index: index of the HPTE (and its revmap entry) being removed
 * @hpte_v:    first doubleword of the HPTE, used to derive the page size
 */
static void remove_revmap_chain (struct kvm * kvm , long pte_index ,
	unsigned long hpte_v )
{
	struct revmap_entry * rev , * next , * prev ;
	unsigned long gfn , ptel , head ;
	struct kvm_memory_slot * memslot ;
	unsigned long * rmap ;

	rev = real_vmalloc_addr (& kvm -> arch .revmap [pte_index ]);
	ptel = rev -> guest_rpte ;
	gfn = hpte_rpn (ptel , hpte_page_size (hpte_v , ptel ));
	memslot = builtin_gfn_to_memslot (kvm , gfn );
	/* No valid memslot for this gfn: nothing to unlink. */
	if (!memslot || (memslot -> flags & KVM_MEMSLOT_INVALID ))
		return ;

	rmap = real_vmalloc_addr (& memslot -> rmap [gfn - memslot -> base_gfn ]);
	lock_rmap (rmap );

	head = * rmap & KVMPPC_RMAP_INDEX ;
	/* Unlink this entry from the circular doubly-linked list. */
	next = real_vmalloc_addr (& kvm -> arch .revmap [rev -> forw ]);
	prev = real_vmalloc_addr (& kvm -> arch .revmap [rev -> back ]);
	next -> back = rev -> back ;
	prev -> forw = rev -> forw ;
	if (head == pte_index ) {
		/* We were the head; advance it to our successor. */
		head = rev -> forw ;
		if (head == pte_index )
			/* We were the only entry: chain is now empty. */
			* rmap &= ~(KVMPPC_RMAP_PRESENT | KVMPPC_RMAP_INDEX );
		else
			* rmap = (* rmap & ~KVMPPC_RMAP_INDEX ) | head ;
	}
	unlock_rmap (rmap );
}
120+
57121long kvmppc_h_enter (struct kvm_vcpu * vcpu , unsigned long flags ,
58122 long pte_index , unsigned long pteh , unsigned long ptel )
59123{
@@ -66,6 +130,7 @@ long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
66130 struct kvm_memory_slot * memslot ;
67131 unsigned long * physp , pte_size ;
68132 unsigned long is_io ;
133+ unsigned long * rmap ;
69134 bool realmode = vcpu -> arch .vcore -> vcore_state == VCORE_RUNNING ;
70135
71136 psize = hpte_page_size (pteh , ptel );
@@ -83,6 +148,7 @@ long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
83148 if (!slot_is_aligned (memslot , psize ))
84149 return H_PARAMETER ;
85150 slot_fn = gfn - memslot -> base_gfn ;
151+ rmap = & memslot -> rmap [slot_fn ];
86152
87153 physp = kvm -> arch .slot_phys [memslot -> id ];
88154 if (!physp )
@@ -164,13 +230,25 @@ long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
164230 }
165231
166232 /* Save away the guest's idea of the second HPTE dword */
167- rev = real_vmalloc_addr (& kvm -> arch .revmap [pte_index ]);
233+ rev = & kvm -> arch .revmap [pte_index ];
234+ if (realmode )
235+ rev = real_vmalloc_addr (rev );
168236 if (rev )
169237 rev -> guest_rpte = g_ptel ;
238+
239+ /* Link HPTE into reverse-map chain */
240+ if (realmode )
241+ rmap = real_vmalloc_addr (rmap );
242+ lock_rmap (rmap );
243+ kvmppc_add_revmap_chain (kvm , rev , rmap , pte_index , realmode );
244+
170245 hpte [1 ] = ptel ;
246+
247+ /* Write the first HPTE dword, unlocking the HPTE and making it valid */
171248 eieio ();
172249 hpte [0 ] = pteh ;
173250 asm volatile ("ptesync" : : : "memory" );
251+
174252 vcpu -> arch .gpr [4 ] = pte_index ;
175253 return H_SUCCESS ;
176254}
@@ -220,6 +298,8 @@ long kvmppc_h_remove(struct kvm_vcpu *vcpu, unsigned long flags,
220298 vcpu -> arch .gpr [4 ] = v = hpte [0 ] & ~HPTE_V_HVLOCK ;
221299 vcpu -> arch .gpr [5 ] = r = hpte [1 ];
222300 rb = compute_tlbie_rb (v , r , pte_index );
301+ remove_revmap_chain (kvm , pte_index , v );
302+ smp_wmb ();
223303 hpte [0 ] = 0 ;
224304 if (!(flags & H_LOCAL )) {
225305 while (!try_lock_tlbie (& kvm -> arch .tlbie_lock ))
@@ -293,6 +373,8 @@ long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
293373 flags |= (hp [1 ] >> 5 ) & 0x0c ;
294374 args [i * 2 ] = ((0x80 | flags ) << 56 ) + pte_index ;
295375 tlbrb [n_inval ++ ] = compute_tlbie_rb (hp [0 ], hp [1 ], pte_index );
376+ remove_revmap_chain (kvm , pte_index , hp [0 ]);
377+ smp_wmb ();
296378 hp [0 ] = 0 ;
297379 }
298380 if (n_inval == 0 )
0 commit comments