#include <linux/hash.h>
#include <linux/slab.h>

#include <asm/machdep.h>
#include <asm/mmu_context.h>
#include <asm/hw_irq.h>
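
/*
 * Each shadow PTE is kept on four hash chains at once: keyed by guest
 * effective address at page and segment granularity, and by guest
 * virtual page number at page and long granularity. The helpers below
 * pick the bucket for each of those four lookup directions.
 */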

#define PTE_SIZE	12

static struct kmem_cache *hpte_cache;

static inline u64 kvmppc_mmu_hash_pte(u64 eaddr)
{
	return hash_64(eaddr >> PTE_SIZE, HPTEG_HASH_BITS_PTE);
}

static inline u64 kvmppc_mmu_hash_pte_long(u64 eaddr)
{
	return hash_64((eaddr & 0x0ffff000) >> PTE_SIZE,
		       HPTEG_HASH_BITS_PTE_LONG);
}

static inline u64 kvmppc_mmu_hash_vpte(u64 vpage)
{
	return hash_64(vpage & 0xfffffffffULL, HPTEG_HASH_BITS_VPTE);
}

static inline u64 kvmppc_mmu_hash_vpte_long(u64 vpage)
{
	return hash_64((vpage & 0xffffff000ULL) >> 12,
		       HPTEG_HASH_BITS_VPTE_LONG);
}
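
/*
 * hash_64() is the generic multiplicative hash from <linux/hash.h>;
 * the HPTEG_HASH_BITS_* constants give the log2 size of each of the
 * four hash tables.
 */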

void kvmppc_mmu_hpte_cache_map(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
{
	u64 index;
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);

	trace_kvm_book3s_mmu_map(pte);

	spin_lock(&vcpu3s->mmu_lock);

	/* Add to ePTE list */
	index = kvmppc_mmu_hash_pte(pte->pte.eaddr);
	hlist_add_head_rcu(&pte->list_pte, &vcpu3s->hpte_hash_pte[index]);

	/* Add to ePTE_long list */
	index = kvmppc_mmu_hash_pte_long(pte->pte.eaddr);
	hlist_add_head_rcu(&pte->list_pte_long,
			   &vcpu3s->hpte_hash_pte_long[index]);

	/* Add to vPTE list */
	index = kvmppc_mmu_hash_vpte(pte->pte.vpage);
	hlist_add_head_rcu(&pte->list_vpte, &vcpu3s->hpte_hash_vpte[index]);

	/* Add to vPTE_long list */
	index = kvmppc_mmu_hash_vpte_long(pte->pte.vpage);
	hlist_add_head_rcu(&pte->list_vpte_long,
			   &vcpu3s->hpte_hash_vpte_long[index]);

	spin_unlock(&vcpu3s->mmu_lock);
}
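
/*
 * Readers walk these chains under rcu_read_lock(), so insertion uses
 * the RCU list primitives and writers serialize on mmu_lock; freeing
 * is deferred via call_rcu() in invalidate_pte() below.
 */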

static void invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);

	trace_kvm_book3s_mmu_invalidate(pte);

	/* Different for 32 and 64 bit */
	kvmppc_mmu_invalidate_pte(vcpu, pte);

	spin_lock(&vcpu3s->mmu_lock);

	/* pte already invalidated in between? */
	if (hlist_unhashed(&pte->list_pte)) {
		spin_unlock(&vcpu3s->mmu_lock);
		return;
	}

	hlist_del_init_rcu(&pte->list_pte);
	hlist_del_init_rcu(&pte->list_pte_long);
	hlist_del_init_rcu(&pte->list_vpte);
	hlist_del_init_rcu(&pte->list_vpte_long);

	if (pte->pte.may_write)
		kvm_release_pfn_dirty(pte->pfn);
	else
		kvm_release_pfn_clean(pte->pfn);

	spin_unlock(&vcpu3s->mmu_lock);

	vcpu3s->hpte_cache_count--;
	call_rcu(&pte->rcu_head, free_pte_rcu);
}
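
/*
 * invalidate_pte() can be reached from several flush paths at once;
 * the hlist_unhashed() re-check under mmu_lock makes later callers
 * back off, so an entry is only unhashed and freed once.
 */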

static void kvmppc_mmu_pte_flush_all(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	struct hpte_cache *pte;
	struct hlist_node *node;
	int i;

	rcu_read_lock();

	for (i = 0; i < HPTEG_HASH_NUM_VPTE_LONG; i++) {
		struct hlist_head *list = &vcpu3s->hpte_hash_vpte_long[i];

		hlist_for_each_entry_rcu(pte, node, list, list_vpte_long)
			invalidate_pte(vcpu, pte);
	}

	rcu_read_unlock();
}

static void kvmppc_mmu_pte_flush_page(struct kvm_vcpu *vcpu, ulong guest_ea)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	struct hlist_head *list;
	struct hlist_node *node;
	struct hpte_cache *pte;

	/* Find the list of entries in the map */
	list = &vcpu3s->hpte_hash_pte[kvmppc_mmu_hash_pte(guest_ea)];

	rcu_read_lock();

	/* Check the list for matching entries and invalidate */
	hlist_for_each_entry_rcu(pte, node, list, list_pte)
		if ((pte->pte.eaddr & ~0xfffUL) == guest_ea)
			invalidate_pte(vcpu, pte);

	rcu_read_unlock();
}

static void kvmppc_mmu_pte_flush_long(struct kvm_vcpu *vcpu, ulong guest_ea)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	struct hlist_head *list;
	struct hlist_node *node;
	struct hpte_cache *pte;

	/* Find the list of entries in the map */
	list = &vcpu3s->hpte_hash_pte_long[
			kvmppc_mmu_hash_pte_long(guest_ea)];

	rcu_read_lock();

	/* Check the list for matching entries and invalidate */
	hlist_for_each_entry_rcu(pte, node, list, list_pte_long)
		if ((pte->pte.eaddr & 0x0ffff000UL) == guest_ea)
			invalidate_pte(vcpu, pte);

	rcu_read_unlock();
}

void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong guest_ea, ulong ea_mask)
{
	trace_kvm_book3s_mmu_flush("", vcpu, guest_ea, ea_mask);
	guest_ea &= ea_mask;

	switch (ea_mask) {
	case ~0xfffUL:
		kvmppc_mmu_pte_flush_page(vcpu, guest_ea);
		break;
	case 0x0ffff000:
		kvmppc_mmu_pte_flush_long(vcpu, guest_ea);
		break;
	case 0:
		/* Doing a complete flush -> start from scratch */
		kvmppc_mmu_pte_flush_all(vcpu);
		break;
	default:
		WARN_ON(1);
		break;
	}
}
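
/*
 * The ea_mask encodes the flush granularity: ~0xfffUL selects a single
 * 4k page, 0x0ffff000 a whole segment's worth of mappings, and 0 drops
 * everything. E.g. kvmppc_mmu_pte_flush(vcpu, ea & ~0xfffUL, ~0xfffUL)
 * flushes just the page containing ea.
 */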

/* Flush with mask 0xfffffffff */
static void kvmppc_mmu_pte_vflush_short(struct kvm_vcpu *vcpu, u64 guest_vp)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	struct hlist_head *list;
	struct hlist_node *node;
	struct hpte_cache *pte;
	u64 vp_mask = 0xfffffffffULL;

	list = &vcpu3s->hpte_hash_vpte[kvmppc_mmu_hash_vpte(guest_vp)];

	rcu_read_lock();

	/* Check the list for matching entries and invalidate */
	hlist_for_each_entry_rcu(pte, node, list, list_vpte)
		if ((pte->pte.vpage & vp_mask) == guest_vp)
			invalidate_pte(vcpu, pte);

	rcu_read_unlock();
}

/* Flush with mask 0xffffff000 */
static void kvmppc_mmu_pte_vflush_long(struct kvm_vcpu *vcpu, u64 guest_vp)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	struct hlist_head *list;
	struct hlist_node *node;
	struct hpte_cache *pte;
	u64 vp_mask = 0xffffff000ULL;

	list = &vcpu3s->hpte_hash_vpte_long[
		kvmppc_mmu_hash_vpte_long(guest_vp)];

	rcu_read_lock();

	/* Check the list for matching entries and invalidate */
	hlist_for_each_entry_rcu(pte, node, list, list_vpte_long)
		if ((pte->pte.vpage & vp_mask) == guest_vp)
			invalidate_pte(vcpu, pte);

	rcu_read_unlock();
}

void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 guest_vp, u64 vp_mask)
{
	trace_kvm_book3s_mmu_flush("v", vcpu, guest_vp, vp_mask);
	guest_vp &= vp_mask;

	switch (vp_mask) {
	case 0xfffffffffULL:
		kvmppc_mmu_pte_vflush_short(vcpu, guest_vp);
		break;
	case 0xffffff000ULL:
		kvmppc_mmu_pte_vflush_long(vcpu, guest_vp);
		break;
	default:
		WARN_ON(1);
		return;
	}
}
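
/*
 * Same dispatch idea for virtual page numbers: a 0xfffffffff mask
 * matches a specific vpage, while 0xffffff000 ignores the low vpage
 * bits and sweeps the wider vpte_long bucket.
 */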

void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	struct hlist_node *node;
	struct hpte_cache *pte;
	int i;

	trace_kvm_book3s_mmu_flush("p", vcpu, pa_start, pa_end);

	rcu_read_lock();

	for (i = 0; i < HPTEG_HASH_NUM_VPTE_LONG; i++) {
		struct hlist_head *list = &vcpu3s->hpte_hash_vpte_long[i];

		hlist_for_each_entry_rcu(pte, node, list, list_vpte_long)
			if ((pte->pte.raddr >= pa_start) &&
			    (pte->pte.raddr < pa_end))
				invalidate_pte(vcpu, pte);
	}

	rcu_read_unlock();
}
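
/*
 * There is no hash keyed by guest real address, so a physical-range
 * flush has to walk every vpte_long bucket and compare raddr against
 * the [pa_start, pa_end) window.
 */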

struct hpte_cache *kvmppc_mmu_hpte_cache_next(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	struct hpte_cache *pte;

	pte = kmem_cache_zalloc(hpte_cache, GFP_KERNEL);
	vcpu3s->hpte_cache_count++;

	if (vcpu3s->hpte_cache_count == HPTEG_CACHE_NUM)
		kvmppc_mmu_pte_flush_all(vcpu);

	return pte;
}
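
/*
 * When the cache hits HPTEG_CACHE_NUM entries the allocation path
 * simply flushes everything rather than evicting selectively; the
 * next faults repopulate the map on demand.
 */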

static void kvmppc_mmu_hpte_init_hash(struct hlist_head *hash_list, int len)
{
	int i;

	for (i = 0; i < len; i++)
		INIT_HLIST_HEAD(&hash_list[i]);
}

int kvmppc_mmu_hpte_sysinit(void)
{
	/* init hpte slab cache */
	hpte_cache = kmem_cache_create("hpte_cache", sizeof(struct hpte_cache),
				       sizeof(struct hpte_cache), 0, NULL);

	return 0;
}
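
/*
 * Presumably paired with a sysexit that calls kmem_cache_destroy() on
 * module teardown; note the align argument equals the object size, so
 * each hpte_cache object occupies its own size-aligned slab slot.
 */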