27 #include <asm/machdep.h>
28 #include <asm/mmu_context.h>
29 #include <asm/hw_irq.h>
61 if (vcpu->arch.shared->msr & MSR_PR)
64 sid_map_mask = kvmppc_sid_hash(vcpu, gvsid);
65 map = &to_book3s(vcpu)->sid_map[sid_map_mask];
67 trace_kvm_book3s_slb_found(gvsid, map->host_vsid);
71 map = &to_book3s(vcpu)->sid_map[SID_MAP_MASK - sid_map_mask];
73 trace_kvm_book3s_slb_found(gvsid, map->host_vsid);
77 trace_kvm_book3s_slb_fail(sid_map_mask, gvsid);
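The lines quoted from find_sid_vsid() implement a two-probe lookup in the guest-to-host VSID map: the hashed index selects a primary slot, and its mirror at SID_MAP_MASK - sid_map_mask is probed as a second chance before the miss is traced. A minimal standalone sketch of that probing order follows; the sid_map_entry type, the sid_map_probe() helper and the 9-bit map size are illustrative stand-ins, not the kernel's definitions.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define SID_MAP_BITS 9
#define SID_MAP_NUM  (1 << SID_MAP_BITS)
#define SID_MAP_MASK (SID_MAP_NUM - 1)

struct sid_map_entry {                  /* simplified stand-in for kvmppc_sid_map */
        uint64_t guest_vsid;
        uint64_t host_vsid;
        bool     valid;
};

/* Fold the guest VSID down to a map index, as kvmppc_sid_hash() does. */
static uint16_t sid_hash(uint64_t gvsid)
{
        uint16_t h = 0;
        int i;

        for (i = 0; i < 8; i++)
                h ^= (uint16_t)(gvsid >> (SID_MAP_BITS * i));
        return h & SID_MAP_MASK;
}

/* Two-probe lookup: the hashed slot first, then its mirrored slot. */
static struct sid_map_entry *sid_map_probe(struct sid_map_entry *map, uint64_t gvsid)
{
        uint16_t idx = sid_hash(gvsid);

        if (map[idx].valid && map[idx].guest_vsid == gvsid)
                return &map[idx];

        idx = SID_MAP_MASK - idx;       /* second chance: mirrored index */
        if (map[idx].valid && map[idx].guest_vsid == gvsid)
                return &map[idx];

        return NULL;                    /* miss: the caller must create a mapping */
}

On a miss the callers fall back to create_sid_map(), whose excerpt appears further down.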
96 if (is_error_pfn(hpaddr)) {
106 map = find_sid_vsid(vcpu, vsid);
110 map = find_sid_vsid(vcpu, vsid);
114 vsid, orig_pte->eaddr);
131 kvmppc_mmu_flush_icache(hpaddr >> PAGE_SHIFT);
140 if (ppc_md.hpte_remove(hpteg) < 0) {
145 ret = ppc_md.hpte_insert(hpteg, vpn, hpaddr, rflags, vflags,
157 trace_kvm_book3s_64_mmu_map(rflags, hpteg,
158 vpn, hpaddr, orig_pte);
167 pte->slot = hpteg + (ret & 7);
169 pte->pte = *orig_pte;
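The lines quoted from kvmppc_mmu_map_page() (131-169) show the host page-table insertion path: the translation is offered to the primary hash group through ppc_md.hpte_insert(), the hash is inverted and the secondary flag toggled when that group is full, ppc_md.hpte_remove() evicts an entry only after both groups have been tried, and the winning slot is recorded in pte->slot. Below is a self-contained toy model of that retry policy, assuming a tiny in-memory table; the struct hpte layout, the bit values and the slot-0 eviction are simplifications, not the ppc_md implementation.

#include <stdint.h>

#define HPTES_PER_GROUP 8
#define HPT_GROUPS      256             /* toy table size, not the real HPT */

struct hpte { uint64_t v, r; };         /* simplified PTE: flags word + real-address word */

static struct hpte hpt[HPT_GROUPS * HPTES_PER_GROUP];

/* Claim a free slot in one hash group; -1 means the group is full. */
static int try_insert(unsigned long group, uint64_t v, uint64_t r)
{
        int i;

        for (i = 0; i < HPTES_PER_GROUP; i++) {
                if (!(hpt[group + i].v & 1)) {
                        hpt[group + i].v = v | 1;
                        hpt[group + i].r = r;
                        return i;
                }
        }
        return -1;
}

/*
 * Primary/secondary retry as in kvmppc_mmu_map_page(): try the primary
 * group, flip the hash for the secondary group on failure, and only evict
 * an existing entry once both groups have turned out to be full.
 */
static long map_one(unsigned long hash, uint64_t v, uint64_t r)
{
        unsigned long hash_mask = HPT_GROUPS - 1;
        int attempt = 0, ret;

        for (;;) {
                unsigned long group = (hash & hash_mask) * HPTES_PER_GROUP;

                if (attempt > 1)
                        hpt[group].v = 0;          /* crude eviction of slot 0 */

                ret = try_insert(group, v, r);
                if (ret >= 0)
                        return group + (ret & 7);  /* what the kernel stores in pte->slot */

                hash = ~hash;                      /* primary <-> secondary group */
                v ^= 2;                            /* toggle a "secondary" bit, like HPTE_V_SECONDARY */
                attempt++;
        }
}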
184 static int backwards_map = 0;
186 if (vcpu->arch.shared->msr & MSR_PR)
192 sid_map_mask = kvmppc_sid_hash(vcpu, gvsid);
194 sid_map_mask = SID_MAP_MASK - sid_map_mask;
196 map = &to_book3s(vcpu)->sid_map[sid_map_mask];
199 backwards_map = !backwards_map;
214 trace_kvm_book3s_slb_map(sid_map_mask, gvsid, map->host_vsid);
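create_sid_map() alternates between the hashed slot and its mirror on successive allocations via the static backwards_map flag, so colliding guest VSIDs do not keep evicting each other from the same slot. A sketch continuing the toy sid_map types above; the host-VSID counter is a placeholder, whereas the kernel allocates scrambled proto-VSIDs and flushes the whole map when they run out.

/* Allocate a map slot, flipping between a slot and its mirror each call. */
static struct sid_map_entry *sid_map_create(struct sid_map_entry *map, uint64_t gvsid)
{
        static bool backwards_map;              /* flips on every call */
        static uint64_t next_host_vsid = 1;     /* toy allocator, not the kernel's */
        uint16_t idx = sid_hash(gvsid);

        if (backwards_map)                      /* spread collisions over both probe slots */
                idx = SID_MAP_MASK - idx;
        backwards_map = !backwards_map;         /* take the other slot next time */

        map[idx].guest_vsid = gvsid;
        map[idx].host_vsid  = next_host_vsid++;
        map[idx].valid      = true;
        return &map[idx];
}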
223 int max_slb_size = 64;
224 int found_inval = -1;
231 for (i = 1; i < svcpu->slb_max; i++) {
234 else if ((svcpu->slb[i].esid & ESID_MASK) == esid) {
241 if (found_inval > 0) {
252 if ((svcpu->slb_max) == max_slb_size)
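The kvmppc_mmu_next_segment() excerpt picks a shadow SLB slot for a guest segment: entries above slot 0 are scanned for an ESID that already matches or for a previously invalidated hole, a new slot is appended otherwise, and the shadow SLB is purged once slb_max reaches max_slb_size. A compact standalone version of that selection logic; the SLB_ESID_V and ESID_MASK values and the unconditional purge are simplified, and the kernel additionally clamps max_slb_size to mmu_slb_size.

#include <stdint.h>
#include <string.h>

#define SLB_ESID_V  0x0000000008000000ULL       /* "valid" bit in the ESID word */
#define ESID_MASK   (~0xFFFFFFFULL)             /* strip the 256M segment offset */
#define SLB_SIZE    64

struct slbe { uint64_t esid, vsid; };

static struct slbe slb[SLB_SIZE];
static int slb_max = 1;                         /* entry 0 stays reserved, as in the shadow SLB */

/* Pick the shadow SLB slot for esid: reuse a matching or invalidated entry,
 * otherwise grow, flushing everything when the table is full. */
static int next_segment(uint64_t esid)
{
        int found_inval = -1;
        int i;

        for (i = 1; i < slb_max; i++) {
                if (!(slb[i].esid & SLB_ESID_V))
                        found_inval = i;                        /* remember a free hole */
                else if ((slb[i].esid & ESID_MASK) == esid)
                        return i;                               /* already mapped: overwrite it */
        }

        if (found_inval > 0)
                return found_inval;

        if (slb_max == SLB_SIZE) {                              /* overflow: purge and restart */
                memset(slb, 0, sizeof(slb));
                slb_max = 1;
        }
        return slb_max++;
}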
274 slb_index = kvmppc_mmu_next_segment(vcpu, eaddr & ESID_MASK);
276 if (vcpu->arch.mmu.esid_to_vsid(vcpu, esid, &gvsid)) {
278 svcpu->slb[slb_index].esid = 0;
283 map = find_sid_vsid(vcpu, gvsid);
285 map = create_sid_map(vcpu, gvsid);
291 slb_esid |= slb_index;
293 svcpu->slb[slb_index].esid = slb_esid;
294 svcpu->slb[slb_index].vsid = slb_vsid;
296 trace_kvm_book3s_slbmte(slb_vsid, slb_esid);
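kvmppc_mmu_map_segment() ties the pieces together: it looks up a shadow SLB slot, asks the guest MMU for the VSID behind the faulting ESID, invalidates the slot if the guest has no mapping, resolves or creates the host VSID through the sid_map, and writes the composed esid/vsid pair into the shadow SLB. The sketch below builds on the toy helpers above; esid_to_vsid is passed as a callback standing in for vcpu->arch.mmu.esid_to_vsid, the 12-bit VSID shift mirrors the SLBE layout, and the permission-flag handling between the quoted lines is omitted.

/* Build and install one shadow SLB entry for eaddr (toy types from above). */
static int map_segment(uint64_t eaddr,
                       int (*esid_to_vsid)(uint64_t esid, uint64_t *gvsid),
                       struct sid_map_entry *sid_map)
{
        uint64_t esid = eaddr & ESID_MASK;
        uint64_t gvsid;
        struct sid_map_entry *map;
        int idx = next_segment(esid);

        if (esid_to_vsid(eaddr >> 28, &gvsid)) {        /* 28 = 256M segment shift */
                slb[idx].esid = 0;                      /* guest has no mapping: invalidate */
                return -1;
        }

        map = sid_map_probe(sid_map, gvsid);
        if (!map)
                map = sid_map_create(sid_map, gvsid);

        slb[idx].esid = esid | SLB_ESID_V | idx;        /* ESID, valid bit, slot index */
        slb[idx].vsid = map->host_vsid << 12;           /* VSID shifted into the SLBE VSID word */
        return 0;
}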
307 svcpu->slb[0].esid = 0;