#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>

#include <asm/tlbflush.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>

/* #define DEBUG_MMU */

#ifdef DEBUG_MMU
#define dprintk(X...) printk(KERN_INFO X)
#else
#define dprintk(X...) do { } while(0)
#endif
static void kvmppc_mmu_book3s_64_reset_msr(struct kvm_vcpu *vcpu)
{
	kvmppc_set_msr(vcpu, MSR_SF);
}
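/*
 * Find the guest SLB entry that maps the given effective address.  An
 * entry with the tb bit set describes a 1T segment, so it is compared
 * against the 1T ESID rather than the 256M one.
 */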
static struct kvmppc_slb *kvmppc_mmu_book3s_64_find_slbe(
				struct kvm_vcpu *vcpu,
				gva_t eaddr)
{
	int i;
	u64 esid = GET_ESID(eaddr);
	u64 esid_1t = GET_ESID_1T(eaddr);

	for (i = 0; i < vcpu->arch.slb_nr; i++) {
		u64 cmp_esid = esid;

		if (!vcpu->arch.slb[i].valid)
			continue;

		if (vcpu->arch.slb[i].tb)
			cmp_esid = esid_1t;

		if (vcpu->arch.slb[i].esid == cmp_esid)
			return &vcpu->arch.slb[i];
	}

	dprintk("KVM: No SLB entry found for 0x%lx [%llx | %llx]\n",
		eaddr, esid, esid_1t);
	for (i = 0; i < vcpu->arch.slb_nr; i++) {
		if (vcpu->arch.slb[i].vsid)
			dprintk(" %d: %c%c%c %llx %llx\n", i,
				vcpu->arch.slb[i].valid ? 'v' : ' ',
				vcpu->arch.slb[i].large ? 'l' : ' ',
				vcpu->arch.slb[i].tb ? 't' : ' ',
				vcpu->arch.slb[i].esid,
				vcpu->arch.slb[i].vsid);
	}

	return NULL;
}
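/*
 * Turn an effective address into a virtual page number by combining
 * the page index within the segment with the VSID of the matching SLB
 * entry.
 */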
static u64 kvmppc_mmu_book3s_64_ea_to_vp(struct kvm_vcpu *vcpu, gva_t eaddr,
					 bool data)
{
	struct kvmppc_slb *slb;

	slb = kvmppc_mmu_book3s_64_find_slbe(vcpu, eaddr);
	if (!slb)
		return 0;

	if (slb->tb)
		return (((u64)eaddr >> 12) & 0xfffffff) |
		       (((u64)slb->vsid) << 28);

	return (((u64)eaddr >> 12) & 0xffff) | (((u64)slb->vsid) << 16);
}
static int kvmppc_mmu_book3s_64_get_pagesize(struct kvmppc_slb *slbe)
{
	return slbe->large ? 24 : 12;
}
static u64 kvmppc_mmu_book3s_64_get_page(struct kvmppc_slb *slbe, gva_t eaddr)
{
	int p = kvmppc_mmu_book3s_64_get_pagesize(slbe);
	return ((eaddr & 0xfffffff) >> p);
}
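/*
 * Compute the host-side address of the PTEG that can hold the mapping.
 * The primary hash is VSID ^ page index, the secondary hash is its
 * complement; both are masked with the hash table size the guest
 * programmed into SDR1.
 */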
static hva_t kvmppc_mmu_book3s_64_get_pteg(
				struct kvmppc_vcpu_book3s *vcpu_book3s,
				struct kvmppc_slb *slbe, gva_t eaddr,
				bool second)
{
	u64 hash, pteg, htabsize;
	u32 page;
	hva_t r;

	page = kvmppc_mmu_book3s_64_get_page(slbe, eaddr);
	htabsize = ((1 << ((vcpu_book3s->sdr1 & 0x1f) + 11)) - 1);

	hash = slbe->vsid ^ page;
	if (second)
		hash = ~hash;
	hash &= ((1ULL << 39ULL) - 1ULL);
	hash &= htabsize;
	hash <<= 7ULL;

	pteg = vcpu_book3s->sdr1 & 0xfffffffffffc0000ULL;
	pteg |= hash;

	dprintk("MMU: page=0x%x sdr1=0x%llx pteg=0x%llx vsid=0x%llx\n",
		page, vcpu_book3s->sdr1, pteg, slbe->vsid);

	/* A PAPR guest keeps a host virtual address in SDR1, not a GPA */
	if (vcpu_book3s->vcpu.arch.papr_enabled)
		r = pteg;
	else
		r = gfn_to_hva(vcpu_book3s->vcpu.kvm, pteg >> PAGE_SHIFT);

	if (kvm_is_error_hva(r))
		return r;
	return r | (pteg & ~PAGE_MASK);
}
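/*
 * Build the abbreviated virtual page number (AVPN) in the layout used
 * by the first doubleword of an HPTE, so it can be compared directly
 * against the entries of a PTEG.
 */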
static u64 kvmppc_mmu_book3s_64_get_avpn(struct kvmppc_slb *slbe, gva_t eaddr)
{
	int p = kvmppc_mmu_book3s_64_get_pagesize(slbe);
	u64 avpn;

	avpn = kvmppc_mmu_book3s_64_get_page(slbe, eaddr);
	avpn |= slbe->vsid << (28 - p);

	if (p < 24)
		avpn >>= ((80 - p) - 56) - 8;

	return avpn;
}
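/*
 * Translate an effective address by walking the guest hash table:
 * find the SLB entry, fetch the primary PTEG and scan it for a
 * matching HPTE, retrying with the secondary hash on a miss.
 */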
static int kvmppc_mmu_book3s_64_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
				      struct kvmppc_pte *gpte, bool data)
{
	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
	struct kvmppc_slb *slbe;
	hva_t ptegp;
	u64 pteg[16];
	u64 avpn = 0;
	int i;
	u8 key = 0;
	bool found = false;
	bool perm_err = false;
	int second = 0;
	ulong mp_ea = vcpu->arch.magic_page_ea;

	/* Magic page override */
	if (unlikely(mp_ea) &&
	    unlikely((eaddr & ~0xfffULL) == (mp_ea & ~0xfffULL)) &&
	    !(vcpu->arch.shared->msr & MSR_PR)) {
		gpte->eaddr = eaddr;
		gpte->vpage = kvmppc_mmu_book3s_64_ea_to_vp(vcpu, eaddr, data);
		gpte->raddr = vcpu->arch.magic_page_pa | (eaddr & 0xfff);
		gpte->raddr &= KVM_PAM;
		gpte->may_execute = true;
		gpte->may_read = true;
		gpte->may_write = true;

		return 0;
	}
	slbe = kvmppc_mmu_book3s_64_find_slbe(vcpu, eaddr);
	if (!slbe)
		goto no_seg_found;

do_second:
	ptegp = kvmppc_mmu_book3s_64_get_pteg(vcpu_book3s, slbe, eaddr, second);
	if (kvm_is_error_hva(ptegp))
		goto no_page_found;

	avpn = kvmppc_mmu_book3s_64_get_avpn(slbe, eaddr);

	if (copy_from_user(pteg, (void __user *)ptegp, sizeof(pteg))) {
		printk(KERN_ERR "KVM can't copy data from 0x%lx!\n", ptegp);
		goto no_page_found;
	}

	/* Problem state honours Kp, privileged state honours Ks */
	if ((vcpu->arch.shared->msr & MSR_PR) && slbe->Kp)
		key = 4;
	else if (!(vcpu->arch.shared->msr & MSR_PR) && slbe->Ks)
		key = 4;

	for (i=0; i<16; i+=2) {
		u64 v = pteg[i];
		u64 r = pteg[i+1];

		/* Skip invalid entries and entries for the other hash */
		if (!(v & HPTE_V_VALID))
			continue;
		if ((v & HPTE_V_SECONDARY) != second)
			continue;

		/* AVPN compare */
		if (HPTE_V_COMPARE(avpn, v)) {
			u8 pp = (r & HPTE_R_PP) | key;
			int eaddr_mask = 0xFFF;

			gpte->eaddr = eaddr;
			gpte->vpage = kvmppc_mmu_book3s_64_ea_to_vp(vcpu,
								    eaddr,
								    data);
			if (slbe->large)
				eaddr_mask = 0xFFFFFF;
			gpte->raddr = (r & HPTE_R_RPN) | (eaddr & eaddr_mask);

			/* ... derive may_read/may_write from pp and
			 * may_execute from the HPTE_R_N bit ... */

			dprintk("KVM MMU: Translated 0x%lx [0x%llx] -> 0x%llx "
				"-> 0x%lx\n",
				eaddr, avpn, gpte->vpage, gpte->raddr);
			found = true;
			break;
		}
	}
	/* Update the R and C bits, so the guest's swapper knows the page
	 * was used */
	if (found) {
		u32 oldr = pteg[i+1];

		/* Set the accessed and, if writable, the dirty flag */
		if (gpte->may_read)
			pteg[i+1] |= HPTE_R_R;
		if (gpte->may_write)
			pteg[i+1] |= HPTE_R_C;
		else
			dprintk("KVM: Mapping read-only page!\n");

		/* Write the updated flags back into the PTEG */
		if (pteg[i+1] != oldr)
			copy_to_user((void __user *)ptegp, pteg, sizeof(pteg));

		return 0;
	} else {
		dprintk("KVM MMU: No PTE found (ea=0x%lx sdr1=0x%llx "
			"ptegp=0x%lx)\n",
			eaddr, to_book3s(vcpu)->sdr1, ptegp);
		for (i = 0; i < 16; i += 2)
			dprintk(" %02d: 0x%llx - 0x%llx (0x%llx)\n",
				i, pteg[i], pteg[i+1], avpn);

		/* Nothing in the primary PTEG, try the secondary hash */
		if (!second) {
			second = HPTE_V_SECONDARY;
			goto do_second;
		}
	}
no_page_found:
	return -ENOENT;

no_seg_found:
	dprintk("KVM MMU: Trigger segment fault\n");
	return -EINVAL;
}
static void kvmppc_mmu_book3s_64_slbmte(struct kvm_vcpu *vcpu, u64 rs, u64 rb)
{
	struct kvmppc_vcpu_book3s *vcpu_book3s;
	u64 esid, esid_1t;
	int slb_nr;
	struct kvmppc_slb *slbe;

	dprintk("KVM MMU: slbmte(0x%llx, 0x%llx)\n", rs, rb);
	vcpu_book3s = to_book3s(vcpu);

	esid = GET_ESID(rb);
	esid_1t = GET_ESID_1T(rb);
	slb_nr = rb & 0xfff;

	if (slb_nr > vcpu->arch.slb_nr)
		return;

	slbe = &vcpu->arch.slb[slb_nr];

	slbe->large = (rs & SLB_VSID_L) ? 1 : 0;
	slbe->tb    = (rs & SLB_VSID_B_1T) ? 1 : 0;
	slbe->esid  = slbe->tb ? esid_1t : esid;
	slbe->vsid  = rs >> 12;
	slbe->valid = (rb & SLB_ESID_V) ? 1 : 0;
	slbe->Ks    = (rs & SLB_VSID_KS) ? 1 : 0;
	slbe->Kp    = (rs & SLB_VSID_KP) ? 1 : 0;
	slbe->nx    = (rs & SLB_VSID_N) ? 1 : 0;
	slbe->class = (rs & SLB_VSID_C) ? 1 : 0;

	slbe->orige = rb & (ESID_MASK | SLB_ESID_V);
	slbe->origv = rs;

	/* Map the new segment in the shadow MMU */
	kvmppc_mmu_map_segment(vcpu, esid << SID_SHIFT);
}
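/*
 * slbmfee and slbmfev simply hand back the original rb/rs values that
 * slbmte stored for the entry.
 */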
static u64 kvmppc_mmu_book3s_64_slbmfee(struct kvm_vcpu *vcpu, u64 slb_nr)
{
	struct kvmppc_slb *slbe;

	if (slb_nr > vcpu->arch.slb_nr)
		return 0;

	slbe = &vcpu->arch.slb[slb_nr];

	return slbe->orige;
}
static u64 kvmppc_mmu_book3s_64_slbmfev(struct kvm_vcpu *vcpu, u64 slb_nr)
{
	struct kvmppc_slb *slbe;

	if (slb_nr > vcpu->arch.slb_nr)
		return 0;

	slbe = &vcpu->arch.slb[slb_nr];

	return slbe->origv;
}
static void kvmppc_mmu_book3s_64_slbie(struct kvm_vcpu *vcpu, u64 ea)
{
	struct kvmppc_slb *slbe;

	dprintk("KVM MMU: slbie(0x%llx)\n", ea);

	slbe = kvmppc_mmu_book3s_64_find_slbe(vcpu, ea);
	if (!slbe)
		return;

	dprintk("KVM MMU: slbie(0x%llx, 0x%llx)\n", ea, slbe->esid);

	slbe->valid = false;

	kvmppc_mmu_map_segment(vcpu, ea);
}
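/*
 * Emulate slbia: invalidate every SLB entry except entry 0, which the
 * architecture defines as preserved.
 */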
static void kvmppc_mmu_book3s_64_slbia(struct kvm_vcpu *vcpu)
{
	int i;

	for (i = 1; i < vcpu->arch.slb_nr; i++)
		vcpu->arch.slb[i].valid = false;

	if (vcpu->arch.shared->msr & MSR_IR) {
		kvmppc_mmu_flush_segments(vcpu);
		kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
	}
}
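/*
 * mtsrin is a 32-bit segment register write; emulate it by building
 * the equivalent slbmte operands (per the Book3 2.01 mapping) and
 * passing them on.
 */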
static void kvmppc_mmu_book3s_64_mtsrin(struct kvm_vcpu *vcpu, u32 srnum,
					ulong value)
{
	u64 rb = 0, rs = 0;

	dprintk("KVM MMU: mtsrin(0x%x, 0x%lx)\n", srnum, value);

	/* ESID = srnum */
	rb |= (srnum & 0xf) << 28;
	/* Set the valid bit */
	rb |= 1 << 27;
	/* Index = ESID */
	rb |= srnum;

	/* VSID = VSID */
	rs |= (value & 0xfffffff) << 12;
	/* flags = flags */
	rs |= ((value >> 28) & 0x7) << 9;

	kvmppc_mmu_book3s_64_slbmte(vcpu, rs, rb);
}
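/*
 * Emulate tlbie by flushing the shadow PTEs for the given virtual
 * address; large pages widen the flush mask.
 */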
static void kvmppc_mmu_book3s_64_tlbie(struct kvm_vcpu *vcpu, ulong va,
				       bool large)
{
	u64 mask = 0xFFFFFFFFFULL;

	dprintk("KVM MMU: tlbie(0x%lx)\n", va);

	if (large)
		mask = 0xFFFFFF000ULL;
	kvmppc_mmu_pte_vflush(vcpu, va >> 12, mask);
}
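/*
 * Map a guest ESID to the VSID used by the shadow MMU.  Real-mode
 * accesses get VSID_REAL* tags so that translated and untranslated
 * mappings of the same ESID never collide.
 */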
static int kvmppc_mmu_book3s_64_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
					     u64 *vsid)
{
	ulong ea = esid << SID_SHIFT;
	struct kvmppc_slb *slb;
	u64 gvsid = esid;
	ulong mp_ea = vcpu->arch.magic_page_ea;

	if (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
		slb = kvmppc_mmu_book3s_64_find_slbe(vcpu, ea);
		if (slb)
			gvsid = slb->vsid;
	}

	switch (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
	case 0:
		*vsid = VSID_REAL | esid;
		break;
	case MSR_IR:
		*vsid = VSID_REAL_IR | gvsid;
		break;
	case MSR_DR:
		*vsid = VSID_REAL_DR | gvsid;
		break;
	case MSR_DR|MSR_IR:
		if (!slb)
			goto no_slb;
		*vsid = gvsid;
		break;
	default:
		BUG();
		break;
	}

	if (vcpu->arch.shared->msr & MSR_PR)
		*vsid |= VSID_PR;

	return 0;

no_slb:
	/* Catch magic page case */
	if (unlikely(mp_ea) &&
	    unlikely(esid == (mp_ea >> SID_SHIFT)) &&
	    !(vcpu->arch.shared->msr & MSR_PR)) {
		*vsid = VSID_REAL | esid;
		return 0;
	}

	return -EINVAL;
}
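/* The guest signals 32-byte dcbz mode through a bit in HID5 */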
static bool kvmppc_mmu_book3s_64_is_dcbz32(struct kvm_vcpu *vcpu)
{
	return (to_book3s(vcpu)->hid[5] & 0x80);
}
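/* Wire up the Book3S-64 MMU emulation callbacks for this vcpu */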
void kvmppc_mmu_book3s_64_init(struct kvm_vcpu *vcpu)
{
	struct kvmppc_mmu *mmu = &vcpu->arch.mmu;

	mmu->mfsrin = NULL;
	mmu->mtsrin = kvmppc_mmu_book3s_64_mtsrin;
	mmu->slbmte = kvmppc_mmu_book3s_64_slbmte;
	mmu->slbmfee = kvmppc_mmu_book3s_64_slbmfee;
	mmu->slbmfev = kvmppc_mmu_book3s_64_slbmfev;
	mmu->slbie = kvmppc_mmu_book3s_64_slbie;
	mmu->slbia = kvmppc_mmu_book3s_64_slbia;
	mmu->xlate = kvmppc_mmu_book3s_64_xlate;
	mmu->reset_msr = kvmppc_mmu_book3s_64_reset_msr;
	mmu->tlbie = kvmppc_mmu_book3s_64_tlbie;
	mmu->esid_to_vsid = kvmppc_mmu_book3s_64_esid_to_vsid;
	mmu->ea_to_vp = kvmppc_mmu_book3s_64_ea_to_vp;
	mmu->is_dcbz32 = kvmppc_mmu_book3s_64_is_dcbz32;
}