#include <linux/types.h>
#include <linux/string.h>
#include <linux/module.h>
#include <asm/tlbflush.h>
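
/*
 * Translate the address of a vmalloc'd object to the equivalent address in
 * the kernel linear mapping, so that it can be dereferenced in real mode.
 */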
static void *real_vmalloc_addr(void *x)
	unsigned long addr = (unsigned long) x;
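
/*
 * Link a newly inserted HPTE into the reverse-mapping chain for its guest
 * real page; the chain head lives in the memslot rmap array and the
 * forward/back links live in kvm->arch.revmap.
 */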
			     unsigned long *rmap, long pte_index, int realmode)
		head = &kvm->arch.revmap[i];
			head = real_vmalloc_addr(head);
		tail = &kvm->arch.revmap[head->back];
			tail = real_vmalloc_addr(tail);
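
/*
 * Unlink an HPTE from its reverse-mapping chain, fixing up the neighbouring
 * entries and the chain head kept in the rmap word.
 */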
			       unsigned long hpte_v, unsigned long hpte_r)
	unsigned long gfn, ptel, head;

	gfn = hpte_rpn(ptel, hpte_page_size(hpte_v, ptel));
	rmap = real_vmalloc_addr(&memslot->arch.rmap[gfn - memslot->base_gfn]);

	next = real_vmalloc_addr(&kvm->arch.revmap[rev->forw]);
	prev = real_vmalloc_addr(&kvm->arch.revmap[rev->back]);
	if (head == pte_index) {
		if (head == pte_index)

			*rmap = (*rmap & ~KVMPPC_RMAP_INDEX) | head;
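
/*
 * Look up the Linux PTE for a host virtual address, returning the PTE and
 * the size of the page backing the mapping.
 */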
static pte_t lookup_linux_pte(struct kvm_vcpu *vcpu, unsigned long hva,
			      int writing, unsigned long *pte_sizep)
	unsigned long ps = *pte_sizep;

		*pte_sizep = 1ul << shift;

	return kvmppc_read_update_linux_pte(ptep, writing);
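
/* Unlock an HPTE: release barrier, then restore the (unlocked) first dword. */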
static inline void unlock_hpte(unsigned long *hpte, unsigned long hpte_v)
	asm volatile(PPC_RELEASE_BARRIER "" : : : "memory");
		    long pte_index, unsigned long pteh, unsigned long ptel)
	struct kvm *kvm = vcpu->kvm;
	unsigned long i, pa, gpa, gfn, psize;
	unsigned long slot_fn, hva;
	unsigned long g_ptel = ptel;
	unsigned long *physp, pte_size;
	unsigned int writing;
	unsigned long mmu_seq;
	unsigned long rcbits;

	psize = hpte_page_size(pteh, ptel);
	writing = hpte_is_writable(ptel);
	mmu_seq = kvm->mmu_notifier_seq;

	if (!slot_is_aligned(memslot, psize))
	rmap = &memslot->arch.rmap[slot_fn];

	if (!kvm->arch.using_mmu_notifiers) {
		physp = kvm->arch.slot_phys[memslot->id];
			physp = real_vmalloc_addr(physp);

		hva = __gfn_to_hva_memslot(memslot, gfn);
		pte = lookup_linux_pte(vcpu, hva, writing, &pte_size);
				ptel = hpte_make_readonly(ptel);
			is_io = hpte_cache_bits(pte_val(pte));

	if (pte_size < psize)
	if (pa && pte_size > psize)
		pa |= gpa & (pte_size - 1);

	if (is_io != ~0ul && !hpte_cache_flags_ok(ptel, is_io)) {

	if (pte_index >= kvm->arch.hpt_npte)
	if (likely((flags & H_EXACT) == 0)) {
		hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
		for (i = 0; i < 8; ++i) {
		for (i = 0; i < 8; ++i) {
	hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));

		rev = real_vmalloc_addr(rev);
		rmap = real_vmalloc_addr(rmap);
		if (kvm->arch.using_mmu_notifiers &&
		    mmu_notifier_retry(vcpu, mmu_seq)) {
			pteh &= ~HPTE_V_VALID;

	asm volatile("ptesync" : : : "memory");
#define LOCK_TOKEN (*(u32 *)(&get_paca()->lock_token))

static inline int try_lock_tlbie(unsigned int *lock)
	unsigned int tmp, old;

	asm volatile("1:lwarx %1,0,%2\n"
		     : "=&r" (tmp), "=&r" (old)
		     : "r" (lock), "r" (token)
		     unsigned long pte_index, unsigned long avpn,
	struct kvm *kvm = vcpu->kvm;
	unsigned long v, r, rb;

	if (pte_index >= kvm->arch.hpt_npte)
	hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
	    ((flags & H_AVPN) && (hpte[0] & ~0x7fUL) != avpn) ||
	    ((flags & H_ANDCOND) && (hpte[0] & avpn) != 0)) {

	rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
		hpte[0] &= ~HPTE_V_VALID;
		rb = compute_tlbie_rb(v, hpte[1], pte_index);
			while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
			asm volatile("ptesync" : : : "memory");
			asm volatile(PPC_TLBIE(%1,%0)"; eieio; tlbsync"
				     : : "r" (rb), "r" (kvm->arch.lpid));
			asm volatile("ptesync" : : : "memory");
			kvm->arch.tlbie_lock = 0;

			asm volatile("ptesync" : : : "memory");
			asm volatile("tlbiel %0" : : "r" (rb));
			asm volatile("ptesync" : : : "memory");

		remove_revmap_chain(kvm, pte_index, rev, v, hpte[1]);
	unlock_hpte(hpte, 0);

	vcpu->arch.gpr[4] = v;
	vcpu->arch.gpr[5] = r;
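
/*
 * Real-mode handler for the H_BULK_REMOVE hypercall: the request list is
 * passed in GPR4 onwards and processed in batches of up to four entries,
 * so that a single tlbie sequence (or tlbiel loop) covers each batch.
 */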
	struct kvm *kvm = vcpu->kvm;
	unsigned long *args = &vcpu->arch.gpr[4];
	unsigned long *hp, *hptes[4], tlbrb[4];
	long int i, j, k, n, found, indexes[4];
	long int ret = H_SUCCESS;

	for (i = 0; i < 4 && ret == H_SUCCESS; ) {
			flags = pte_index >> 56;
			pte_index &= ((1ul << 56) - 1);

			if (req != 1 || flags == 3 ||
			    pte_index >= kvm->arch.hpt_npte) {

			hp = (unsigned long *)
				(kvm->arch.hpt_virt + (pte_index << 4));

				if (!(hp[0] & args[j + 1]))
				if ((hp[0] & ~0x7fUL) == args[j + 1])

			rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
				args[j] |= rcbits << (56 - 5);

			hp[0] &= ~HPTE_V_VALID;
			tlbrb[n] = compute_tlbie_rb(hp[0], hp[1], pte_index);

			while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
			asm volatile("ptesync" : : : "memory");
			for (k = 0; k < n; ++k)
					     "r" (kvm->arch.lpid));
			asm volatile("eieio; tlbsync; ptesync" : : : "memory");
			kvm->arch.tlbie_lock = 0;

			asm volatile("ptesync" : : : "memory");
			for (k = 0; k < n; ++k)
				asm volatile("tlbiel %0" : : "r" (tlbrb[k]));
			asm volatile("ptesync" : : : "memory");

		for (k = 0; k < n; ++k) {
			pte_index = args[j] & ((1ul << 56) - 1);
			remove_revmap_chain(kvm, pte_index, rev, hp[0], hp[1]);
			args[j] |= rcbits << (56 - 5);
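
/*
 * Real-mode handler for the H_PROTECT hypercall: update the protection and
 * key bits in the second HPTE doubleword and, if the entry was valid,
 * invalidate the old translation before making the updated entry visible.
 */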
		      unsigned long pte_index, unsigned long avpn,
	struct kvm *kvm = vcpu->kvm;

	if (pte_index >= kvm->arch.hpt_npte)
	hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
	    ((flags & H_AVPN) && (hpte[0] & ~0x7fUL) != avpn)) {

	rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
	r = (hpte[1] & ~mask) | bits;

		rb = compute_tlbie_rb(v, r, pte_index);
		hpte[0] = v & ~HPTE_V_VALID;
		if (!(flags & H_LOCAL)) {
			while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
			asm volatile("ptesync" : : : "memory");
			asm volatile(PPC_TLBIE(%1,%0)"; eieio; tlbsync"
				     : : "r" (rb), "r" (kvm->arch.lpid));
			asm volatile("ptesync" : : : "memory");
			kvm->arch.tlbie_lock = 0;

			asm volatile("ptesync" : : : "memory");
			asm volatile("tlbiel %0" : : "r" (rb));
			asm volatile("ptesync" : : : "memory");

	asm volatile("ptesync" : : : "memory");
		   unsigned long pte_index)
	struct kvm *kvm = vcpu->kvm;
	unsigned long *hpte, v, r;

	if (pte_index >= kvm->arch.hpt_npte)
	if (flags & H_READ_4) {
	rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
		hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
		vcpu->arch.gpr[4 + i * 2] = v;
		vcpu->arch.gpr[5 + i * 2] = r;
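
/*
 * Flush the translation for a single HPTE with a global tlbie, serialized
 * by kvm->arch.tlbie_lock.
 */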
			unsigned long pte_index)
	rb = compute_tlbie_rb(hptep[0], hptep[1], pte_index);
	while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
	asm volatile("ptesync" : : : "memory");
	asm volatile(PPC_TLBIE(%1,%0)"; eieio; tlbsync"
		     : : "r" (rb), "r" (kvm->arch.lpid));
	asm volatile("ptesync" : : : "memory");
	kvm->arch.tlbie_lock = 0;
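
/*
 * Clear the reference (R) bit of an HPTE by rewriting just the byte that
 * contains it, then flush the translation so the bit is set again on the
 * next access.
 */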
			unsigned long pte_index)
	rb = compute_tlbie_rb(hptep[0], hptep[1], pte_index);
	rbyte = (hptep[1] & ~HPTE_R_R) >> 8;
	*((char *)hptep + 14) = rbyte;
	while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
	asm volatile(PPC_TLBIE(%1,%0)"; eieio; tlbsync"
		     : : "r" (rb), "r" (kvm->arch.lpid));
	asm volatile("ptesync" : : : "memory");
	kvm->arch.tlbie_lock = 0;
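
/*
 * Hashed-page-table search used when handling guest faults in real mode:
 * derive the hash and AVPN from the effective address and SLB entry, then
 * scan the primary (and, if needed, secondary) HPTE group for a matching
 * entry of the expected page size.
 */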
static int slb_base_page_shift[4] = {

	unsigned long somask;
	unsigned long vsid, hash;

		pshift = slb_base_page_shift[(slb_v & SLB_VSID_LP) >> 4];
		somask = (1UL << 40) - 1;
		somask = (1UL << 28) - 1;
	hash = (vsid ^ ((eaddr & somask) >> pshift)) & kvm->arch.hpt_mask;
	avpn = slb_v & ~(somask >> 16);
	avpn |= (eaddr & somask) >> 16;
		avpn &= ~((1UL << (pshift - 16)) - 1);

		hpte = (unsigned long *)(kvm->arch.hpt_virt + (hash << 7));
		for (i = 0; i < 16; i += 2) {
			if (!(v & valid) || (v & mask) != val)

			if ((v & valid) && (v & mask) == val &&
			    hpte_page_size(v, r) == (1ul << pshift))
				return (hash << 3) + (i >> 1);

		hash = hash ^ kvm->arch.hpt_mask;
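
/*
 * Called in real mode on a hypervisor data or instruction storage fault:
 * locate the HPTE for the faulting address, recheck access and storage-key
 * permissions against the guest view of the PTE, and either return a fault
 * status to reflect to the guest or save the HPTE for the virtual-mode
 * page fault handler.
 */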
			  unsigned long slb_v, unsigned int status, bool data)
	struct kvm *kvm = vcpu->kvm;
	unsigned long v, r, gr;
	unsigned long pp, key;

	if (status & DSISR_NOHPTE)
		if (status & DSISR_NOHPTE)
	hpte = (unsigned long *)(kvm->arch.hpt_virt + (index << 4));
	rev = real_vmalloc_addr(&kvm->arch.revmap[index]);

	unlock_hpte(hpte, v);

	status &= ~DSISR_NOHPTE;

			return status | SRR1_ISI_N_OR_G;
		if (!hpte_read_permission(pp, slb_v & key))
			return status | SRR1_ISI_PROT;
	} else if (status & DSISR_ISSTORE) {
		if (!hpte_write_permission(pp, slb_v & key))
			return status | DSISR_PROTFAULT;
		if (!hpte_read_permission(pp, slb_v & key))
			return status | DSISR_PROTFAULT;

	if (data && (vcpu->arch.shregs.msr & MSR_DR)) {
		unsigned int perm = hpte_get_skey_perm(gr, vcpu->arch.amr);
		if (status & DSISR_ISSTORE)
			return status | DSISR_KEYFAULT;

	vcpu->arch.pgfault_hpte[0] = v;
	vcpu->arch.pgfault_hpte[1] = r;

	if (data && (vcpu->arch.shregs.msr & MSR_IR) &&