#include <linux/export.h>
#include <linux/slab.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/switch_to.h>
#include <linux/sched.h>
static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr, ulong msr);
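/*
 * On 32-bit Book3S hosts the 64-bit user MSR mask, the hardware page size
 * and the hard-IRQ helpers collapse to their 32-bit equivalents, so the
 * code below can be shared between the two configurations.
 */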
#ifdef CONFIG_PPC_BOOK3S_32
#define MSR_USER32 MSR_USER
#define MSR_USER64 MSR_USER
#define HW_PAGE_SIZE PAGE_SIZE
#define __hard_irq_disable local_irq_disable
#define __hard_irq_enable local_irq_enable
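/*
 * vcpu_load/put state switching: on 64-bit hosts the shadow SLB and shadow
 * vcpu are copied between the vcpu_book3s structure and the per-CPU PACA;
 * on 32-bit hosts current->thread simply keeps a pointer to the shadow vcpu.
 */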
#ifdef CONFIG_PPC_BOOK3S_64
memcpy(svcpu->slb, to_book3s(vcpu)->slb_shadow, sizeof(svcpu->slb));
memcpy(&get_paca()->shadow_vcpu, to_book3s(vcpu)->shadow_vcpu,
       sizeof(get_paca()->shadow_vcpu));
svcpu->slb_max = to_book3s(vcpu)->slb_shadow_max;

#ifdef CONFIG_PPC_BOOK3S_32
current->thread.kvm_shadow_vcpu = to_book3s(vcpu)->shadow_vcpu;
#ifdef CONFIG_PPC_BOOK3S_64
memcpy(to_book3s(vcpu)->slb_shadow, svcpu->slb, sizeof(svcpu->slb));
memcpy(to_book3s(vcpu)->shadow_vcpu, &get_paca()->shadow_vcpu,
       sizeof(get_paca()->shadow_vcpu));
to_book3s(vcpu)->slb_shadow_max = svcpu->slb_max;
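/*
 * Shadow MSR computation: only the FE0/FE1/SF/SE/BE/DE bits survive the
 * mask, the bits the host always needs (ME, RI, IR, DR, PR, EE) are forced
 * on, facilities currently owned by the guest are passed through, and
 * 64-bit hosts additionally set MSR_ISF | MSR_HV.
 */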
static void kvmppc_recalc_shadow_msr(struct kvm_vcpu *vcpu)

smsr &= MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE | MSR_BE | MSR_DE;
smsr |= MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_PR | MSR_EE;
smsr |= (vcpu->arch.shared->msr & vcpu->arch.guest_owned_ext);
#ifdef CONFIG_PPC_BOOK3S_64
smsr |= MSR_ISF | MSR_HV;

vcpu->arch.shadow_msr = smsr;
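/*
 * Guest MSR update: clamp the new value to the core's msr_mask, publish it
 * to the shared page, recompute the shadow MSR, and special-case
 * transitions of MSR_PR/MSR_IR/MSR_DR, the magic page, and the 32->64-bit
 * mode change.
 */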
msr &= to_book3s(vcpu)->msr_mask;
vcpu->arch.shared->msr = msr;
kvmppc_recalc_shadow_msr(vcpu);

if (!vcpu->arch.pending_exceptions) {

vcpu->stat.halt_wakeup++;

vcpu->arch.shared->msr = msr;

if ((vcpu->arch.shared->msr & (MSR_PR|MSR_IR|MSR_DR)) !=
    (old_msr & (MSR_PR|MSR_IR|MSR_DR))) {

if (!(msr & MSR_PR) && vcpu->arch.magic_page_pa) {

if (vcpu->arch.magic_page_pa &&
    !(old_msr & MSR_PR) && !(old_msr & MSR_SF) && (msr & MSR_SF)) {

if (vcpu->arch.shared->msr & MSR_FP)
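/*
 * PVR-dependent defaults: PVRs in this range are treated as 64-bit Book3S
 * cores and get the full 64-bit msr_mask and, unless userspace set one
 * explicitly, a HIOR of 0xfff00000; other cores get a 32-bit msr_mask and
 * HIOR 0.
 */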
#ifdef CONFIG_PPC_BOOK3S_64
if ((pvr >= 0x330000) && (pvr < 0x70330000)) {
if (!to_book3s(vcpu)->hior_explicit)
to_book3s(vcpu)->hior = 0xfff00000;
to_book3s(vcpu)->msr_mask = 0xffffffffffffffffULL;

if (!to_book3s(vcpu)->hior_explicit)
to_book3s(vcpu)->hior = 0;
to_book3s(vcpu)->msr_mask = 0xffffffffULL;

if (vcpu->arch.mmu.is_dcbz32(vcpu) && (mfmsr() & MSR_HV) &&

to_book3s(vcpu)->msr_mask &= ~(MSR_FE0 | MSR_FE1);
#ifdef CONFIG_PPC_BOOK3S_32

asm ("mfpvr %0" : "=r"(host_pvr));

mtspr(SPRN_HID2_GEKKO, mfspr(SPRN_HID2_GEKKO) | (1 << 29));
if (is_error_page(hpage))

hpage_offset &= ~0xFFFULL;
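/*
 * Scan the mapped page and rewrite every dcbz opcode (matched by mask
 * 0xff0007ff against INS_DCBZ) into a reserved encoding by clearing one
 * opcode bit, so the instruction traps and can be emulated with the
 * guest's 32-byte cache-line semantics.
 */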
for (i = hpage_offset; i < hpage_offset + (HW_PAGE_SIZE / 4); i++)
if ((page[i] & 0xff0007ff) == INS_DCBZ)
page[i] &= 0xfffffff7;
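/*
 * Checks whether a guest frame number may be mapped; in 32-bit mode
 * (MSR_SF clear) guest physical addresses are treated as 32 bits wide.
 */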
static int kvmppc_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)

if (!(vcpu->arch.shared->msr & MSR_SF))
ulong eaddr, int vec)
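/*
 * Page-fault handling: work out whether the access was relocated (MSR_DR
 * for data, MSR_IR for instructions), translate the effective address
 * through the guest MMU, and depending on the result either reflect the
 * fault to the guest (-ENOENT/-EPERM/-EINVAL), map the page on the host,
 * patch dcbz, or treat the access as MMIO.
 */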
bool is_mmio = false;
bool dr = (vcpu->arch.shared->msr & MSR_DR) ? true : false;
bool ir = (vcpu->arch.shared->msr & MSR_IR) ? true : false;

relocated = data ? dr : ir;

page_found = vcpu->arch.mmu.xlate(vcpu, eaddr, &pte, data);

pte.vpage = eaddr >> 12;

switch (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {

vcpu->arch.mmu.esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid);

if ((vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) == MSR_DR)

if (vcpu->arch.mmu.is_dcbz32(vcpu) &&

if (page_found == -ENOENT) {

vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);

vcpu->arch.shared->msr |=

} else if (page_found == -EPERM) {

vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);

vcpu->arch.shared->dsisr |= DSISR_PROTFAULT;
vcpu->arch.shared->msr |=

} else if (page_found == -EINVAL) {

vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);

} else if (!is_mmio &&

vcpu->stat.sp_storage++;
else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&

kvmppc_patch_dcbz(vcpu, &pte);

vcpu->stat.mmio_exits++;
static inline int get_fpr_index(int i)
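/*
 * Giving up an extension: copy the FP/VSX (and, with CONFIG_ALTIVEC,
 * vector) registers from the current thread back into the vcpu, clear the
 * facility from guest_owned_ext and from the thread's MSR, and recompute
 * the shadow MSR.
 */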
u64 *vcpu_fpr = vcpu->arch.fpr;
u64 *vcpu_vsx = vcpu->arch.vsr;
u64 *thread_fpr = (u64 *)t->fpr;

if (!(vcpu->arch.guest_owned_ext & msr))

vcpu_fpr[i] = thread_fpr[get_fpr_index(i)];

vcpu->arch.fpscr = t->fpscr.val;

#ifdef CONFIG_ALTIVEC

vcpu->arch.vscr = t->vscr;

vcpu_vsx[i] = thread_fpr[get_fpr_index(i) + 1];

vcpu->arch.guest_owned_ext &= ~msr;
current->thread.regs->msr &= ~msr;
kvmppc_recalc_shadow_msr(vcpu);
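/*
 * Reload the faulting instruction from guest memory at the current PC; if
 * the load fails, SRR1-style status bits are set in the shared MSR image
 * via kvmppc_set_field, presumably so the fault can be reflected to the
 * guest.
 */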
static int kvmppc_read_inst(struct kvm_vcpu *vcpu)

ulong srr0 = kvmppc_get_pc(vcpu);
u32 last_inst = kvmppc_get_last_inst(vcpu);

ret = kvmppc_ld(vcpu, &srr0, sizeof(u32), &last_inst, false);

msr = kvmppc_set_field(msr, 33, 33, 1);
msr = kvmppc_set_field(msr, 34, 36, 0);
vcpu->arch.shared->msr = kvmppc_set_field(msr, 42, 47, 0);
static int kvmppc_check_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr)
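/*
 * Handling an extension trap: if the guest has not enabled the facility in
 * its MSR the interrupt is reflected back; otherwise the vcpu's FP/VSX
 * (and Altivec) register images are loaded into the current thread, the
 * facility is marked guest-owned and enabled in the thread's MSR, and the
 * shadow MSR is recomputed.
 */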
static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr, ulong msr)
u64 *vcpu_fpr = vcpu->arch.fpr;
u64 *vcpu_vsx = vcpu->arch.vsr;
u64 *thread_fpr = (u64 *)t->fpr;

if (!(vcpu->arch.shared->msr & msr)) {

if (vcpu->arch.guest_owned_ext & msr) {

current->thread.regs->msr |= msr;

thread_fpr[get_fpr_index(i)] = vcpu_fpr[i];

t->fpscr.val = vcpu->arch.fpscr;

#ifdef CONFIG_ALTIVEC

t->vscr = vcpu->arch.vscr;

thread_fpr[get_fpr_index(i) + 1] = vcpu_vsx[i];

vcpu->arch.guest_owned_ext |= msr;

kvmppc_recalc_shadow_msr(vcpu);
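/*
 * Guest exit dispatch: account the exit, then handle instruction and data
 * storage faults, decrementer and external interrupts, program checks,
 * instruction emulation, hypercalls (sc 1 with PAPR enabled), OSI calls,
 * guest syscalls and pending signals, updating the exit statistics as it
 * goes.
 */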
unsigned int exit_nr)

vcpu->stat.sum_exits++;

trace_kvm_book3s_exit(exit_nr, vcpu);

vcpu->stat.pf_instruc++;

#ifdef CONFIG_PPC_BOOK3S_32

if (shadow_srr1 & 0x40000000) {

vcpu->stat.sp_instruc++;
} else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&

vcpu->arch.shared->msr |= shadow_srr1 & 0x58000000;

ulong dar = kvmppc_get_fault_dar(vcpu);

vcpu->stat.pf_storage++;

#ifdef CONFIG_PPC_BOOK3S_32

if (fault_dsisr & DSISR_NOHPTE) {

vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);

vcpu->stat.dec_exits++;

vcpu->stat.ext_intr_exits++;

svcpu = svcpu_get(vcpu);

if (vcpu->arch.shared->msr & MSR_PR) {

printk(KERN_INFO "Userspace triggered 0x700 exception at 0x%lx (0x%x)\n", kvmppc_get_pc(vcpu), kvmppc_get_last_inst(vcpu));

if ((kvmppc_get_last_inst(vcpu) & 0xff0007ff) !=

vcpu->stat.emulated_inst_exits++;

__func__, kvmppc_get_pc(vcpu), kvmppc_get_last_inst(vcpu));

if (vcpu->arch.papr_enabled &&
    (kvmppc_get_last_inst(vcpu) == 0x44000022) &&
    !(vcpu->arch.shared->msr & MSR_PR)) {

ulong cmd = kvmppc_get_gpr(vcpu, 3);

#ifdef CONFIG_KVM_BOOK3S_64_PR

for (i = 0; i < 9; ++i) {
ulong gpr = kvmppc_get_gpr(vcpu, 4 + i);

vcpu->arch.hcall_needed = 1;
} else if (vcpu->arch.osi_enabled &&

u64 *gprs = run->osi.gprs;

for (i = 0; i < 32; i++)
gprs[i] = kvmppc_get_gpr(vcpu, i);
vcpu->arch.osi_needed = 1;
} else if (!(vcpu->arch.shared->msr & MSR_PR) &&

vcpu->stat.syscall_exits++;

switch (kvmppc_check_ext(vcpu, exit_nr)) {

r = kvmppc_handle_ext(vcpu, exit_nr, ext_msr);

goto program_interrupt;

kvmppc_get_last_inst(vcpu));

kvmppc_get_last_inst(vcpu));

exit_nr, kvmppc_get_pc(vcpu), shadow_srr1);

__hard_irq_disable();

vcpu->stat.signal_exits++;

trace_kvm_book3s_reenter(r, vcpu);
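/*
 * sregs accessors: SDR1, the 64 SLB entries (64-bit) and the 16 segment
 * registers plus the eight IBATs and eight DBATs (32-bit) are marshalled
 * between the vcpu and the userspace kvm_sregs layout; the guest HIOR is
 * copied to/from userspace as well, with hior_explicit noted once
 * userspace has written it.
 */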
sregs->u.s.sdr1 = to_book3s(vcpu)->sdr1;

for (i = 0; i < 64; i++) {
sregs->u.s.ppc64.slb[i].slbe = vcpu->arch.slb[i].orige | i;
sregs->u.s.ppc64.slb[i].slbv = vcpu->arch.slb[i].origv;

for (i = 0; i < 16; i++)
sregs->u.s.ppc32.sr[i] = vcpu->arch.shared->sr[i];

for (i = 0; i < 8; i++) {
sregs->u.s.ppc32.ibat[i] = vcpu3s->ibat[i].raw;
sregs->u.s.ppc32.dbat[i] = vcpu3s->dbat[i].raw;
vcpu3s->sdr1 = sregs->u.s.sdr1;

for (i = 0; i < 64; i++) {
vcpu->arch.mmu.slbmte(vcpu, sregs->u.s.ppc64.slb[i].slbv,
                      sregs->u.s.ppc64.slb[i].slbe);

for (i = 0; i < 16; i++) {
vcpu->arch.mmu.mtsrin(vcpu, i, sregs->u.s.ppc32.sr[i]);

for (i = 0; i < 8; i++) {
(u32)sregs->u.s.ppc32.ibat[i]);
(u32)(sregs->u.s.ppc32.ibat[i] >> 32));
(u32)sregs->u.s.ppc32.dbat[i]);
(u32)(sregs->u.s.ppc32.dbat[i] >> 32));
&to_book3s(vcpu)->hior, sizeof(u64));

to_book3s(vcpu)->hior_explicit = true;
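/*
 * vcpu creation defaults: on 64-bit hosts the PVR defaults to 0x3C0301
 * (presumably a 970FX-class value) with 64 SLB entries and MSR_USER64 as
 * the initial shadow MSR; 32-bit hosts default to 0x84202, which appears
 * to be a 750-class PVR.
 */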
vcpu = &vcpu_book3s->vcpu;

goto free_shadow_vcpu;

#ifdef CONFIG_PPC_BOOK3S_64

vcpu->arch.pvr = 0x3C0301;

vcpu->arch.pvr = 0x84202;

vcpu->arch.slb_nr = 64;

vcpu->arch.shadow_msr = MSR_USER64;

#ifdef CONFIG_ALTIVEC

if (!vcpu->arch.sane) {
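/*
 * Around guest entry the host context is preserved by hand: FP state
 * (including fpscr and fpexc_mode), Altivec state (including vrsave) and
 * VSX usage are saved from current->thread before running the guest and
 * restored afterwards; a pending signal causes the entry to be abandoned
 * with hard interrupts re-enabled.
 */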
__hard_irq_disable();

if (signal_pending(current)) {
__hard_irq_enable();

if (current->thread.regs->msr & MSR_FP)

fpscr = current->thread.fpscr.val;
fpexc_mode = current->thread.fpexc_mode;

#ifdef CONFIG_ALTIVEC

used_vr = current->thread.used_vr;

if (current->thread.regs->msr & MSR_VEC)

vrsave = current->thread.vrsave;

used_vsr = current->thread.used_vsr;
if (used_vsr && (current->thread.regs->msr & MSR_VSX))

ext_msr = current->thread.regs->msr;

if (vcpu->arch.shared->msr & MSR_FP)

current->thread.regs->msr = ext_msr;

current->thread.fpscr.val = fpscr;
current->thread.fpexc_mode = fpexc_mode;

#ifdef CONFIG_ALTIVEC

if (used_vr && current->thread.used_vr) {

current->thread.vrsave = vrsave;

current->thread.used_vr = used_vr;

current->thread.used_vsr = used_vsr;
n = kvm_dirty_bitmap_bytes(memslot);
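/*
 * MMU info reported to userspace: a 4K base page size (slb_enc 0,
 * pte_enc 0) and a 16M large page size are advertised.
 */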
info->sps[0].page_shift = 12;
info->sps[0].slb_enc = 0;
info->sps[0].enc[0].page_shift = 12;
info->sps[0].enc[0].pte_enc = 0;

info->sps[1].page_shift = 24;
info->sps[1].enc[0].page_shift = 24;
info->sps[1].enc[0].pte_enc = 0;

INIT_LIST_HEAD(&kvm->arch.spapr_tce_tables);

WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables));
static int kvmppc_book3s_init(void)

static void kvmppc_book3s_exit(void)