#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/module.h>

#include <asm/cputable.h>
#include <asm/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/cacheflush.h>
#include <asm/dbell.h>
#include <asm/hw_irq.h>

#include "timing.h"
#include "booke.h"
#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
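/*
 * Exit statistics exported through debugfs.  VM_STAT()/VCPU_STAT() expand to
 * the offset of a counter within struct kvm or struct kvm_vcpu plus its type,
 * which is the layout generic KVM code expects for debugfs stat entries.
 */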
struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "itlb_r",         VCPU_STAT(itlb_real_miss_exits) },
	{ "itlb_v",         VCPU_STAT(itlb_virt_miss_exits) },
	{ "dtlb_r",         VCPU_STAT(dtlb_real_miss_exits) },
	{ "dtlb_v",         VCPU_STAT(dtlb_virt_miss_exits) },
	{ "inst_emu",       VCPU_STAT(emulated_inst_exits) },
	{ "ext_intr",       VCPU_STAT(ext_intr_exits) },
	{ "halt_wakeup",    VCPU_STAT(halt_wakeup) },
	{ "guest doorbell", VCPU_STAT(gdbell_exits) },
	{ NULL }
};
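/* Dump the guest's register and pending-exception state to the kernel log. */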
void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu)
{
	int i;

	printk("pc:   %08lx msr:  %08llx\n", vcpu->arch.pc,
	       vcpu->arch.shared->msr);
	printk("srr0: %08llx srr1: %08llx\n", vcpu->arch.shared->srr0,
	       vcpu->arch.shared->srr1);

	printk("exceptions: %08lx\n", vcpu->arch.pending_exceptions);

	for (i = 0; i < 32; i += 4) {
		printk("gpr%02d: %08lx %08lx %08lx %08lx\n", i,
		       kvmppc_get_gpr(vcpu, i),
		       kvmppc_get_gpr(vcpu, i+1),
		       kvmppc_get_gpr(vcpu, i+2),
		       kvmppc_get_gpr(vcpu, i+3));
	}
}
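/*
 * SPE handling: on non-HV hosts the guest runs under a "shadow" MSR that is
 * what the hardware actually sees.  MSR_SPE is only turned on in the shadow
 * MSR once the guest really uses SPE, so the host can lazily save and restore
 * the SPE register state around guest entry/exit.
 */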
#ifdef CONFIG_SPE
void kvmppc_vcpu_disable_spe(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	enable_kernel_spe();
	kvmppc_save_guest_spe(vcpu);
	vcpu->arch.shadow_msr &= ~MSR_SPE;
	preempt_enable();
}

static void kvmppc_vcpu_enable_spe(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	enable_kernel_spe();
	kvmppc_load_guest_spe(vcpu);
	vcpu->arch.shadow_msr |= MSR_SPE;
	preempt_enable();
}

static void kvmppc_vcpu_sync_spe(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.shared->msr & MSR_SPE) {
		if (!(vcpu->arch.shadow_msr & MSR_SPE))
			kvmppc_vcpu_enable_spe(vcpu);
	} else if (vcpu->arch.shadow_msr & MSR_SPE) {
		kvmppc_vcpu_disable_spe(vcpu);
	}
}
#else
static void kvmppc_vcpu_sync_spe(struct kvm_vcpu *vcpu)
{
}
#endif
void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr)
{
	u32 old_msr = vcpu->arch.shared->msr;

#ifdef CONFIG_KVM_BOOKE_HV
	new_msr |= MSR_GS;
#endif

	vcpu->arch.shared->msr = new_msr;

	kvmppc_mmu_msr_notify(vcpu, old_msr);
	kvmppc_vcpu_sync_spe(vcpu);
}
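/*
 * Interrupt injection: each guest interrupt source is recorded as a bit in
 * vcpu->arch.pending_exceptions, indexed by its BOOKE_IRQPRIO_* priority.
 * DEAR/ESR values are latched in queued_dear/queued_esr here and are only
 * written to the guest's registers when the interrupt is actually delivered.
 */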
static void kvmppc_booke_queue_irqprio(struct kvm_vcpu *vcpu,
				       unsigned int priority)
{
	set_bit(priority, &vcpu->arch.pending_exceptions);
}
static void kvmppc_core_queue_dtlb_miss(struct kvm_vcpu *vcpu,
					ulong dear_flags, ulong esr_flags)
{
	vcpu->arch.queued_dear = dear_flags;
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DTLB_MISS);
}
static void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu,
					   ulong dear_flags, ulong esr_flags)
{
	vcpu->arch.queued_dear = dear_flags;
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DATA_STORAGE);
}
static void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu,
					   ulong esr_flags)
{
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_INST_STORAGE);
}

void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong esr_flags)
{
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_PROGRAM);
}
void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
				struct kvm_interrupt *irq)
{
	unsigned int prio = BOOKE_IRQPRIO_EXTERNAL;

	if (irq->irq == KVM_INTERRUPT_SET_LEVEL)
		prio = BOOKE_IRQPRIO_EXTERNAL_LEVEL;

	kvmppc_booke_queue_irqprio(vcpu, prio);
}
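/*
 * Accessors for the guest's save/restore and fault registers.  With
 * CONFIG_KVM_BOOKE_HV the hardware provides dedicated guest copies
 * (GSRR0/GSRR1, GDEAR, GESR) that can be written directly; otherwise the
 * values live in vcpu->arch or in the magic page shared with the guest.
 */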
static void set_guest_srr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
{
#ifdef CONFIG_KVM_BOOKE_HV
	mtspr(SPRN_GSRR0, srr0);
	mtspr(SPRN_GSRR1, srr1);
#else
	vcpu->arch.shared->srr0 = srr0;
	vcpu->arch.shared->srr1 = srr1;
#endif
}
static void set_guest_csrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
{
	vcpu->arch.csrr0 = srr0;
	vcpu->arch.csrr1 = srr1;
}
static void set_guest_dsrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
{
#ifdef CONFIG_KVM_BOOKE_HV
	vcpu->arch.dsrr0 = srr0;
	vcpu->arch.dsrr1 = srr1;
#else
	set_guest_csrr(vcpu, srr0, srr1);
#endif
}
static void set_guest_mcsrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
{
	vcpu->arch.mcsrr0 = srr0;
	vcpu->arch.mcsrr1 = srr1;
}
static unsigned long get_guest_dear(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_KVM_BOOKE_HV
	return mfspr(SPRN_GDEAR);
#else
	return vcpu->arch.shared->dar;
#endif
}
static void set_guest_dear(struct kvm_vcpu *vcpu, unsigned long dear)
{
#ifdef CONFIG_KVM_BOOKE_HV
	mtspr(SPRN_GDEAR, dear);
#else
	vcpu->arch.shared->dar = dear;
#endif
}
static unsigned long get_guest_esr(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_KVM_BOOKE_HV
	return mfspr(SPRN_GESR);
#else
	return vcpu->arch.shared->esr;
#endif
}

static void set_guest_esr(struct kvm_vcpu *vcpu, u32 esr)
{
#ifdef CONFIG_KVM_BOOKE_HV
	mtspr(SPRN_GESR, esr);
#else
	vcpu->arch.shared->esr = esr;
#endif
}
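/*
 * Deliver one pending interrupt to the guest, if the guest's MSR and
 * "critical section" state allow it.  A guest signals a critical section by
 * storing the value of r1 in the shared area's "critical" field; while that
 * matches r1 (and we are in supervisor mode), most asynchronous interrupts
 * are held off.  Returns nonzero if the interrupt was delivered.
 */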
static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
					unsigned int priority)
{
	int allowed = 0;
	ulong msr_mask = 0;
	bool update_esr = false, update_dear = false;
	ulong crit_raw = vcpu->arch.shared->critical;
	ulong crit_r1 = kvmppc_get_gpr(vcpu, 1);
	bool crit;
	bool keep_irq = false;
	enum int_class int_class;

	/* Truncate crit indicators in 32 bit mode */
	if (!(vcpu->arch.shared->msr & MSR_SF)) {
		crit_raw &= 0xffffffff;
		crit_r1 &= 0xffffffff;
	}

	/* Critical section when crit == r1 */
	crit = (crit_raw == crit_r1);
	/* ... and we're in supervisor mode */
	crit = crit && !(vcpu->arch.shared->msr & MSR_PR);
	switch (priority) {
	case BOOKE_IRQPRIO_DTLB_MISS:
	case BOOKE_IRQPRIO_DATA_STORAGE:
		update_dear = true;
		/* fall through */
	case BOOKE_IRQPRIO_INST_STORAGE:
	case BOOKE_IRQPRIO_PROGRAM:
		update_esr = true;
		/* fall through */
	case BOOKE_IRQPRIO_ITLB_MISS:
	case BOOKE_IRQPRIO_SYSCALL:
	case BOOKE_IRQPRIO_FP_UNAVAIL:
	case BOOKE_IRQPRIO_SPE_UNAVAIL:
	case BOOKE_IRQPRIO_SPE_FP_DATA:
	case BOOKE_IRQPRIO_SPE_FP_ROUND:
	case BOOKE_IRQPRIO_AP_UNAVAIL:
	case BOOKE_IRQPRIO_ALIGNMENT:
		allowed = 1;
		msr_mask = MSR_CE | MSR_ME | MSR_DE;
		int_class = INT_CLASS_NONCRIT;
		break;
	case BOOKE_IRQPRIO_CRITICAL:
	case BOOKE_IRQPRIO_DBELL_CRIT:
		allowed = vcpu->arch.shared->msr & MSR_CE;
		allowed = allowed && !crit;
		msr_mask = MSR_ME;
		int_class = INT_CLASS_CRIT;
		break;
	case BOOKE_IRQPRIO_MACHINE_CHECK:
		allowed = vcpu->arch.shared->msr & MSR_ME;
		allowed = allowed && !crit;
		int_class = INT_CLASS_MC;
		break;
	case BOOKE_IRQPRIO_DECREMENTER:
	case BOOKE_IRQPRIO_FIT:
		keep_irq = true;
		/* fall through */
	case BOOKE_IRQPRIO_EXTERNAL:
	case BOOKE_IRQPRIO_DBELL:
		allowed = vcpu->arch.shared->msr & MSR_EE;
		allowed = allowed && !crit;
		msr_mask = MSR_CE | MSR_ME | MSR_DE;
		int_class = INT_CLASS_NONCRIT;
		break;
	case BOOKE_IRQPRIO_DEBUG:
		allowed = vcpu->arch.shared->msr & MSR_DE;
		allowed = allowed && !crit;
		msr_mask = MSR_ME;
		int_class = INT_CLASS_CRIT;
		break;
	}
	if (allowed) {
		switch (int_class) {
		case INT_CLASS_NONCRIT:
			set_guest_srr(vcpu, vcpu->arch.pc,
				      vcpu->arch.shared->msr);
			break;
		case INT_CLASS_CRIT:
			set_guest_csrr(vcpu, vcpu->arch.pc,
				       vcpu->arch.shared->msr);
			break;
		case INT_CLASS_DBG:
			set_guest_dsrr(vcpu, vcpu->arch.pc,
				       vcpu->arch.shared->msr);
			break;
		case INT_CLASS_MC:
			set_guest_mcsrr(vcpu, vcpu->arch.pc,
					vcpu->arch.shared->msr);
			break;
		}

		vcpu->arch.pc = vcpu->arch.ivpr | vcpu->arch.ivor[priority];
		if (update_esr)
			set_guest_esr(vcpu, vcpu->arch.queued_esr);
		if (update_dear)
			set_guest_dear(vcpu, vcpu->arch.queued_dear);
		kvmppc_set_msr(vcpu, vcpu->arch.shared->msr & msr_mask);

		if (!keep_irq)
			clear_bit(priority, &vcpu->arch.pending_exceptions);
	}
#ifdef CONFIG_KVM_BOOKE_HV
	/*
	 * If an interrupt is pending but masked, raise a guest doorbell
	 * so that we are notified when the guest enables the relevant
	 * MSR bit.
	 */
	if (vcpu->arch.pending_exceptions & BOOKE_IRQMASK_EE)
		kvmppc_set_pending_interrupt(vcpu, INT_CLASS_NONCRIT);
	if (vcpu->arch.pending_exceptions & BOOKE_IRQMASK_CE)
		kvmppc_set_pending_interrupt(vcpu, INT_CLASS_CRIT);
	if (vcpu->arch.pending_exceptions & BOOKE_IRQPRIO_MACHINE_CHECK)
		kvmppc_set_pending_interrupt(vcpu, INT_CLASS_MC);
#endif

	return allowed;
}
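/*
 * Reflect the decrementer state into the pending-exception bitmap: queue a
 * decrementer interrupt iff DEC interrupts are enabled in TCR and the
 * DEC-expired status bit is set in TSR; dequeue it otherwise.
 */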
static void update_timer_ints(struct kvm_vcpu *vcpu)
{
	if ((vcpu->arch.tcr & TCR_DIE) && (vcpu->arch.tsr & TSR_DIS))
		kvmppc_core_queue_dec(vcpu);
	else
		kvmppc_core_dequeue_dec(vcpu);
}
static void kvmppc_core_check_exceptions(struct kvm_vcpu *vcpu)
{
	unsigned long *pending = &vcpu->arch.pending_exceptions;
	unsigned int priority;

	if (kvm_check_request(KVM_REQ_PENDING_TIMER, vcpu)) {
		smp_mb();
		update_timer_ints(vcpu);
	}

	priority = __ffs(*pending);
	while (priority < BOOKE_IRQPRIO_MAX) {
		if (kvmppc_booke_irqprio_deliver(vcpu, priority))
			break;

		priority = find_next_bit(pending,
					 BITS_PER_BYTE * sizeof(*pending),
					 priority + 1);
	}

	/* Tell the guest about our interrupt status. */
	vcpu->arch.shared->int_pending = !!*pending;
}
int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
{
	int r = 0;

	WARN_ON_ONCE(!irqs_disabled());

	kvmppc_core_check_exceptions(vcpu);

	if (vcpu->arch.shared->msr & MSR_WE) {
		/* The guest is idle (MSR[WE]); block until something happens. */
		local_irq_enable();
		kvm_vcpu_block(vcpu);
		clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
		local_irq_disable();
		r = 1;
	}

	return r;
}
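/*
 * Common checks before guest entry: deliver pending interrupts, reschedule
 * if needed, and bail out to userspace on a pending signal.  Must be called
 * (and must return) with interrupts disabled, so that nothing can slip in
 * between the final check and the actual entry into the guest.
 */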
static int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
{
	int r = 0;

	WARN_ON_ONCE(!irqs_disabled());
	while (true) {
		if (need_resched()) {
			local_irq_enable();
			cond_resched();
			local_irq_disable();
			continue;
		}

		if (signal_pending(current)) {
			r = 1;
			break;
		}

		if (kvmppc_core_prepare_to_enter(vcpu)) {
			/* Interrupts were enabled in between; start over. */
			continue;
		}

		break;
	}

	return r;
}
int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
	int ret;
#ifdef CONFIG_PPC_FPU
	unsigned int fpscr;
	int fpexc_mode;
	u64 fpr[32];
#endif

	if (!vcpu->arch.sane) {
		kvm_run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		return -EINVAL;
	}

	local_irq_disable();
	if (kvmppc_prepare_to_enter(vcpu)) {
		kvm_run->exit_reason = KVM_EXIT_INTR;
		ret = -EINTR;
		goto out;
	}

	kvm_guest_enter();

#ifdef CONFIG_PPC_FPU
	/* Save userspace FPU state in stack */
	enable_kernel_fp();
	memcpy(fpr, current->thread.fpr, sizeof(current->thread.fpr));
	fpscr = current->thread.fpscr.val;
	fpexc_mode = current->thread.fpexc_mode;

	/* Restore guest FPU state to thread */
	memcpy(current->thread.fpr, vcpu->arch.fpr, sizeof(vcpu->arch.fpr));
	current->thread.fpscr.val = vcpu->arch.fpscr;

	kvmppc_load_guest_fp(vcpu);
#endif

	ret = __kvmppc_vcpu_run(kvm_run, vcpu);

#ifdef CONFIG_PPC_FPU
	kvmppc_save_guest_fp(vcpu);

	/* Save guest FPU state from thread */
	memcpy(vcpu->arch.fpr, current->thread.fpr, sizeof(current->thread.fpr));
	vcpu->arch.fpscr = current->thread.fpscr.val;

	/* Restore userspace FPU state from stack */
	memcpy(current->thread.fpr, fpr, sizeof(current->thread.fpr));
	current->thread.fpscr.val = fpscr;
	current->thread.fpexc_mode = fpexc_mode;
#endif

	kvm_guest_exit();

out:
	local_irq_enable();
	return ret;
}
static int emulation_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	enum emulation_result er;

	er = kvmppc_emulate_instruction(run, vcpu);
	switch (er) {
	case EMULATE_DONE:
		return RESUME_GUEST_NV;

	case EMULATE_FAIL:
		printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
		       __func__, vcpu->arch.pc, vcpu->arch.last_inst);
		/* For debugging, encode the failing instruction and
		 * report it to userspace. */
		run->hw.hardware_exit_reason = ~0ULL << 32;
		run->hw.hardware_exit_reason |= vcpu->arch.last_inst;
		kvmppc_core_queue_program(vcpu, ESR_PIL);
		return RESUME_HOST;

	default:
		BUG();
	}
}
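/*
 * Synthesize a pt_regs from the current host context.  The host's interrupt
 * handlers expect a pt_regs argument, but on the KVM exit path there is no
 * exception frame, so r1, LR and MSR are read directly and NIP is recovered
 * with a branch-and-link.
 */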
static void kvmppc_fill_pt_regs(struct pt_regs *regs)
{
	ulong r1, ip, msr, lr;

	asm("mr %0, 1" : "=r"(r1));
	asm("mflr %0" : "=r"(lr));
	asm("mfmsr %0" : "=r"(msr));
	asm("bl 1f; 1: mflr %0" : "=r"(ip));

	memset(regs, 0, sizeof(*regs));
	regs->gpr[1] = r1;
	regs->nip = ip;
	regs->msr = msr;
	regs->link = lr;
}
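/*
 * Interrupts that arrived while the guest was running but are meant for the
 * host (external, decrementer, doorbell, ...) are re-driven here by calling
 * the corresponding host handler, in much the same way (though not exactly)
 * as the low-level handlers in arch/powerpc/kernel/head_fsl_booke.S do.
 */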
static void kvmppc_restart_interrupt(struct kvm_vcpu *vcpu,
				     unsigned int exit_nr)
{
	struct pt_regs regs;

	switch (exit_nr) {
	case BOOKE_INTERRUPT_EXTERNAL:
		kvmppc_fill_pt_regs(&regs);
		do_IRQ(&regs);
		break;
	case BOOKE_INTERRUPT_DECREMENTER:
		kvmppc_fill_pt_regs(&regs);
		timer_interrupt(&regs);
		break;
#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_BOOK3E_64)
	case BOOKE_INTERRUPT_DOORBELL:
		kvmppc_fill_pt_regs(&regs);
		doorbell_exception(&regs);
		break;
#endif
	case BOOKE_INTERRUPT_PERFORMANCE_MONITOR:
		kvmppc_fill_pt_regs(&regs);
		performance_monitor_exception(&regs);
		break;
	case BOOKE_INTERRUPT_WATCHDOG:
		kvmppc_fill_pt_regs(&regs);
#ifdef CONFIG_BOOKE_WDT
		WatchdogException(&regs);
#else
		unknown_exception(&regs);
#endif
		break;
	}
}
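/**
 * kvmppc_handle_exit
 *
 * Return value is in the form (errcode << 2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
 */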
int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
		       unsigned int exit_nr)
{
	int r = RESUME_HOST;

	/* update before a new last_exit_type is rewritten */
	kvmppc_update_timing_stats(vcpu);

	/* restart interrupts if they were meant for the host */
	kvmppc_restart_interrupt(vcpu, exit_nr);

	local_irq_enable();

	run->exit_reason = KVM_EXIT_UNKNOWN;
	run->ready_for_interrupt_injection = 1;

	switch (exit_nr) {
	case BOOKE_INTERRUPT_MACHINE_CHECK:
		printk("MACHINE CHECK: %lx\n", mfspr(SPRN_MCSR));
		kvmppc_dump_vcpu(vcpu);
		/* For debugging, send an invalid exit reason to userspace. */
		run->hw.hardware_exit_reason = ~1ULL << 32;
		run->hw.hardware_exit_reason |= mfspr(SPRN_MCSR);
		r = RESUME_HOST;
		break;
	case BOOKE_INTERRUPT_HV_PRIV:
		r = emulation_exit(run, vcpu);
		break;

	case BOOKE_INTERRUPT_PROGRAM:
		if (vcpu->arch.shared->msr & (MSR_PR | MSR_GS)) {
			/*
			 * Program traps generated by user-level software must
			 * be handled by the guest kernel.  In GS mode,
			 * hypervisor-privileged instructions trap on
			 * BOOKE_INTERRUPT_HV_PRIV, not here, so these are
			 * actual program interrupts.
			 */
			kvmppc_core_queue_program(vcpu, vcpu->arch.fault_esr);
			r = RESUME_GUEST;
			break;
		}

		r = emulation_exit(run, vcpu);
		break;

#ifdef CONFIG_SPE
	case BOOKE_INTERRUPT_SPE_UNAVAIL:
		if (vcpu->arch.shared->msr & MSR_SPE)
			kvmppc_vcpu_enable_spe(vcpu);
		else
			kvmppc_booke_queue_irqprio(vcpu,
						   BOOKE_IRQPRIO_SPE_UNAVAIL);
		r = RESUME_GUEST;
		break;
#else
	case BOOKE_INTERRUPT_SPE_UNAVAIL:
		/*
		 * Guest wants SPE, but the host kernel doesn't support it.
		 * Send an "unimplemented operation" program check.
		 */
		kvmppc_core_queue_program(vcpu, ESR_PUO | ESR_SPV);
		r = RESUME_GUEST;
		break;

	/*
	 * These should never happen without CONFIG_SPE, as the guest's real
	 * MSR[SPE] is never enabled in that configuration.
	 */
	case BOOKE_INTERRUPT_SPE_FP_DATA:
	case BOOKE_INTERRUPT_SPE_FP_ROUND:
		printk(KERN_CRIT "%s: unexpected SPE interrupt %u at %08lx\n",
		       __func__, exit_nr, vcpu->arch.pc);
		run->hw.hardware_exit_reason = exit_nr;
		r = RESUME_HOST;
		break;
#endif

	case BOOKE_INTERRUPT_DATA_STORAGE:
		kvmppc_core_queue_data_storage(vcpu, vcpu->arch.fault_dear,
					       vcpu->arch.fault_esr);
		kvmppc_account_exit(vcpu, DSI_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_INST_STORAGE:
		kvmppc_core_queue_inst_storage(vcpu, vcpu->arch.fault_esr);
		kvmppc_account_exit(vcpu, ISI_EXITS);
		r = RESUME_GUEST;
		break;
#ifdef CONFIG_KVM_BOOKE_HV
	case BOOKE_INTERRUPT_HV_SYSCALL:
		if (!(vcpu->arch.shared->msr & MSR_PR)) {
			kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
		} else {
			/*
			 * hcall from guest userspace -- send privileged
			 * instruction program check (per the ePAPR spec).
			 */
			kvmppc_core_queue_program(vcpu, ESR_PPR);
		}
		r = RESUME_GUEST;
		break;
#else
	case BOOKE_INTERRUPT_SYSCALL:
		if (!(vcpu->arch.shared->msr & MSR_PR) &&
		    (((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0)) {
			/* KVM PV hypercalls */
			kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
			r = RESUME_GUEST;
		} else {
			/* Guest syscalls */
			kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SYSCALL);
			r = RESUME_GUEST;
		}
		kvmppc_account_exit(vcpu, SYSCALL_EXITS);
		break;
#endif
	case BOOKE_INTERRUPT_DTLB_MISS: {
		unsigned long eaddr = vcpu->arch.fault_dear;
		int gtlb_index;
		gpa_t gpaddr;
		gfn_t gfn;

#ifdef CONFIG_KVM_E500V2
		if (!(vcpu->arch.shared->msr & MSR_PR) &&
		    (eaddr & PAGE_MASK) == vcpu->arch.magic_page_ea) {
			kvmppc_map_magic(vcpu);
			kvmppc_account_exit(vcpu, DTLB_VIRT_MISS_EXITS);
			r = RESUME_GUEST;

			break;
		}
#endif

		/* Check the guest TLB. */
		gtlb_index = kvmppc_mmu_dtlb_index(vcpu, eaddr);
		if (gtlb_index < 0) {
			/* The guest didn't have a mapping for it. */
			kvmppc_core_queue_dtlb_miss(vcpu,
						    vcpu->arch.fault_dear,
						    vcpu->arch.fault_esr);
			kvmppc_mmu_dtlb_miss(vcpu);
			kvmppc_account_exit(vcpu, DTLB_REAL_MISS_EXITS);
			r = RESUME_GUEST;
			break;
		}

		gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
		gfn = gpaddr >> PAGE_SHIFT;

		if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
			/* The guest TLB had a mapping but the shadow TLB did
			 * not; map it and resume. */
			kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
			kvmppc_account_exit(vcpu, DTLB_VIRT_MISS_EXITS);
			r = RESUME_GUEST;
		} else {
			/* The page the guest mapped is not actually RAM --
			 * treat the access as MMIO and emulate it. */
			vcpu->arch.paddr_accessed = gpaddr;
			vcpu->arch.vaddr_accessed = eaddr;
			r = kvmppc_emulate_mmio(run, vcpu);
			kvmppc_account_exit(vcpu, MMIO_EXITS);
		}

		break;
	}
	case BOOKE_INTERRUPT_ITLB_MISS: {
		unsigned long eaddr = vcpu->arch.pc;
		gpa_t gpaddr;
		gfn_t gfn;
		int gtlb_index;

		r = RESUME_GUEST;

		/* Check the guest TLB. */
		gtlb_index = kvmppc_mmu_itlb_index(vcpu, eaddr);
		if (gtlb_index < 0) {
			/* The guest didn't have a mapping for it. */
			kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ITLB_MISS);
			kvmppc_mmu_itlb_miss(vcpu);
			kvmppc_account_exit(vcpu, ITLB_REAL_MISS_EXITS);
			break;
		}

		gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
		gfn = gpaddr >> PAGE_SHIFT;

		if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
			kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
			kvmppc_account_exit(vcpu, ITLB_VIRT_MISS_EXITS);
		} else {
			/* Guest mapped and leaped at non-RAM! */
			kvmppc_booke_queue_irqprio(vcpu,
						   BOOKE_IRQPRIO_MACHINE_CHECK);
		}

		break;
	}
	case BOOKE_INTERRUPT_DEBUG: {
		u32 dbsr;

		vcpu->arch.pc = mfspr(SPRN_CSRR0);

		/* Clear the IAC events in DBSR. */
		dbsr = mfspr(SPRN_DBSR);
		dbsr &= DBSR_IAC1 | DBSR_IAC2 | DBSR_IAC3 | DBSR_IAC4;
		mtspr(SPRN_DBSR, dbsr);

		run->exit_reason = KVM_EXIT_DEBUG;
		kvmppc_account_exit(vcpu, DEBUG_EXITS);
		r = RESUME_HOST;
		break;
	}
	}

	/*
	 * To avoid clobbering exit_reason, only check for signals if we
	 * aren't already exiting to userspace for some other reason.
	 */
	if (!(r & RESUME_HOST)) {
		local_irq_disable();
		if (kvmppc_prepare_to_enter(vcpu)) {
			run->exit_reason = KVM_EXIT_INTR;
			r = (-EINTR << 2) | RESUME_FLAG_HOST | (r & RESUME_FLAG_NV);
			kvmppc_account_exit(vcpu, SIGNAL_EXITS);
		}
	}

	return r;
}
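/* Initial guest state: 16MB mapping 0 -> 0, PC at 0, MSR = 0, R1 = 16MB */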
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	int i;
	int r;

	vcpu->arch.pc = 0;
	vcpu->arch.shared->pir = vcpu->vcpu_id;
	kvmppc_set_gpr(vcpu, 1, (16<<20) - 8); /* -8 for the callee-save LR slot */
	kvmppc_set_msr(vcpu, 0);

#ifndef CONFIG_KVM_BOOKE_HV
	vcpu->arch.shadow_msr = MSR_USER | MSR_DE | MSR_IS | MSR_DS;
	vcpu->arch.shadow_pid = 1;
	vcpu->arch.shared->msr = 0;
#endif

	/* Eye-catching numbers so we know if the guest takes an interrupt
	 * before it's programmed its own IVPR/IVORs. */
	vcpu->arch.ivpr = 0x55550000;
	for (i = 0; i < BOOKE_IRQPRIO_MAX; i++)
		vcpu->arch.ivor[i] = 0x7700 | i * 4;

	kvmppc_init_timing_stats(vcpu);

	r = kvmppc_core_vcpu_setup(vcpu);
	kvmppc_sanity_check(vcpu);
	return r;
}
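/* Register accessors backing the KVM_GET_REGS / KVM_SET_REGS ioctls. */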
int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	regs->pc = vcpu->arch.pc;
	regs->cr = kvmppc_get_cr(vcpu);
	regs->ctr = vcpu->arch.ctr;
	regs->lr = vcpu->arch.lr;
	regs->xer = kvmppc_get_xer(vcpu);
	regs->msr = vcpu->arch.shared->msr;
	regs->srr0 = vcpu->arch.shared->srr0;
	regs->srr1 = vcpu->arch.shared->srr1;

	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
		regs->gpr[i] = kvmppc_get_gpr(vcpu, i);

	return 0;
}
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	vcpu->arch.pc = regs->pc;
	kvmppc_set_cr(vcpu, regs->cr);
	vcpu->arch.ctr = regs->ctr;
	vcpu->arch.lr = regs->lr;
	kvmppc_set_xer(vcpu, regs->xer);
	kvmppc_set_msr(vcpu, regs->msr);
	vcpu->arch.shared->srr0 = regs->srr0;
	vcpu->arch.shared->srr1 = regs->srr1;

	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
		kvmppc_set_gpr(vcpu, i, regs->gpr[i]);

	return 0;
}
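/*
 * Special-register (sregs) accessors, grouped by feature set: the "base"
 * group is common BookE state, while the "arch206" group covers Power ISA
 * 2.06 embedded state.  Setters ignore groups that userspace did not mark
 * present in sregs->u.e.features.
 */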
static void get_sregs_base(struct kvm_vcpu *vcpu,
			   struct kvm_sregs *sregs)
{
	u64 tb = get_tb();

	sregs->u.e.features |= KVM_SREGS_E_BASE;

	sregs->u.e.csrr0 = vcpu->arch.csrr0;
	sregs->u.e.csrr1 = vcpu->arch.csrr1;
	sregs->u.e.mcsr = vcpu->arch.mcsr;
	sregs->u.e.esr = get_guest_esr(vcpu);
	sregs->u.e.dear = get_guest_dear(vcpu);
	sregs->u.e.tsr = vcpu->arch.tsr;
	sregs->u.e.tcr = vcpu->arch.tcr;
	sregs->u.e.dec = kvmppc_get_dec(vcpu, tb);
	sregs->u.e.tb = tb;
	sregs->u.e.vrsave = vcpu->arch.vrsave;
}
static int set_sregs_base(struct kvm_vcpu *vcpu,
			  struct kvm_sregs *sregs)
{
	if (!(sregs->u.e.features & KVM_SREGS_E_BASE))
		return 0;

	vcpu->arch.csrr0 = sregs->u.e.csrr0;
	vcpu->arch.csrr1 = sregs->u.e.csrr1;
	vcpu->arch.mcsr = sregs->u.e.mcsr;
	set_guest_esr(vcpu, sregs->u.e.esr);
	set_guest_dear(vcpu, sregs->u.e.dear);
	vcpu->arch.vrsave = sregs->u.e.vrsave;
	kvmppc_set_tcr(vcpu, sregs->u.e.tcr);

	if (sregs->u.e.update_special & KVM_SREGS_E_UPDATE_DEC) {
		vcpu->arch.dec = sregs->u.e.dec;
		kvmppc_emulate_dec(vcpu);
	}

	if (sregs->u.e.update_special & KVM_SREGS_E_UPDATE_TSR) {
		vcpu->arch.tsr = sregs->u.e.tsr;
		update_timer_ints(vcpu);
	}

	return 0;
}
static void get_sregs_arch206(struct kvm_vcpu *vcpu,
			      struct kvm_sregs *sregs)
{
	sregs->u.e.features |= KVM_SREGS_E_ARCH206;

	sregs->u.e.pir = vcpu->vcpu_id;
	sregs->u.e.mcsrr0 = vcpu->arch.mcsrr0;
	sregs->u.e.mcsrr1 = vcpu->arch.mcsrr1;
	sregs->u.e.decar = vcpu->arch.decar;
	sregs->u.e.ivpr = vcpu->arch.ivpr;
}
static int set_sregs_arch206(struct kvm_vcpu *vcpu,
			     struct kvm_sregs *sregs)
{
	if (!(sregs->u.e.features & KVM_SREGS_E_ARCH206))
		return 0;

	if (sregs->u.e.pir != vcpu->vcpu_id)
		return -EINVAL;

	vcpu->arch.mcsrr0 = sregs->u.e.mcsrr0;
	vcpu->arch.mcsrr1 = sregs->u.e.mcsrr1;
	vcpu->arch.decar = sregs->u.e.decar;
	vcpu->arch.ivpr = sregs->u.e.ivpr;

	return 0;
}
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	sregs->pvr = vcpu->arch.pvr;

	get_sregs_base(vcpu, sregs);
	get_sregs_arch206(vcpu, sregs);
	kvmppc_core_get_sregs(vcpu, sregs);
	return 0;
}
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	int ret;

	if (vcpu->arch.pvr != sregs->pvr)
		return -EINVAL;

	ret = set_sregs_base(vcpu, sregs);
	if (ret < 0)
		return ret;

	ret = set_sregs_arch206(vcpu, sregs);
	if (ret < 0)
		return ret;

	return kvmppc_core_set_sregs(vcpu, sregs);
}
void kvmppc_set_tcr(struct kvm_vcpu *vcpu, u32 new_tcr)
{
	vcpu->arch.tcr = new_tcr;
	update_timer_ints(vcpu);
}

void kvmppc_clr_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits)
{
	clear_bits(tsr_bits, &vcpu->arch.tsr);
	update_timer_ints(vcpu);
}
void kvmppc_decrementer_func(unsigned long data)
{
	struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;

	if (vcpu->arch.tcr & TCR_ARE) {
		/* Auto-reload: rearm the decrementer from DECAR. */
		vcpu->arch.dec = vcpu->arch.decar;
		kvmppc_emulate_dec(vcpu);
	}

	kvmppc_set_tsr_bits(vcpu, TSR_DIS);
}
void kvmppc_booke_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	current->thread.kvm_vcpu = vcpu;
}
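/*
 * On non-HV hosts, KVM installs its own copies of the exception handlers at
 * module init by pointing IVPR at a private 64KB region; the handlers are
 * copied to the same IVOR offsets the host uses, so the IVORs need not be
 * swapped on every guest/host transition.
 */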
int __init kvmppc_booke_init(void)
{
#ifndef CONFIG_KVM_BOOKE_HV
	unsigned long ivor[16];
	unsigned long max_ivor = 0;
	int i;

	/*
	 * We install our own exception handlers by hijacking IVPR. IVPR must
	 * be 16-bit aligned, so we need a 64KB allocation.
	 */
	kvmppc_booke_handlers = __get_free_pages(GFP_KERNEL | __GFP_ZERO,
						 VCPU_SIZE_ORDER);
	if (!kvmppc_booke_handlers)
		return -ENOMEM;

	/*
	 * Copy our interrupt handlers to match host IVORs.  That way we don't
	 * have to swap the IVORs on every guest/host transition.
	 */
	ivor[0] = mfspr(SPRN_IVOR0);
	ivor[1] = mfspr(SPRN_IVOR1);
	ivor[2] = mfspr(SPRN_IVOR2);
	ivor[3] = mfspr(SPRN_IVOR3);
	ivor[4] = mfspr(SPRN_IVOR4);
	ivor[5] = mfspr(SPRN_IVOR5);
	ivor[6] = mfspr(SPRN_IVOR6);
	ivor[7] = mfspr(SPRN_IVOR7);
	ivor[8] = mfspr(SPRN_IVOR8);
	ivor[9] = mfspr(SPRN_IVOR9);
	ivor[10] = mfspr(SPRN_IVOR10);
	ivor[11] = mfspr(SPRN_IVOR11);
	ivor[12] = mfspr(SPRN_IVOR12);
	ivor[13] = mfspr(SPRN_IVOR13);
	ivor[14] = mfspr(SPRN_IVOR14);
	ivor[15] = mfspr(SPRN_IVOR15);

	for (i = 0; i < 16; i++) {
		if (ivor[i] > max_ivor)
			max_ivor = ivor[i];

		memcpy((void *)kvmppc_booke_handlers + ivor[i],
		       kvmppc_handlers_start + i * kvmppc_handler_len,
		       kvmppc_handler_len);
	}
	flush_icache_range(kvmppc_booke_handlers, kvmppc_booke_handlers +
			   max_ivor + kvmppc_handler_len);
#endif /* !BOOKE_HV */
	return 0;
}