Linux Kernel 3.7.1
booke.c
1 /*
2  * This program is free software; you can redistribute it and/or modify
3  * it under the terms of the GNU General Public License, version 2, as
4  * published by the Free Software Foundation.
5  *
6  * This program is distributed in the hope that it will be useful,
7  * but WITHOUT ANY WARRANTY; without even the implied warranty of
8  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9  * GNU General Public License for more details.
10  *
11  * You should have received a copy of the GNU General Public License
12  * along with this program; if not, write to the Free Software
13  * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
14  *
15  * Copyright IBM Corp. 2007
16  * Copyright 2010-2011 Freescale Semiconductor, Inc.
17  *
18  * Authors: Hollis Blanchard <[email protected]>
19  * Christian Ehrhardt <[email protected]>
20  * Scott Wood <[email protected]>
21  * Varun Sethi <[email protected]>
22  */
23 
24 #include <linux/errno.h>
25 #include <linux/err.h>
26 #include <linux/kvm_host.h>
27 #include <linux/gfp.h>
28 #include <linux/module.h>
29 #include <linux/vmalloc.h>
30 #include <linux/fs.h>
31 
32 #include <asm/cputable.h>
33 #include <asm/uaccess.h>
34 #include <asm/kvm_ppc.h>
35 #include <asm/cacheflush.h>
36 #include <asm/dbell.h>
37 #include <asm/hw_irq.h>
38 #include <asm/irq.h>
39 
40 #include "timing.h"
41 #include "booke.h"
42 
43 unsigned long kvmppc_booke_handlers;
44 
45 #define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
46 #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
47 
48 struct kvm_stats_debugfs_item debugfs_entries[] = {
49  { "mmio", VCPU_STAT(mmio_exits) },
50  { "dcr", VCPU_STAT(dcr_exits) },
51  { "sig", VCPU_STAT(signal_exits) },
52  { "itlb_r", VCPU_STAT(itlb_real_miss_exits) },
53  { "itlb_v", VCPU_STAT(itlb_virt_miss_exits) },
54  { "dtlb_r", VCPU_STAT(dtlb_real_miss_exits) },
55  { "dtlb_v", VCPU_STAT(dtlb_virt_miss_exits) },
56  { "sysc", VCPU_STAT(syscall_exits) },
57  { "isi", VCPU_STAT(isi_exits) },
58  { "dsi", VCPU_STAT(dsi_exits) },
59  { "inst_emu", VCPU_STAT(emulated_inst_exits) },
60  { "dec", VCPU_STAT(dec_exits) },
61  { "ext_intr", VCPU_STAT(ext_intr_exits) },
62  { "halt_wakeup", VCPU_STAT(halt_wakeup) },
63  { "doorbell", VCPU_STAT(dbell_exits) },
64  { "guest doorbell", VCPU_STAT(gdbell_exits) },
65  { NULL }
66 };
67 
68 /* TODO: use vcpu_printf() */
69 void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu)
70 {
71  int i;
72 
73  printk("pc: %08lx msr: %08llx\n", vcpu->arch.pc, vcpu->arch.shared->msr);
74  printk("lr: %08lx ctr: %08lx\n", vcpu->arch.lr, vcpu->arch.ctr);
75  printk("srr0: %08llx srr1: %08llx\n", vcpu->arch.shared->srr0,
76  vcpu->arch.shared->srr1);
77 
78  printk("exceptions: %08lx\n", vcpu->arch.pending_exceptions);
79 
80  for (i = 0; i < 32; i += 4) {
81  printk("gpr%02d: %08lx %08lx %08lx %08lx\n", i,
82  kvmppc_get_gpr(vcpu, i),
83  kvmppc_get_gpr(vcpu, i+1),
84  kvmppc_get_gpr(vcpu, i+2),
85  kvmppc_get_gpr(vcpu, i+3));
86  }
87 }
88 
89 #ifdef CONFIG_SPE
90 void kvmppc_vcpu_disable_spe(struct kvm_vcpu *vcpu)
91 {
92  preempt_disable();
93  enable_kernel_spe();
94  kvmppc_save_guest_spe(vcpu);
95  vcpu->arch.shadow_msr &= ~MSR_SPE;
96  preempt_enable();
97 }
98 
99 static void kvmppc_vcpu_enable_spe(struct kvm_vcpu *vcpu)
100 {
101  preempt_disable();
102  enable_kernel_spe();
103  kvmppc_load_guest_spe(vcpu);
104  vcpu->arch.shadow_msr |= MSR_SPE;
105  preempt_enable();
106 }
107 
108 static void kvmppc_vcpu_sync_spe(struct kvm_vcpu *vcpu)
109 {
110  if (vcpu->arch.shared->msr & MSR_SPE) {
111  if (!(vcpu->arch.shadow_msr & MSR_SPE))
112  kvmppc_vcpu_enable_spe(vcpu);
113  } else if (vcpu->arch.shadow_msr & MSR_SPE) {
114  kvmppc_vcpu_disable_spe(vcpu);
115  }
116 }
117 #else
118 static void kvmppc_vcpu_sync_spe(struct kvm_vcpu *vcpu)
119 {
120 }
121 #endif
122 
123 /*
124  * Helper function for "full" MSR writes. No need to call this if only
125  * EE/CE/ME/DE/RI are changing.
126  */
127 void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr)
128 {
129  u32 old_msr = vcpu->arch.shared->msr;
130 
131 #ifdef CONFIG_KVM_BOOKE_HV
132  new_msr |= MSR_GS;
133 #endif
134 
135  vcpu->arch.shared->msr = new_msr;
136 
137  kvmppc_mmu_msr_notify(vcpu, old_msr);
138  kvmppc_vcpu_sync_spe(vcpu);
139 }
140 
141 static void kvmppc_booke_queue_irqprio(struct kvm_vcpu *vcpu,
142  unsigned int priority)
143 {
144  set_bit(priority, &vcpu->arch.pending_exceptions);
145 }
146 
147 static void kvmppc_core_queue_dtlb_miss(struct kvm_vcpu *vcpu,
148  ulong dear_flags, ulong esr_flags)
149 {
150  vcpu->arch.queued_dear = dear_flags;
151  vcpu->arch.queued_esr = esr_flags;
152  kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DTLB_MISS);
153 }
154 
155 static void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu,
156  ulong dear_flags, ulong esr_flags)
157 {
158  vcpu->arch.queued_dear = dear_flags;
159  vcpu->arch.queued_esr = esr_flags;
160  kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DATA_STORAGE);
161 }
162 
163 static void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu,
164  ulong esr_flags)
165 {
166  vcpu->arch.queued_esr = esr_flags;
167  kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_INST_STORAGE);
168 }
169 
170 void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong esr_flags)
171 {
172  vcpu->arch.queued_esr = esr_flags;
173  kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_PROGRAM);
174 }
175 
176 void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
177 {
178  kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DECREMENTER);
179 }
180 
181 int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu)
182 {
183  return test_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
184 }
185 
186 void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu)
187 {
188  clear_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
189 }
190 
191 void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
192  struct kvm_interrupt *irq)
193 {
194  unsigned int prio = BOOKE_IRQPRIO_EXTERNAL;
195 
196  if (irq->irq == KVM_INTERRUPT_SET_LEVEL)
197  prio = BOOKE_IRQPRIO_EXTERNAL_LEVEL;
198 
199  kvmppc_booke_queue_irqprio(vcpu, prio);
200 }
201 
202 void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu,
203  struct kvm_interrupt *irq)
204 {
205  clear_bit(BOOKE_IRQPRIO_EXTERNAL, &vcpu->arch.pending_exceptions);
206  clear_bit(BOOKE_IRQPRIO_EXTERNAL_LEVEL, &vcpu->arch.pending_exceptions);
207 }
208 
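/* With CONFIG_KVM_BOOKE_HV the guest-visible SRR0/SRR1 are kept in the
 * GSRR SPRs; otherwise they live in the vcpu's shared area. */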
209 static void set_guest_srr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
210 {
211 #ifdef CONFIG_KVM_BOOKE_HV
212  mtspr(SPRN_GSRR0, srr0);
213  mtspr(SPRN_GSRR1, srr1);
214 #else
215  vcpu->arch.shared->srr0 = srr0;
216  vcpu->arch.shared->srr1 = srr1;
217 #endif
218 }
219 
220 static void set_guest_csrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
221 {
222  vcpu->arch.csrr0 = srr0;
223  vcpu->arch.csrr1 = srr1;
224 }
225 
226 static void set_guest_dsrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
227 {
228  if (cpu_has_feature(CPU_FTR_DEBUG_LVL_EXC)) {
229  vcpu->arch.dsrr0 = srr0;
230  vcpu->arch.dsrr1 = srr1;
231  } else {
232  set_guest_csrr(vcpu, srr0, srr1);
233  }
234 }
235 
236 static void set_guest_mcsrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
237 {
238  vcpu->arch.mcsrr0 = srr0;
239  vcpu->arch.mcsrr1 = srr1;
240 }
241 
242 static unsigned long get_guest_dear(struct kvm_vcpu *vcpu)
243 {
244 #ifdef CONFIG_KVM_BOOKE_HV
245  return mfspr(SPRN_GDEAR);
246 #else
247  return vcpu->arch.shared->dar;
248 #endif
249 }
250 
251 static void set_guest_dear(struct kvm_vcpu *vcpu, unsigned long dear)
252 {
253 #ifdef CONFIG_KVM_BOOKE_HV
254  mtspr(SPRN_GDEAR, dear);
255 #else
256  vcpu->arch.shared->dar = dear;
257 #endif
258 }
259 
260 static unsigned long get_guest_esr(struct kvm_vcpu *vcpu)
261 {
262 #ifdef CONFIG_KVM_BOOKE_HV
263  return mfspr(SPRN_GESR);
264 #else
265  return vcpu->arch.shared->esr;
266 #endif
267 }
268 
269 static void set_guest_esr(struct kvm_vcpu *vcpu, u32 esr)
270 {
271 #ifdef CONFIG_KVM_BOOKE_HV
272  mtspr(SPRN_GESR, esr);
273 #else
274  vcpu->arch.shared->esr = esr;
275 #endif
276 }
277 
278 /* Deliver the interrupt of the corresponding priority, if possible. */
279 static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
280  unsigned int priority)
281 {
282  int allowed = 0;
283  ulong msr_mask = 0;
284  bool update_esr = false, update_dear = false;
285  ulong crit_raw = vcpu->arch.shared->critical;
286  ulong crit_r1 = kvmppc_get_gpr(vcpu, 1);
287  bool crit;
288  bool keep_irq = false;
289  enum int_class int_class;
290 
291  /* Truncate crit indicators in 32 bit mode */
292  if (!(vcpu->arch.shared->msr & MSR_SF)) {
293  crit_raw &= 0xffffffff;
294  crit_r1 &= 0xffffffff;
295  }
296 
297  /* Critical section when crit == r1 */
298  crit = (crit_raw == crit_r1);
299  /* ... and we're in supervisor mode */
300  crit = crit && !(vcpu->arch.shared->msr & MSR_PR);
301 
302  if (priority == BOOKE_IRQPRIO_EXTERNAL_LEVEL) {
303  priority = BOOKE_IRQPRIO_EXTERNAL;
304  keep_irq = true;
305  }
306 
307  switch (priority) {
308  case BOOKE_IRQPRIO_DTLB_MISS:
309  case BOOKE_IRQPRIO_DATA_STORAGE:
310  update_dear = true;
311  /* fall through */
312  case BOOKE_IRQPRIO_INST_STORAGE:
313  case BOOKE_IRQPRIO_PROGRAM:
314  update_esr = true;
315  /* fall through */
316  case BOOKE_IRQPRIO_ITLB_MISS:
317  case BOOKE_IRQPRIO_SYSCALL:
318  case BOOKE_IRQPRIO_FP_UNAVAIL:
319  case BOOKE_IRQPRIO_SPE_UNAVAIL:
320  case BOOKE_IRQPRIO_SPE_FP_DATA:
321  case BOOKE_IRQPRIO_SPE_FP_ROUND:
322  case BOOKE_IRQPRIO_AP_UNAVAIL:
323  case BOOKE_IRQPRIO_ALIGNMENT:
324  allowed = 1;
325  msr_mask = MSR_CE | MSR_ME | MSR_DE;
326  int_class = INT_CLASS_NONCRIT;
327  break;
328  case BOOKE_IRQPRIO_CRITICAL:
329  case BOOKE_IRQPRIO_DBELL_CRIT:
330  allowed = vcpu->arch.shared->msr & MSR_CE;
331  allowed = allowed && !crit;
332  msr_mask = MSR_ME;
333  int_class = INT_CLASS_CRIT;
334  break;
335  case BOOKE_IRQPRIO_MACHINE_CHECK:
336  allowed = vcpu->arch.shared->msr & MSR_ME;
337  allowed = allowed && !crit;
338  int_class = INT_CLASS_MC;
339  break;
340  case BOOKE_IRQPRIO_DECREMENTER:
341  case BOOKE_IRQPRIO_FIT:
342  keep_irq = true;
343  /* fall through */
344  case BOOKE_IRQPRIO_EXTERNAL:
345  case BOOKE_IRQPRIO_DBELL:
346  allowed = vcpu->arch.shared->msr & MSR_EE;
347  allowed = allowed && !crit;
348  msr_mask = MSR_CE | MSR_ME | MSR_DE;
349  int_class = INT_CLASS_NONCRIT;
350  break;
351  case BOOKE_IRQPRIO_DEBUG:
352  allowed = vcpu->arch.shared->msr & MSR_DE;
353  allowed = allowed && !crit;
354  msr_mask = MSR_ME;
355  int_class = INT_CLASS_CRIT;
356  break;
357  }
358 
359  if (allowed) {
360  switch (int_class) {
361  case INT_CLASS_NONCRIT:
362  set_guest_srr(vcpu, vcpu->arch.pc,
363  vcpu->arch.shared->msr);
364  break;
365  case INT_CLASS_CRIT:
366  set_guest_csrr(vcpu, vcpu->arch.pc,
367  vcpu->arch.shared->msr);
368  break;
369  case INT_CLASS_DBG:
370  set_guest_dsrr(vcpu, vcpu->arch.pc,
371  vcpu->arch.shared->msr);
372  break;
373  case INT_CLASS_MC:
374  set_guest_mcsrr(vcpu, vcpu->arch.pc,
375  vcpu->arch.shared->msr);
376  break;
377  }
378 
379  vcpu->arch.pc = vcpu->arch.ivpr | vcpu->arch.ivor[priority];
380  if (update_esr == true)
381  set_guest_esr(vcpu, vcpu->arch.queued_esr);
382  if (update_dear == true)
383  set_guest_dear(vcpu, vcpu->arch.queued_dear);
384  kvmppc_set_msr(vcpu, vcpu->arch.shared->msr & msr_mask);
385 
386  if (!keep_irq)
387  clear_bit(priority, &vcpu->arch.pending_exceptions);
388  }
389 
390 #ifdef CONFIG_KVM_BOOKE_HV
391  /*
392  * If an interrupt is pending but masked, raise a guest doorbell
393  * so that we are notified when the guest enables the relevant
394  * MSR bit.
395  */
396  if (vcpu->arch.pending_exceptions & BOOKE_IRQMASK_EE)
397  kvmppc_set_pending_interrupt(vcpu, INT_CLASS_NONCRIT);
398  if (vcpu->arch.pending_exceptions & BOOKE_IRQMASK_CE)
399  kvmppc_set_pending_interrupt(vcpu, INT_CLASS_CRIT);
400  if (vcpu->arch.pending_exceptions & BOOKE_IRQPRIO_MACHINE_CHECK)
401  kvmppc_set_pending_interrupt(vcpu, INT_CLASS_MC);
402 #endif
403 
404  return allowed;
405 }
406 
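/* Keep the queued decrementer exception in sync with the timer state:
 * it is pending only while both TCR[DIE] and TSR[DIS] are set. */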
407 static void update_timer_ints(struct kvm_vcpu *vcpu)
408 {
409  if ((vcpu->arch.tcr & TCR_DIE) && (vcpu->arch.tsr & TSR_DIS))
410  kvmppc_core_queue_dec(vcpu);
411  else
412  kvmppc_core_dequeue_dec(vcpu);
413 }
414 
415 static void kvmppc_core_check_exceptions(struct kvm_vcpu *vcpu)
416 {
417  unsigned long *pending = &vcpu->arch.pending_exceptions;
418  unsigned int priority;
419 
420  if (vcpu->requests) {
421  if (kvm_check_request(KVM_REQ_PENDING_TIMER, vcpu)) {
422  smp_mb();
423  update_timer_ints(vcpu);
424  }
425  }
426 
427  priority = __ffs(*pending);
428  while (priority < BOOKE_IRQPRIO_MAX) {
429  if (kvmppc_booke_irqprio_deliver(vcpu, priority))
430  break;
431 
432  priority = find_next_bit(pending,
433  BITS_PER_BYTE * sizeof(*pending),
434  priority + 1);
435  }
436 
437  /* Tell the guest about our interrupt status */
438  vcpu->arch.shared->int_pending = !!*pending;
439 }
440 
441 /* Check pending exceptions and deliver one, if possible. */
442 int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
443 {
444  int r = 0;
445  WARN_ON_ONCE(!irqs_disabled());
446 
447  kvmppc_core_check_exceptions(vcpu);
448 
449  if (vcpu->arch.shared->msr & MSR_WE) {
450  local_irq_enable();
451  kvm_vcpu_block(vcpu);
452  clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
453  local_irq_disable();
454 
455  kvmppc_set_exit_type(vcpu, EMULATED_MTMSRWE_EXITS);
456  r = 1;
457  };
458 
459  return r;
460 }
461 
462 /*
463  * Common checks before entering the guest world. Call with interrupts
464  * disabled.
465  *
466  * returns !0 if a signal is pending and check_signal is true
467  */
468 static int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
469 {
470  int r = 0;
471 
472  WARN_ON_ONCE(!irqs_disabled());
473  while (true) {
474  if (need_resched()) {
475  local_irq_enable();
476  cond_resched();
477  local_irq_disable();
478  continue;
479  }
480 
481  if (signal_pending(current)) {
482  r = 1;
483  break;
484  }
485 
486  if (kvmppc_core_prepare_to_enter(vcpu)) {
487  /* interrupts got enabled in between, so we
488  are back at square 1 */
489  continue;
490  }
491 
492  break;
493  }
494 
495  return r;
496 }
497 
498 int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
499 {
500  int ret;
501 #ifdef CONFIG_PPC_FPU
502  unsigned int fpscr;
503  int fpexc_mode;
504  u64 fpr[32];
505 #endif
506 
507  if (!vcpu->arch.sane) {
508  kvm_run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
509  return -EINVAL;
510  }
511 
512  local_irq_disable();
513  if (kvmppc_prepare_to_enter(vcpu)) {
514  kvm_run->exit_reason = KVM_EXIT_INTR;
515  ret = -EINTR;
516  goto out;
517  }
518 
519  kvm_guest_enter();
520 
521 #ifdef CONFIG_PPC_FPU
522  /* Save userspace FPU state in stack */
523  enable_kernel_fp();
524  memcpy(fpr, current->thread.fpr, sizeof(current->thread.fpr));
525  fpscr = current->thread.fpscr.val;
526  fpexc_mode = current->thread.fpexc_mode;
527 
528  /* Restore guest FPU state to thread */
529  memcpy(current->thread.fpr, vcpu->arch.fpr, sizeof(vcpu->arch.fpr));
530  current->thread.fpscr.val = vcpu->arch.fpscr;
531 
532  /*
533  * Since we can't trap on MSR_FP in GS-mode, we consider the guest
534  * as always using the FPU. Kernel usage of FP (via
535  * enable_kernel_fp()) in this thread must not occur while
536  * vcpu->fpu_active is set.
537  */
538  vcpu->fpu_active = 1;
539 
540  kvmppc_load_guest_fp(vcpu);
541 #endif
542 
543  ret = __kvmppc_vcpu_run(kvm_run, vcpu);
544 
545 #ifdef CONFIG_PPC_FPU
546  kvmppc_save_guest_fp(vcpu);
547 
548  vcpu->fpu_active = 0;
549 
550  /* Save guest FPU state from thread */
551  memcpy(vcpu->arch.fpr, current->thread.fpr, sizeof(vcpu->arch.fpr));
552  vcpu->arch.fpscr = current->thread.fpscr.val;
553 
554  /* Restore userspace FPU state from stack */
555  memcpy(current->thread.fpr, fpr, sizeof(current->thread.fpr));
556  current->thread.fpscr.val = fpscr;
557  current->thread.fpexc_mode = fpexc_mode;
558 #endif
559 
560  kvm_guest_exit();
561 
562 out:
563  local_irq_enable();
564  return ret;
565 }
566 
567 static int emulation_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
568 {
569  enum emulation_result er;
570 
571  er = kvmppc_emulate_instruction(run, vcpu);
572  switch (er) {
573  case EMULATE_DONE:
574  /* don't overwrite subtypes, just account kvm_stats */
575  kvmppc_account_exit_stat(vcpu, EMULATED_INST_EXITS);
576  /* Future optimization: only reload non-volatiles if
577  * they were actually modified by emulation. */
578  return RESUME_GUEST_NV;
579 
580  case EMULATE_DO_DCR:
581  run->exit_reason = KVM_EXIT_DCR;
582  return RESUME_HOST;
583 
584  case EMULATE_FAIL:
585  printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
586  __func__, vcpu->arch.pc, vcpu->arch.last_inst);
587  /* For debugging, encode the failing instruction and
588  * report it to userspace. */
589  run->hw.hardware_exit_reason = ~0ULL << 32;
590  run->hw.hardware_exit_reason |= vcpu->arch.last_inst;
591  kvmppc_core_queue_program(vcpu, ESR_PIL);
592  return RESUME_HOST;
593 
594  default:
595  BUG();
596  }
597 }
598 
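/* Build a minimal pt_regs snapshot of the current host context (r1, nip,
 * msr, lr) so host interrupt handlers can be invoked from
 * kvmppc_restart_interrupt(). */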
599 static void kvmppc_fill_pt_regs(struct pt_regs *regs)
600 {
601  ulong r1, ip, msr, lr;
602 
603  asm("mr %0, 1" : "=r"(r1));
604  asm("mflr %0" : "=r"(lr));
605  asm("mfmsr %0" : "=r"(msr));
606  asm("bl 1f; 1: mflr %0" : "=r"(ip));
607 
608  memset(regs, 0, sizeof(*regs));
609  regs->gpr[1] = r1;
610  regs->nip = ip;
611  regs->msr = msr;
612  regs->link = lr;
613 }
614 
615 /*
616  * For interrupts needed to be handled by host interrupt handlers,
617  * corresponding host handler are called from here in similar way
618  * (but not exact) as they are called from low level handler
619  * (such as from arch/powerpc/kernel/head_fsl_booke.S).
620  */
621 static void kvmppc_restart_interrupt(struct kvm_vcpu *vcpu,
622  unsigned int exit_nr)
623 {
624  struct pt_regs regs;
625 
626  switch (exit_nr) {
627  case BOOKE_INTERRUPT_EXTERNAL:
628  kvmppc_fill_pt_regs(&regs);
629  do_IRQ(&regs);
630  break;
631  case BOOKE_INTERRUPT_DECREMENTER:
632  kvmppc_fill_pt_regs(&regs);
633  timer_interrupt(&regs);
634  break;
635 #if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_BOOK3E_64)
636  case BOOKE_INTERRUPT_DOORBELL:
637  kvmppc_fill_pt_regs(&regs);
638  doorbell_exception(&regs);
639  break;
640 #endif
641  case BOOKE_INTERRUPT_MACHINE_CHECK:
642  /* FIXME */
643  break;
644  case BOOKE_INTERRUPT_PERFORMANCE_MONITOR:
645  kvmppc_fill_pt_regs(&regs);
646  performance_monitor_exception(&regs);
647  break;
648  case BOOKE_INTERRUPT_WATCHDOG:
649  kvmppc_fill_pt_regs(&regs);
650 #ifdef CONFIG_BOOKE_WDT
651  WatchdogException(&regs);
652 #else
653  unknown_exception(&regs);
654 #endif
655  break;
656  case BOOKE_INTERRUPT_CRITICAL:
657  unknown_exception(&regs);
658  break;
659  }
660 }
661 
662 /**
663  * kvmppc_handle_exit
664  *
665  * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
666  */
667 int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
668  unsigned int exit_nr)
669 {
670  int r = RESUME_HOST;
671 
672  /* update before a new last_exit_type is rewritten */
673  kvmppc_update_timing_stats(vcpu);
674 
675  /* restart interrupts if they were meant for the host */
676  kvmppc_restart_interrupt(vcpu, exit_nr);
677 
678  local_irq_enable();
679 
680  run->exit_reason = KVM_EXIT_UNKNOWN;
681  run->ready_for_interrupt_injection = 1;
682 
683  switch (exit_nr) {
684  case BOOKE_INTERRUPT_MACHINE_CHECK:
685  printk("MACHINE CHECK: %lx\n", mfspr(SPRN_MCSR));
686  kvmppc_dump_vcpu(vcpu);
687  /* For debugging, send invalid exit reason to user space */
688  run->hw.hardware_exit_reason = ~1ULL << 32;
689  run->hw.hardware_exit_reason |= mfspr(SPRN_MCSR);
690  r = RESUME_HOST;
691  break;
692 
693  case BOOKE_INTERRUPT_EXTERNAL:
694  kvmppc_account_exit(vcpu, EXT_INTR_EXITS);
695  r = RESUME_GUEST;
696  break;
697 
698  case BOOKE_INTERRUPT_DECREMENTER:
699  kvmppc_account_exit(vcpu, DEC_EXITS);
700  r = RESUME_GUEST;
701  break;
702 
704  r = RESUME_GUEST;
705  break;
706 
707  case BOOKE_INTERRUPT_DOORBELL:
708  kvmppc_account_exit(vcpu, DBELL_EXITS);
709  r = RESUME_GUEST;
710  break;
711 
712  case BOOKE_INTERRUPT_GUEST_DBELL_CRIT:
713  kvmppc_account_exit(vcpu, GDBELL_EXITS);
714 
715  /*
716  * We are here because there is a pending guest interrupt
717  * which could not be delivered as MSR_CE or MSR_ME was not
718  * set. Once we break from here we will retry delivery.
719  */
720  r = RESUME_GUEST;
721  break;
722 
723  case BOOKE_INTERRUPT_GUEST_DBELL:
724  kvmppc_account_exit(vcpu, GDBELL_EXITS);
725 
726  /*
727  * We are here because there is a pending guest interrupt
728  * which could not be delivered as MSR_EE was not set. Once
729  * we break from here we will retry delivery.
730  */
731  r = RESUME_GUEST;
732  break;
733 
734  case BOOKE_INTERRUPT_PERFORMANCE_MONITOR:
735  r = RESUME_GUEST;
736  break;
737 
738  case BOOKE_INTERRUPT_HV_PRIV:
739  r = emulation_exit(run, vcpu);
740  break;
741 
742  case BOOKE_INTERRUPT_PROGRAM:
743  if (vcpu->arch.shared->msr & (MSR_PR | MSR_GS)) {
744  /*
745  * Program traps generated by user-level software must
746  * be handled by the guest kernel.
747  *
748  * In GS mode, hypervisor privileged instructions trap
749  * on BOOKE_INTERRUPT_HV_PRIV, not here, so these are
750  * actual program interrupts, handled by the guest.
751  */
752  kvmppc_core_queue_program(vcpu, vcpu->arch.fault_esr);
753  r = RESUME_GUEST;
754  kvmppc_account_exit(vcpu, USR_PR_INST);
755  break;
756  }
757 
758  r = emulation_exit(run, vcpu);
759  break;
760 
761  case BOOKE_INTERRUPT_FP_UNAVAIL:
762  kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_FP_UNAVAIL);
763  kvmppc_account_exit(vcpu, FP_UNAVAIL);
764  r = RESUME_GUEST;
765  break;
766 
767 #ifdef CONFIG_SPE
768  case BOOKE_INTERRUPT_SPE_UNAVAIL: {
769  if (vcpu->arch.shared->msr & MSR_SPE)
770  kvmppc_vcpu_enable_spe(vcpu);
771  else
772  kvmppc_booke_queue_irqprio(vcpu,
773  BOOKE_IRQPRIO_SPE_UNAVAIL);
774  r = RESUME_GUEST;
775  break;
776  }
777 
778  case BOOKE_INTERRUPT_SPE_FP_DATA:
779  kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_DATA);
780  r = RESUME_GUEST;
781  break;
782 
783  case BOOKE_INTERRUPT_SPE_FP_ROUND:
784  kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_ROUND);
785  r = RESUME_GUEST;
786  break;
787 #else
788  case BOOKE_INTERRUPT_SPE_UNAVAIL:
789  /*
790  * Guest wants SPE, but host kernel doesn't support it. Send
791  * an "unimplemented operation" program check to the guest.
792  */
793  kvmppc_core_queue_program(vcpu, ESR_PUO | ESR_SPV);
794  r = RESUME_GUEST;
795  break;
796 
797  /*
798  * These really should never happen without CONFIG_SPE,
799  * as we should never enable the real MSR[SPE] in the guest.
800  */
801  case BOOKE_INTERRUPT_SPE_FP_DATA:
802  case BOOKE_INTERRUPT_SPE_FP_ROUND:
803  printk(KERN_CRIT "%s: unexpected SPE interrupt %u at %08lx\n",
804  __func__, exit_nr, vcpu->arch.pc);
805  run->hw.hardware_exit_reason = exit_nr;
806  r = RESUME_HOST;
807  break;
808 #endif
809 
810  case BOOKE_INTERRUPT_DATA_STORAGE:
811  kvmppc_core_queue_data_storage(vcpu, vcpu->arch.fault_dear,
812  vcpu->arch.fault_esr);
813  kvmppc_account_exit(vcpu, DSI_EXITS);
814  r = RESUME_GUEST;
815  break;
816 
817  case BOOKE_INTERRUPT_INST_STORAGE:
818  kvmppc_core_queue_inst_storage(vcpu, vcpu->arch.fault_esr);
819  kvmppc_account_exit(vcpu, ISI_EXITS);
820  r = RESUME_GUEST;
821  break;
822 
823 #ifdef CONFIG_KVM_BOOKE_HV
824  case BOOKE_INTERRUPT_SYSCALL:
825  if (!(vcpu->arch.shared->msr & MSR_PR)) {
826  kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
827  } else {
828  /*
829  * hcall from guest userspace -- send privileged
830  * instruction program check.
831  */
832  kvmppc_core_queue_program(vcpu, ESR_PPR);
833  }
834 
835  r = RESUME_GUEST;
836  break;
837 #else
838  case BOOKE_INTERRUPT_SYSCALL:
839  if (!(vcpu->arch.shared->msr & MSR_PR) &&
840  (((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0)) {
841  /* KVM PV hypercalls */
842  kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
843  r = RESUME_GUEST;
844  } else {
845  /* Guest syscalls */
846  kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SYSCALL);
847  }
848  kvmppc_account_exit(vcpu, SYSCALL_EXITS);
849  r = RESUME_GUEST;
850  break;
851 #endif
852 
853  case BOOKE_INTERRUPT_DTLB_MISS: {
854  unsigned long eaddr = vcpu->arch.fault_dear;
855  int gtlb_index;
856  gpa_t gpaddr;
857  gfn_t gfn;
858 
859 #ifdef CONFIG_KVM_E500V2
860  if (!(vcpu->arch.shared->msr & MSR_PR) &&
861  (eaddr & PAGE_MASK) == vcpu->arch.magic_page_ea) {
862  kvmppc_map_magic(vcpu);
863  kvmppc_account_exit(vcpu, DTLB_VIRT_MISS_EXITS);
864  r = RESUME_GUEST;
865 
866  break;
867  }
868 #endif
869 
870  /* Check the guest TLB. */
871  gtlb_index = kvmppc_mmu_dtlb_index(vcpu, eaddr);
872  if (gtlb_index < 0) {
873  /* The guest didn't have a mapping for it. */
874  kvmppc_core_queue_dtlb_miss(vcpu,
875  vcpu->arch.fault_dear,
876  vcpu->arch.fault_esr);
877  kvmppc_mmu_dtlb_miss(vcpu);
878  kvmppc_account_exit(vcpu, DTLB_REAL_MISS_EXITS);
879  r = RESUME_GUEST;
880  break;
881  }
882 
883  gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
884  gfn = gpaddr >> PAGE_SHIFT;
885 
886  if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
887  /* The guest TLB had a mapping, but the shadow TLB
888  * didn't, and it is RAM. This could be because:
889  * a) the entry is mapping the host kernel, or
890  * b) the guest used a large mapping which we're faking
891  * Either way, we need to satisfy the fault without
892  * invoking the guest. */
893  kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
894  kvmppc_account_exit(vcpu, DTLB_VIRT_MISS_EXITS);
895  r = RESUME_GUEST;
896  } else {
897  /* Guest has mapped and accessed a page which is not
898  * actually RAM. */
899  vcpu->arch.paddr_accessed = gpaddr;
900  vcpu->arch.vaddr_accessed = eaddr;
901  r = kvmppc_emulate_mmio(run, vcpu);
902  kvmppc_account_exit(vcpu, MMIO_EXITS);
903  }
904 
905  break;
906  }
907 
908  case BOOKE_INTERRUPT_ITLB_MISS: {
909  unsigned long eaddr = vcpu->arch.pc;
910  gpa_t gpaddr;
911  gfn_t gfn;
912  int gtlb_index;
913 
914  r = RESUME_GUEST;
915 
916  /* Check the guest TLB. */
917  gtlb_index = kvmppc_mmu_itlb_index(vcpu, eaddr);
918  if (gtlb_index < 0) {
919  /* The guest didn't have a mapping for it. */
920  kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ITLB_MISS);
921  kvmppc_mmu_itlb_miss(vcpu);
922  kvmppc_account_exit(vcpu, ITLB_REAL_MISS_EXITS);
923  break;
924  }
925 
926  kvmppc_account_exit(vcpu, ITLB_VIRT_MISS_EXITS);
927 
928  gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
929  gfn = gpaddr >> PAGE_SHIFT;
930 
931  if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
932  /* The guest TLB had a mapping, but the shadow TLB
933  * didn't. This could be because:
934  * a) the entry is mapping the host kernel, or
935  * b) the guest used a large mapping which we're faking
936  * Either way, we need to satisfy the fault without
937  * invoking the guest. */
938  kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
939  } else {
940  /* Guest mapped and leaped at non-RAM! */
941  kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_MACHINE_CHECK);
942  }
943 
944  break;
945  }
946 
947  case BOOKE_INTERRUPT_DEBUG: {
948  u32 dbsr;
949 
950  vcpu->arch.pc = mfspr(SPRN_CSRR0);
951 
952  /* clear IAC events in DBSR register */
953  dbsr = mfspr(SPRN_DBSR);
954  dbsr &= DBSR_IAC1 | DBSR_IAC2 | DBSR_IAC3 | DBSR_IAC4;
955  mtspr(SPRN_DBSR, dbsr);
956 
957  run->exit_reason = KVM_EXIT_DEBUG;
958  kvmppc_account_exit(vcpu, DEBUG_EXITS);
959  r = RESUME_HOST;
960  break;
961  }
962 
963  default:
964  printk(KERN_EMERG "exit_nr %d\n", exit_nr);
965  BUG();
966  }
967 
968  /*
969  * To avoid clobbering exit_reason, only check for signals if we
970  * aren't already exiting to userspace for some other reason.
971  */
972  if (!(r & RESUME_HOST)) {
973  local_irq_disable();
974  if (kvmppc_prepare_to_enter(vcpu)) {
975  run->exit_reason = KVM_EXIT_INTR;
976  r = (-EINTR << 2) | RESUME_HOST | (r & RESUME_FLAG_NV);
977  kvmppc_account_exit(vcpu, SIGNAL_EXITS);
978  }
979  }
980 
981  return r;
982 }
983 
984 /* Initial guest state: 16MB mapping 0 -> 0, PC = 0, MSR = 0, R1 = 16MB */
985 int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
986 {
987  int i;
988  int r;
989 
990  vcpu->arch.pc = 0;
991  vcpu->arch.shared->pir = vcpu->vcpu_id;
992  kvmppc_set_gpr(vcpu, 1, (16<<20) - 8); /* -8 for the callee-save LR slot */
993  kvmppc_set_msr(vcpu, 0);
994 
995 #ifndef CONFIG_KVM_BOOKE_HV
996  vcpu->arch.shadow_msr = MSR_USER | MSR_DE | MSR_IS | MSR_DS;
997  vcpu->arch.shadow_pid = 1;
998  vcpu->arch.shared->msr = 0;
999 #endif
1000 
1001  /* Eye-catching numbers so we know if the guest takes an interrupt
1002  * before it's programmed its own IVPR/IVORs. */
1003  vcpu->arch.ivpr = 0x55550000;
1004  for (i = 0; i < BOOKE_IRQPRIO_MAX; i++)
1005  vcpu->arch.ivor[i] = 0x7700 | i * 4;
1006 
1008 
1009  r = kvmppc_core_vcpu_setup(vcpu);
1010  kvmppc_sanity_check(vcpu);
1011  return r;
1012 }
1013 
1014 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
1015 {
1016  int i;
1017 
1018  regs->pc = vcpu->arch.pc;
1019  regs->cr = kvmppc_get_cr(vcpu);
1020  regs->ctr = vcpu->arch.ctr;
1021  regs->lr = vcpu->arch.lr;
1022  regs->xer = kvmppc_get_xer(vcpu);
1023  regs->msr = vcpu->arch.shared->msr;
1024  regs->srr0 = vcpu->arch.shared->srr0;
1025  regs->srr1 = vcpu->arch.shared->srr1;
1026  regs->pid = vcpu->arch.pid;
1027  regs->sprg0 = vcpu->arch.shared->sprg0;
1028  regs->sprg1 = vcpu->arch.shared->sprg1;
1029  regs->sprg2 = vcpu->arch.shared->sprg2;
1030  regs->sprg3 = vcpu->arch.shared->sprg3;
1031  regs->sprg4 = vcpu->arch.shared->sprg4;
1032  regs->sprg5 = vcpu->arch.shared->sprg5;
1033  regs->sprg6 = vcpu->arch.shared->sprg6;
1034  regs->sprg7 = vcpu->arch.shared->sprg7;
1035 
1036  for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
1037  regs->gpr[i] = kvmppc_get_gpr(vcpu, i);
1038 
1039  return 0;
1040 }
1041 
1042 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
1043 {
1044  int i;
1045 
1046  vcpu->arch.pc = regs->pc;
1047  kvmppc_set_cr(vcpu, regs->cr);
1048  vcpu->arch.ctr = regs->ctr;
1049  vcpu->arch.lr = regs->lr;
1050  kvmppc_set_xer(vcpu, regs->xer);
1051  kvmppc_set_msr(vcpu, regs->msr);
1052  vcpu->arch.shared->srr0 = regs->srr0;
1053  vcpu->arch.shared->srr1 = regs->srr1;
1054  kvmppc_set_pid(vcpu, regs->pid);
1055  vcpu->arch.shared->sprg0 = regs->sprg0;
1056  vcpu->arch.shared->sprg1 = regs->sprg1;
1057  vcpu->arch.shared->sprg2 = regs->sprg2;
1058  vcpu->arch.shared->sprg3 = regs->sprg3;
1059  vcpu->arch.shared->sprg4 = regs->sprg4;
1060  vcpu->arch.shared->sprg5 = regs->sprg5;
1061  vcpu->arch.shared->sprg6 = regs->sprg6;
1062  vcpu->arch.shared->sprg7 = regs->sprg7;
1063 
1064  for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
1065  kvmppc_set_gpr(vcpu, i, regs->gpr[i]);
1066 
1067  return 0;
1068 }
1069 
1070 static void get_sregs_base(struct kvm_vcpu *vcpu,
1071  struct kvm_sregs *sregs)
1072 {
1073  u64 tb = get_tb();
1074 
1075  sregs->u.e.features |= KVM_SREGS_E_BASE;
1076 
1077  sregs->u.e.csrr0 = vcpu->arch.csrr0;
1078  sregs->u.e.csrr1 = vcpu->arch.csrr1;
1079  sregs->u.e.mcsr = vcpu->arch.mcsr;
1080  sregs->u.e.esr = get_guest_esr(vcpu);
1081  sregs->u.e.dear = get_guest_dear(vcpu);
1082  sregs->u.e.tsr = vcpu->arch.tsr;
1083  sregs->u.e.tcr = vcpu->arch.tcr;
1084  sregs->u.e.dec = kvmppc_get_dec(vcpu, tb);
1085  sregs->u.e.tb = tb;
1086  sregs->u.e.vrsave = vcpu->arch.vrsave;
1087 }
1088 
1089 static int set_sregs_base(struct kvm_vcpu *vcpu,
1090  struct kvm_sregs *sregs)
1091 {
1092  if (!(sregs->u.e.features & KVM_SREGS_E_BASE))
1093  return 0;
1094 
1095  vcpu->arch.csrr0 = sregs->u.e.csrr0;
1096  vcpu->arch.csrr1 = sregs->u.e.csrr1;
1097  vcpu->arch.mcsr = sregs->u.e.mcsr;
1098  set_guest_esr(vcpu, sregs->u.e.esr);
1099  set_guest_dear(vcpu, sregs->u.e.dear);
1100  vcpu->arch.vrsave = sregs->u.e.vrsave;
1101  kvmppc_set_tcr(vcpu, sregs->u.e.tcr);
1102 
1103  if (sregs->u.e.update_special & KVM_SREGS_E_UPDATE_DEC) {
1104  vcpu->arch.dec = sregs->u.e.dec;
1105  kvmppc_emulate_dec(vcpu);
1106  }
1107 
1108  if (sregs->u.e.update_special & KVM_SREGS_E_UPDATE_TSR) {
1109  vcpu->arch.tsr = sregs->u.e.tsr;
1110  update_timer_ints(vcpu);
1111  }
1112 
1113  return 0;
1114 }
1115 
1116 static void get_sregs_arch206(struct kvm_vcpu *vcpu,
1117  struct kvm_sregs *sregs)
1118 {
1119  sregs->u.e.features |= KVM_SREGS_E_ARCH206;
1120 
1121  sregs->u.e.pir = vcpu->vcpu_id;
1122  sregs->u.e.mcsrr0 = vcpu->arch.mcsrr0;
1123  sregs->u.e.mcsrr1 = vcpu->arch.mcsrr1;
1124  sregs->u.e.decar = vcpu->arch.decar;
1125  sregs->u.e.ivpr = vcpu->arch.ivpr;
1126 }
1127 
1128 static int set_sregs_arch206(struct kvm_vcpu *vcpu,
1129  struct kvm_sregs *sregs)
1130 {
1131  if (!(sregs->u.e.features & KVM_SREGS_E_ARCH206))
1132  return 0;
1133 
1134  if (sregs->u.e.pir != vcpu->vcpu_id)
1135  return -EINVAL;
1136 
1137  vcpu->arch.mcsrr0 = sregs->u.e.mcsrr0;
1138  vcpu->arch.mcsrr1 = sregs->u.e.mcsrr1;
1139  vcpu->arch.decar = sregs->u.e.decar;
1140  vcpu->arch.ivpr = sregs->u.e.ivpr;
1141 
1142  return 0;
1143 }
1144 
1145 void kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
1146 {
1147  sregs->u.e.features |= KVM_SREGS_E_IVOR;
1148 
1149  sregs->u.e.ivor_low[0] = vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL];
1150  sregs->u.e.ivor_low[1] = vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK];
1151  sregs->u.e.ivor_low[2] = vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE];
1152  sregs->u.e.ivor_low[3] = vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE];
1153  sregs->u.e.ivor_low[4] = vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL];
1154  sregs->u.e.ivor_low[5] = vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT];
1155  sregs->u.e.ivor_low[6] = vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM];
1156  sregs->u.e.ivor_low[7] = vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL];
1157  sregs->u.e.ivor_low[8] = vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL];
1158  sregs->u.e.ivor_low[9] = vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL];
1159  sregs->u.e.ivor_low[10] = vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER];
1160  sregs->u.e.ivor_low[11] = vcpu->arch.ivor[BOOKE_IRQPRIO_FIT];
1161  sregs->u.e.ivor_low[12] = vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG];
1162  sregs->u.e.ivor_low[13] = vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS];
1163  sregs->u.e.ivor_low[14] = vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS];
1164  sregs->u.e.ivor_low[15] = vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG];
1165 }
1166 
1167 int kvmppc_set_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
1168 {
1169  if (!(sregs->u.e.features & KVM_SREGS_E_IVOR))
1170  return 0;
1171 
1172  vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL] = sregs->u.e.ivor_low[0];
1173  vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK] = sregs->u.e.ivor_low[1];
1174  vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE] = sregs->u.e.ivor_low[2];
1175  vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE] = sregs->u.e.ivor_low[3];
1176  vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL] = sregs->u.e.ivor_low[4];
1177  vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT] = sregs->u.e.ivor_low[5];
1178  vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM] = sregs->u.e.ivor_low[6];
1179  vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL] = sregs->u.e.ivor_low[7];
1180  vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL] = sregs->u.e.ivor_low[8];
1181  vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL] = sregs->u.e.ivor_low[9];
1182  vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER] = sregs->u.e.ivor_low[10];
1183  vcpu->arch.ivor[BOOKE_IRQPRIO_FIT] = sregs->u.e.ivor_low[11];
1184  vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG] = sregs->u.e.ivor_low[12];
1185  vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS] = sregs->u.e.ivor_low[13];
1186  vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS] = sregs->u.e.ivor_low[14];
1187  vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG] = sregs->u.e.ivor_low[15];
1188 
1189  return 0;
1190 }
1191 
1192 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
1193  struct kvm_sregs *sregs)
1194 {
1195  sregs->pvr = vcpu->arch.pvr;
1196 
1197  get_sregs_base(vcpu, sregs);
1198  get_sregs_arch206(vcpu, sregs);
1199  kvmppc_core_get_sregs(vcpu, sregs);
1200  return 0;
1201 }
1202 
1203 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
1204  struct kvm_sregs *sregs)
1205 {
1206  int ret;
1207 
1208  if (vcpu->arch.pvr != sregs->pvr)
1209  return -EINVAL;
1210 
1211  ret = set_sregs_base(vcpu, sregs);
1212  if (ret < 0)
1213  return ret;
1214 
1215  ret = set_sregs_arch206(vcpu, sregs);
1216  if (ret < 0)
1217  return ret;
1218 
1219  return kvmppc_core_set_sregs(vcpu, sregs);
1220 }
1221 
1222 int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
1223 {
1224  return -EINVAL;
1225 }
1226 
1227 int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
1228 {
1229  return -EINVAL;
1230 }
1231 
1232 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
1233 {
1234  return -ENOTSUPP;
1235 }
1236 
1237 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
1238 {
1239  return -ENOTSUPP;
1240 }
1241 
1242 int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
1243  struct kvm_translation *tr)
1244 {
1245  int r;
1246 
1247  r = kvmppc_core_vcpu_translate(vcpu, tr);
1248  return r;
1249 }
1250 
1251 int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
1252 {
1253  return -ENOTSUPP;
1254 }
1255 
1256 int kvmppc_core_prepare_memory_region(struct kvm *kvm,
1257  struct kvm_userspace_memory_region *mem)
1258 {
1259  return 0;
1260 }
1261 
1262 void kvmppc_core_commit_memory_region(struct kvm *kvm,
1263  struct kvm_userspace_memory_region *mem)
1264 {
1265 }
1266 
1267 void kvmppc_set_tcr(struct kvm_vcpu *vcpu, u32 new_tcr)
1268 {
1269  vcpu->arch.tcr = new_tcr;
1270  update_timer_ints(vcpu);
1271 }
1272 
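/* Setting TSR bits may make a timer interrupt deliverable, so post a
 * KVM_REQ_PENDING_TIMER request and kick the vcpu out of guest mode. */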
1273 void kvmppc_set_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits)
1274 {
1275  set_bits(tsr_bits, &vcpu->arch.tsr);
1276  smp_wmb();
1277  kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu);
1278  kvm_vcpu_kick(vcpu);
1279 }
1280 
1281 void kvmppc_clr_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits)
1282 {
1283  clear_bits(tsr_bits, &vcpu->arch.tsr);
1284  update_timer_ints(vcpu);
1285 }
1286 
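/* Called when the guest decrementer expires: with auto-reload (TCR[ARE])
 * the DEC is reloaded from DECAR, then TSR[DIS] is set to raise the
 * interrupt. */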
1287 void kvmppc_decrementer_func(unsigned long data)
1288 {
1289  struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;
1290 
1291  if (vcpu->arch.tcr & TCR_ARE) {
1292  vcpu->arch.dec = vcpu->arch.decar;
1293  kvmppc_emulate_dec(vcpu);
1294  }
1295 
1296  kvmppc_set_tsr_bits(vcpu, TSR_DIS);
1297 }
1298 
1299 void kvmppc_booke_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
1300 {
1301  current->thread.kvm_vcpu = vcpu;
1302 }
1303 
1304 void kvmppc_booke_vcpu_put(struct kvm_vcpu *vcpu)
1305 {
1306  current->thread.kvm_vcpu = NULL;
1307 }
1308 
1309 int __init kvmppc_booke_init(void)
1310 {
1311 #ifndef CONFIG_KVM_BOOKE_HV
1312  unsigned long ivor[16];
1313  unsigned long max_ivor = 0;
1314  int i;
1315 
1316  /* We install our own exception handlers by hijacking IVPR. IVPR must
1317  * be 16-bit aligned, so we need a 64KB allocation. */
1318  kvmppc_booke_handlers = __get_free_pages(GFP_KERNEL | __GFP_ZERO,
1319  VCPU_SIZE_ORDER);
1320  if (!kvmppc_booke_handlers)
1321  return -ENOMEM;
1322 
1323  /* XXX make sure our handlers are smaller than Linux's */
1324 
1325  /* Copy our interrupt handlers to match host IVORs. That way we don't
1326  * have to swap the IVORs on every guest/host transition. */
1327  ivor[0] = mfspr(SPRN_IVOR0);
1328  ivor[1] = mfspr(SPRN_IVOR1);
1329  ivor[2] = mfspr(SPRN_IVOR2);
1330  ivor[3] = mfspr(SPRN_IVOR3);
1331  ivor[4] = mfspr(SPRN_IVOR4);
1332  ivor[5] = mfspr(SPRN_IVOR5);
1333  ivor[6] = mfspr(SPRN_IVOR6);
1334  ivor[7] = mfspr(SPRN_IVOR7);
1335  ivor[8] = mfspr(SPRN_IVOR8);
1336  ivor[9] = mfspr(SPRN_IVOR9);
1337  ivor[10] = mfspr(SPRN_IVOR10);
1338  ivor[11] = mfspr(SPRN_IVOR11);
1339  ivor[12] = mfspr(SPRN_IVOR12);
1340  ivor[13] = mfspr(SPRN_IVOR13);
1341  ivor[14] = mfspr(SPRN_IVOR14);
1342  ivor[15] = mfspr(SPRN_IVOR15);
1343 
1344  for (i = 0; i < 16; i++) {
1345  if (ivor[i] > max_ivor)
1346  max_ivor = ivor[i];
1347 
1348  memcpy((void *)kvmppc_booke_handlers + ivor[i],
1349  kvmppc_handlers_start + ivor[i],
1350  kvmppc_handler_len);
1351  }
1352  flush_icache_range(kvmppc_booke_handlers,
1353  kvmppc_booke_handlers + max_ivor + kvmppc_handler_len);
1354 #endif /* !BOOKE_HV */
1355  return 0;
1356 }
1357 
1358 void __exit kvmppc_booke_exit(void)
1359 {
1360  free_pages(kvmppc_booke_handlers, VCPU_SIZE_ORDER);
1361  kvm_exit();
1362 }