Linux Kernel 3.7.1
book3s.c
/*
 * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *     Alexander Graf <[email protected]>
 *     Kevin Wolf <[email protected]>
 *
 * Description:
 * This file is derived from arch/powerpc/kvm/44x.c,
 * by Hollis Blanchard <[email protected]>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/kvm_host.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/slab.h>

#include <asm/reg.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu_context.h>
#include <asm/page.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>

#include "trace.h"

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

/* #define EXIT_DEBUG */

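/*
 * Per-vcpu exit and interrupt counters exported through the kvm debugfs
 * interface; each entry maps a file name to a field of struct kvm_vcpu
 * via the VCPU_STAT() macro above.
 */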
struct kvm_stats_debugfs_item debugfs_entries[] = {
        { "exits",       VCPU_STAT(sum_exits) },
        { "mmio",        VCPU_STAT(mmio_exits) },
        { "sig",         VCPU_STAT(signal_exits) },
        { "sysc",        VCPU_STAT(syscall_exits) },
        { "inst_emu",    VCPU_STAT(emulated_inst_exits) },
        { "dec",         VCPU_STAT(dec_exits) },
        { "ext_intr",    VCPU_STAT(ext_intr_exits) },
        { "queue_intr",  VCPU_STAT(queue_intr) },
        { "halt_wakeup", VCPU_STAT(halt_wakeup) },
        { "pf_storage",  VCPU_STAT(pf_storage) },
        { "sp_storage",  VCPU_STAT(sp_storage) },
        { "pf_instruc",  VCPU_STAT(pf_instruc) },
        { "sp_instruc",  VCPU_STAT(sp_instruc) },
        { "ld",          VCPU_STAT(ld) },
        { "ld_slow",     VCPU_STAT(ld_slow) },
        { "st",          VCPU_STAT(st) },
        { "st_slow",     VCPU_STAT(st_slow) },
        { NULL }
};

void kvmppc_core_load_host_debugstate(struct kvm_vcpu *vcpu)
{
}

void kvmppc_core_load_guest_debugstate(struct kvm_vcpu *vcpu)
{
}

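/*
 * Inject an interrupt into the guest: stash the current PC and MSR in the
 * shared-page SRR0/SRR1, branch to the exception vector (relative to the
 * guest's interrupt prefix) and let the MMU code rewrite the MSR.
 */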
void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags)
{
        vcpu->arch.shared->srr0 = kvmppc_get_pc(vcpu);
        vcpu->arch.shared->srr1 = vcpu->arch.shared->msr | flags;
        kvmppc_set_pc(vcpu, kvmppc_interrupt_offset(vcpu) + vec);
        vcpu->arch.mmu.reset_msr(vcpu);
}

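/* Map a Book3S exception vector to its internal delivery priority. */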
static int kvmppc_book3s_vec2irqprio(unsigned int vec)
{
        unsigned int prio;

        switch (vec) {
        case 0x100: prio = BOOK3S_IRQPRIO_SYSTEM_RESET;   break;
        case 0x200: prio = BOOK3S_IRQPRIO_MACHINE_CHECK;  break;
        case 0x300: prio = BOOK3S_IRQPRIO_DATA_STORAGE;   break;
        case 0x380: prio = BOOK3S_IRQPRIO_DATA_SEGMENT;   break;
        case 0x400: prio = BOOK3S_IRQPRIO_INST_STORAGE;   break;
        case 0x480: prio = BOOK3S_IRQPRIO_INST_SEGMENT;   break;
        case 0x500: prio = BOOK3S_IRQPRIO_EXTERNAL;       break;
        case 0x501: prio = BOOK3S_IRQPRIO_EXTERNAL_LEVEL; break;
        case 0x600: prio = BOOK3S_IRQPRIO_ALIGNMENT;      break;
        case 0x700: prio = BOOK3S_IRQPRIO_PROGRAM;        break;
        case 0x800: prio = BOOK3S_IRQPRIO_FP_UNAVAIL;     break;
        case 0x900: prio = BOOK3S_IRQPRIO_DECREMENTER;    break;
        case 0xc00: prio = BOOK3S_IRQPRIO_SYSCALL;        break;
        case 0xd00: prio = BOOK3S_IRQPRIO_DEBUG;          break;
        case 0xf20: prio = BOOK3S_IRQPRIO_ALTIVEC;        break;
        case 0xf40: prio = BOOK3S_IRQPRIO_VSX;            break;
        default:    prio = BOOK3S_IRQPRIO_MAX;            break;
        }

        return prio;
}

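/*
 * Clear a pending exception and update the shared page so the guest knows
 * whether any interrupts remain pending.
 */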
static void kvmppc_book3s_dequeue_irqprio(struct kvm_vcpu *vcpu,
                                          unsigned int vec)
{
        unsigned long old_pending = vcpu->arch.pending_exceptions;

        clear_bit(kvmppc_book3s_vec2irqprio(vec),
                  &vcpu->arch.pending_exceptions);

        kvmppc_update_int_pending(vcpu, vcpu->arch.pending_exceptions,
                                  old_pending);
}

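/* Mark an exception vector as pending so it is delivered before the next guest entry. */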
void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec)
{
        vcpu->stat.queue_intr++;

        set_bit(kvmppc_book3s_vec2irqprio(vec),
                &vcpu->arch.pending_exceptions);
#ifdef EXIT_DEBUG
        printk(KERN_INFO "Queueing interrupt %x\n", vec);
#endif
}

void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags)
{
        /* might as well deliver this straight away */
        kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_PROGRAM, flags);
}

void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
{
        kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DECREMENTER);
}

int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu)
{
        return test_bit(BOOK3S_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}

void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu)
{
        kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_DECREMENTER);
}

void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
                                struct kvm_interrupt *irq)
{
        unsigned int vec = BOOK3S_INTERRUPT_EXTERNAL;

        if (irq->irq == KVM_INTERRUPT_SET_LEVEL)
                vec = BOOK3S_INTERRUPT_EXTERNAL_LEVEL;

        kvmppc_book3s_queue_irqprio(vcpu, vec);
}

void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu,
                                  struct kvm_interrupt *irq)
{
        kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL);
        kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL_LEVEL);
}

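/*
 * Attempt delivery of one priority: decrementer and external interrupts are
 * gated on MSR_EE and on the guest not being in a critical section; all
 * other exceptions are delivered unconditionally.
 */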
int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu, unsigned int priority)
{
        int deliver = 1;
        int vec = 0;
        bool crit = kvmppc_critical_section(vcpu);

        switch (priority) {
        case BOOK3S_IRQPRIO_DECREMENTER:
                deliver = (vcpu->arch.shared->msr & MSR_EE) && !crit;
                vec = BOOK3S_INTERRUPT_DECREMENTER;
                break;
        case BOOK3S_IRQPRIO_EXTERNAL:
        case BOOK3S_IRQPRIO_EXTERNAL_LEVEL:
                deliver = (vcpu->arch.shared->msr & MSR_EE) && !crit;
                vec = BOOK3S_INTERRUPT_EXTERNAL;
                break;
        case BOOK3S_IRQPRIO_SYSTEM_RESET:
                vec = BOOK3S_INTERRUPT_SYSTEM_RESET;
                break;
        case BOOK3S_IRQPRIO_MACHINE_CHECK:
                vec = BOOK3S_INTERRUPT_MACHINE_CHECK;
                break;
        case BOOK3S_IRQPRIO_DATA_STORAGE:
                vec = BOOK3S_INTERRUPT_DATA_STORAGE;
                break;
        case BOOK3S_IRQPRIO_INST_STORAGE:
                vec = BOOK3S_INTERRUPT_INST_STORAGE;
                break;
        case BOOK3S_IRQPRIO_DATA_SEGMENT:
                vec = BOOK3S_INTERRUPT_DATA_SEGMENT;
                break;
        case BOOK3S_IRQPRIO_INST_SEGMENT:
                vec = BOOK3S_INTERRUPT_INST_SEGMENT;
                break;
        case BOOK3S_IRQPRIO_ALIGNMENT:
                vec = BOOK3S_INTERRUPT_ALIGNMENT;
                break;
        case BOOK3S_IRQPRIO_PROGRAM:
                vec = BOOK3S_INTERRUPT_PROGRAM;
                break;
        case BOOK3S_IRQPRIO_VSX:
                vec = BOOK3S_INTERRUPT_VSX;
                break;
        case BOOK3S_IRQPRIO_ALTIVEC:
                vec = BOOK3S_INTERRUPT_ALTIVEC;
                break;
        case BOOK3S_IRQPRIO_FP_UNAVAIL:
                vec = BOOK3S_INTERRUPT_FP_UNAVAIL;
                break;
        case BOOK3S_IRQPRIO_SYSCALL:
                vec = BOOK3S_INTERRUPT_SYSCALL;
                break;
        case BOOK3S_IRQPRIO_DEBUG:
                vec = BOOK3S_INTERRUPT_TRACE;
                break;
        case BOOK3S_IRQPRIO_PERFORMANCE_MONITOR:
                vec = BOOK3S_INTERRUPT_PERFMON;
                break;
        default:
                deliver = 0;
                printk(KERN_ERR "KVM: Unknown interrupt: 0x%x\n", priority);
                break;
        }

#if 0
        printk(KERN_INFO "Deliver interrupt 0x%x? %x\n", vec, deliver);
#endif

        if (deliver)
                kvmppc_inject_interrupt(vcpu, vec, 0);

        return deliver;
}

/*
 * This function determines if an irqprio should be cleared once issued.
 */
static bool clear_irqprio(struct kvm_vcpu *vcpu, unsigned int priority)
{
        switch (priority) {
        case BOOK3S_IRQPRIO_DECREMENTER:
                /* DEC interrupts get cleared by mtdec */
                return false;
        case BOOK3S_IRQPRIO_EXTERNAL_LEVEL:
                /* External interrupts get cleared by userspace */
                return false;
        }

        return true;
}

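/*
 * Scan the pending-exception bitmap in priority order and deliver what can
 * be taken now; a delivered exception is removed from the bitmap unless it
 * is cleared elsewhere (DEC by mtdec, level-triggered externals by
 * userspace).
 */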
int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
{
        unsigned long *pending = &vcpu->arch.pending_exceptions;
        unsigned long old_pending = vcpu->arch.pending_exceptions;
        unsigned int priority;

#ifdef EXIT_DEBUG
        if (vcpu->arch.pending_exceptions)
                printk(KERN_EMERG "KVM: Check pending: %lx\n", vcpu->arch.pending_exceptions);
#endif
        priority = __ffs(*pending);
        while (priority < BOOK3S_IRQPRIO_MAX) {
                if (kvmppc_book3s_irqprio_deliver(vcpu, priority) &&
                    clear_irqprio(vcpu, priority)) {
                        clear_bit(priority, &vcpu->arch.pending_exceptions);
                        break;
                }

                priority = find_next_bit(pending,
                                         BITS_PER_BYTE * sizeof(*pending),
                                         priority + 1);
        }

        /* Tell the guest about our interrupt status */
        kvmppc_update_int_pending(vcpu, *pending, old_pending);

        return 0;
}

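/*
 * Resolve a guest frame number to a host pfn.  If the gfn falls on the
 * registered magic (paravirt shared) page, return the host page that backs
 * vcpu->arch.shared instead of going through the memslots.
 */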
pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn)
{
        ulong mp_pa = vcpu->arch.magic_page_pa;

        if (!(vcpu->arch.shared->msr & MSR_SF))
                mp_pa = (uint32_t)mp_pa;

        /* Magic page override */
        if (unlikely(mp_pa) &&
            unlikely(((gfn << PAGE_SHIFT) & KVM_PAM) ==
                     ((mp_pa & PAGE_MASK) & KVM_PAM))) {
                ulong shared_page = ((ulong)vcpu->arch.shared) & PAGE_MASK;
                pfn_t pfn;

                pfn = (pfn_t)virt_to_phys((void*)shared_page) >> PAGE_SHIFT;
                get_page(pfn_to_page(pfn));
                return pfn;
        }

        return gfn_to_pfn(vcpu->kvm, gfn);
}

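/*
 * Translate a guest effective address: go through the guest MMU when
 * relocation (MSR_IR/MSR_DR) is on, otherwise use the real-mode identity
 * mapping with all permissions granted.
 */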
static int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, bool data,
                        struct kvmppc_pte *pte)
{
        int relocated = (vcpu->arch.shared->msr & (data ? MSR_DR : MSR_IR));
        int r;

        if (relocated) {
                r = vcpu->arch.mmu.xlate(vcpu, eaddr, pte, data);
        } else {
                pte->eaddr = eaddr;
                pte->raddr = eaddr & KVM_PAM;
                pte->vpage = VSID_REAL | eaddr >> 12;
                pte->may_read = true;
                pte->may_write = true;
                pte->may_execute = true;
                r = 0;
        }

        return r;
}

static hva_t kvmppc_bad_hva(void)
{
        return PAGE_OFFSET;
}

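/*
 * Convert a translated guest PTE into a host virtual address, checking the
 * permission required by the access (read vs. write).
 */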
static hva_t kvmppc_pte_to_hva(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte,
                               bool read)
{
        hva_t hpage;

        if (read && !pte->may_read)
                goto err;

        if (!read && !pte->may_write)
                goto err;

        hpage = gfn_to_hva(vcpu->kvm, pte->raddr >> PAGE_SHIFT);
        if (kvm_is_error_hva(hpage))
                goto err;

        return hpage | (pte->raddr & ~PAGE_MASK);
err:
        return kvmppc_bad_hva();
}

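/*
 * Emulated guest store: translate the effective address and write the bytes
 * into guest memory; anything that cannot be stored directly is handed back
 * to the caller as an MMIO request.
 */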
int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
              bool data)
{
        struct kvmppc_pte pte;

        vcpu->stat.st++;

        if (kvmppc_xlate(vcpu, *eaddr, data, &pte))
                return -ENOENT;

        *eaddr = pte.raddr;

        if (!pte.may_write)
                return -EPERM;

        if (kvm_write_guest(vcpu->kvm, pte.raddr, ptr, size))
                return EMULATE_DO_MMIO;

        return EMULATE_DONE;
}

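/*
 * Emulated guest load: translate the effective address, map it to a host
 * virtual address and copy from there; translation failures return -ENOENT,
 * unreachable memory is reported as MMIO.
 */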
int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
              bool data)
{
        struct kvmppc_pte pte;
        hva_t hva = *eaddr;

        vcpu->stat.ld++;

        if (kvmppc_xlate(vcpu, *eaddr, data, &pte))
                goto nopte;

        *eaddr = pte.raddr;

        hva = kvmppc_pte_to_hva(vcpu, &pte, true);
        if (kvm_is_error_hva(hva))
                goto mmio;

        if (copy_from_user(ptr, (void __user *)hva, size)) {
                printk(KERN_INFO "kvmppc_ld at 0x%lx failed\n", hva);
                goto mmio;
        }

        return EMULATE_DONE;

nopte:
        return -ENOENT;
mmio:
        return EMULATE_DO_MMIO;
}

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
        return 0;
}

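/* Read out the guest-visible register file for the KVM_GET_REGS ioctl. */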
int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        int i;

        regs->pc = kvmppc_get_pc(vcpu);
        regs->cr = kvmppc_get_cr(vcpu);
        regs->ctr = kvmppc_get_ctr(vcpu);
        regs->lr = kvmppc_get_lr(vcpu);
        regs->xer = kvmppc_get_xer(vcpu);
        regs->msr = vcpu->arch.shared->msr;
        regs->srr0 = vcpu->arch.shared->srr0;
        regs->srr1 = vcpu->arch.shared->srr1;
        regs->pid = vcpu->arch.pid;
        regs->sprg0 = vcpu->arch.shared->sprg0;
        regs->sprg1 = vcpu->arch.shared->sprg1;
        regs->sprg2 = vcpu->arch.shared->sprg2;
        regs->sprg3 = vcpu->arch.shared->sprg3;
        regs->sprg4 = vcpu->arch.shared->sprg4;
        regs->sprg5 = vcpu->arch.shared->sprg5;
        regs->sprg6 = vcpu->arch.shared->sprg6;
        regs->sprg7 = vcpu->arch.shared->sprg7;

        for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
                regs->gpr[i] = kvmppc_get_gpr(vcpu, i);

        return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        int i;

        kvmppc_set_pc(vcpu, regs->pc);
        kvmppc_set_cr(vcpu, regs->cr);
        kvmppc_set_ctr(vcpu, regs->ctr);
        kvmppc_set_lr(vcpu, regs->lr);
        kvmppc_set_xer(vcpu, regs->xer);
        kvmppc_set_msr(vcpu, regs->msr);
        vcpu->arch.shared->srr0 = regs->srr0;
        vcpu->arch.shared->srr1 = regs->srr1;
        vcpu->arch.shared->sprg0 = regs->sprg0;
        vcpu->arch.shared->sprg1 = regs->sprg1;
        vcpu->arch.shared->sprg2 = regs->sprg2;
        vcpu->arch.shared->sprg3 = regs->sprg3;
        vcpu->arch.shared->sprg4 = regs->sprg4;
        vcpu->arch.shared->sprg5 = regs->sprg5;
        vcpu->arch.shared->sprg6 = regs->sprg6;
        vcpu->arch.shared->sprg7 = regs->sprg7;

        for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
                kvmppc_set_gpr(vcpu, i, regs->gpr[i]);

        return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
                                  struct kvm_translation *tr)
{
        return 0;
}

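/*
 * Decrementer timer callback: queue a decrementer exception for the vcpu
 * and kick it so a halted vcpu wakes up and notices it.
 */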
void kvmppc_decrementer_func(unsigned long data)
{
        struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;

        kvmppc_core_queue_dec(vcpu);
        kvm_vcpu_kick(vcpu);
}