Linux Kernel 3.7.1
powerpc.c
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <[email protected]>
 *          Christian Ehrhardt <[email protected]>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/vmalloc.h>
#include <linux/hrtimer.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <asm/cputable.h>
#include <asm/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/tlbflush.h>
#include <asm/cputhreads.h>
#include "timing.h"
#include "../mm/mmu_decl.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
        return !(v->arch.shared->msr & MSR_WE) ||
               !!(v->arch.pending_exceptions) ||
               v->requests;
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
        return 1;
}

int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
{
        int nr = kvmppc_get_gpr(vcpu, 11);
        int r;
        unsigned long __maybe_unused param1 = kvmppc_get_gpr(vcpu, 3);
        unsigned long __maybe_unused param2 = kvmppc_get_gpr(vcpu, 4);
        unsigned long __maybe_unused param3 = kvmppc_get_gpr(vcpu, 5);
        unsigned long __maybe_unused param4 = kvmppc_get_gpr(vcpu, 6);
        unsigned long r2 = 0;

        if (!(vcpu->arch.shared->msr & MSR_SF)) {
                /* 32 bit mode */
                param1 &= 0xffffffff;
                param2 &= 0xffffffff;
                param3 &= 0xffffffff;
                param4 &= 0xffffffff;
        }

        switch (nr) {
        case HC_VENDOR_KVM | KVM_HC_PPC_MAP_MAGIC_PAGE:
        {
                vcpu->arch.magic_page_pa = param1;
                vcpu->arch.magic_page_ea = param2;

                r2 = KVM_MAGIC_FEAT_SR | KVM_MAGIC_FEAT_MAS0_TO_SPRG7;

                r = HC_EV_SUCCESS;
                break;
        }
        case HC_VENDOR_KVM | KVM_HC_FEATURES:
                r = HC_EV_SUCCESS;
#if defined(CONFIG_PPC_BOOK3S) || defined(CONFIG_KVM_E500V2)
                /* XXX Missing magic page on 44x */
                r2 |= (1 << KVM_FEATURE_MAGIC_PAGE);
#endif

                /* Second return value is in r4 */
                break;
        default:
                r = HC_EV_UNIMPLEMENTED;
                break;
        }

        kvmppc_set_gpr(vcpu, 4, r2);

        return r;
}
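
/*
 * Guest-side sketch (editor's illustration, not from this file): as the
 * dispatcher above shows, the PV ABI passes the hypercall number in r11,
 * up to four arguments in r3-r6, and returns a status in r3 plus a second
 * value in r4 (the core exit handler copies the return of kvmppc_kvm_pv()
 * into guest r3). A guest issuing a zero-argument hypercall might do,
 * roughly (the function name is hypothetical and the asm is simplified):
 */
#if 0   /* illustrative only; not part of the original file */
static inline long example_kvm_hypercall0(unsigned long nr)
{
        register unsigned long r0  asm("r0")  = KVM_SC_MAGIC_R0; /* "KVM!" */
        register unsigned long r3  asm("r3")  = 0;
        register unsigned long r11 asm("r11") = nr;

        /* Simplified: the real entry sequence is the one advertised to
         * userspace via KVM_PPC_GET_PVINFO (see kvm_vm_ioctl_get_pvinfo
         * below); PR KVM recognizes the sc only when r0 holds the magic. */
        asm volatile("sc" : "+r"(r3) : "r"(r0), "r"(r11) : "memory");

        return r3;      /* HC_EV_SUCCESS on success; second value in r4 */
}
#endif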

int kvmppc_sanity_check(struct kvm_vcpu *vcpu)
{
        int r = false;

        /* We have to know what CPU to virtualize */
        if (!vcpu->arch.pvr)
                goto out;

        /* PAPR only works with book3s_64 */
        if ((vcpu->arch.cpu_type != KVM_CPU_3S_64) && vcpu->arch.papr_enabled)
                goto out;

#ifdef CONFIG_KVM_BOOK3S_64_HV
        /* HV KVM can only do PAPR mode for now */
        if (!vcpu->arch.papr_enabled)
                goto out;
#endif

#ifdef CONFIG_KVM_BOOKE_HV
        if (!cpu_has_feature(CPU_FTR_EMB_HV))
                goto out;
#endif

        r = true;

out:
        vcpu->arch.sane = r;
        return r ? 0 : -EINVAL;
}

int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
        enum emulation_result er;
        int r;

        er = kvmppc_emulate_instruction(run, vcpu);
        switch (er) {
        case EMULATE_DONE:
                /* Future optimization: only reload non-volatiles if they were
                 * actually modified. */
                r = RESUME_GUEST_NV;
                break;
        case EMULATE_DO_MMIO:
                run->exit_reason = KVM_EXIT_MMIO;
                /* We must reload nonvolatiles because "update" load/store
                 * instructions modify register state. */
                /* Future optimization: only reload non-volatiles if they were
                 * actually modified. */
                r = RESUME_HOST_NV;
                break;
        case EMULATE_FAIL:
                /* XXX Deliver Program interrupt to guest. */
                printk(KERN_EMERG "%s: emulation failed (%08x)\n", __func__,
                       kvmppc_get_last_inst(vcpu));
                r = RESUME_HOST;
                break;
        default:
                BUG();
        }

        return r;
}

int kvm_arch_hardware_enable(void *garbage)
{
        return 0;
}

void kvm_arch_hardware_disable(void *garbage)
{
}

int kvm_arch_hardware_setup(void)
{
        return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

void kvm_arch_check_processor_compat(void *rtn)
{
        *(int *)rtn = kvmppc_core_check_processor_compat();
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
        if (type)
                return -EINVAL;

        return kvmppc_core_init_vm(kvm);
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
        unsigned int i;
        struct kvm_vcpu *vcpu;

        kvm_for_each_vcpu(i, vcpu, kvm)
                kvm_arch_vcpu_free(vcpu);

        mutex_lock(&kvm->lock);
        for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
                kvm->vcpus[i] = NULL;

        atomic_set(&kvm->online_vcpus, 0);

        kvmppc_core_destroy_vm(kvm);

        mutex_unlock(&kvm->lock);
}

void kvm_arch_sync_events(struct kvm *kvm)
{
}

int kvm_dev_ioctl_check_extension(long ext)
{
        int r;

        switch (ext) {
#ifdef CONFIG_BOOKE
        case KVM_CAP_PPC_BOOKE_SREGS:
#else
        case KVM_CAP_PPC_SEGSTATE:
        case KVM_CAP_PPC_HIOR:
        case KVM_CAP_PPC_PAPR:
#endif
        case KVM_CAP_PPC_UNSET_IRQ:
        case KVM_CAP_PPC_IRQ_LEVEL:
        case KVM_CAP_ENABLE_CAP:
        case KVM_CAP_ONE_REG:
                r = 1;
                break;
#ifndef CONFIG_KVM_BOOK3S_64_HV
        case KVM_CAP_PPC_PAIRED_SINGLES:
        case KVM_CAP_PPC_OSI:
        case KVM_CAP_PPC_GET_PVINFO:
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
        case KVM_CAP_SW_TLB:
#endif
                r = 1;
                break;
        case KVM_CAP_COALESCED_MMIO:
                r = KVM_COALESCED_MMIO_PAGE_OFFSET;
                break;
#endif
#ifdef CONFIG_PPC_BOOK3S_64
        case KVM_CAP_SPAPR_TCE:
        case KVM_CAP_PPC_ALLOC_HTAB:
                r = 1;
                break;
#endif /* CONFIG_PPC_BOOK3S_64 */
#ifdef CONFIG_KVM_BOOK3S_64_HV
        case KVM_CAP_PPC_SMT:
                r = threads_per_core;
                break;
        case KVM_CAP_PPC_RMA:
                r = 1;
                /* PPC970 requires an RMA */
                if (cpu_has_feature(CPU_FTR_ARCH_201))
                        r = 2;
                break;
        case KVM_CAP_SYNC_MMU:
                r = cpu_has_feature(CPU_FTR_ARCH_206) ? 1 : 0;
                break;
#endif
        case KVM_CAP_NR_VCPUS:
                /*
                 * Recommending a number of CPUs is somewhat arbitrary; we
                 * return the number of present CPUs for -HV (since a host
                 * will have secondary threads "offline"), and for other KVM
                 * implementations just count online CPUs.
                 */
#ifdef CONFIG_KVM_BOOK3S_64_HV
                r = num_present_cpus();
#else
                r = num_online_cpus();
#endif
                break;
        case KVM_CAP_MAX_VCPUS:
                r = KVM_MAX_VCPUS;
                break;
#ifdef CONFIG_PPC_BOOK3S_64
        case KVM_CAP_PPC_GET_SMMU_INFO:
                r = 1;
                break;
#endif
        default:
                r = 0;
                break;
        }
        return r;
}

long kvm_arch_dev_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg)
{
        return -EINVAL;
}

void kvm_arch_free_memslot(struct kvm_memory_slot *free,
                           struct kvm_memory_slot *dont)
{
        if (!dont || free->arch.rmap != dont->arch.rmap) {
                vfree(free->arch.rmap);
                free->arch.rmap = NULL;
        }
}

int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
{
        slot->arch.rmap = vzalloc(npages * sizeof(*slot->arch.rmap));
        if (!slot->arch.rmap)
                return -ENOMEM;

        return 0;
}

int kvm_arch_prepare_memory_region(struct kvm *kvm,
                                   struct kvm_memory_slot *memslot,
                                   struct kvm_memory_slot old,
                                   struct kvm_userspace_memory_region *mem,
                                   int user_alloc)
{
        return kvmppc_core_prepare_memory_region(kvm, mem);
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
                                   struct kvm_userspace_memory_region *mem,
                                   struct kvm_memory_slot old,
                                   int user_alloc)
{
        kvmppc_core_commit_memory_region(kvm, mem);
}

void kvm_arch_flush_shadow_all(struct kvm *kvm)
{
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
                                   struct kvm_memory_slot *slot)
{
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
{
        struct kvm_vcpu *vcpu;
        vcpu = kvmppc_core_vcpu_create(kvm, id);
        if (!IS_ERR(vcpu)) {
                vcpu->arch.wqp = &vcpu->wq;
                kvmppc_create_vcpu_debugfs(vcpu, id);
        }
        return vcpu;
}

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
        /* Make sure we're not using the vcpu anymore */
        hrtimer_cancel(&vcpu->arch.dec_timer);
        tasklet_kill(&vcpu->arch.tasklet);

        kvmppc_remove_vcpu_debugfs(vcpu);
        kvmppc_core_vcpu_free(vcpu);
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
        kvm_arch_vcpu_free(vcpu);
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
        return kvmppc_core_pending_dec(vcpu);
}

/*
 * low level hrtimer wake routine. Because this runs in hardirq context
 * we schedule a tasklet to do the real work.
 */
enum hrtimer_restart kvmppc_decrementer_wakeup(struct hrtimer *timer)
{
        struct kvm_vcpu *vcpu;

        vcpu = container_of(timer, struct kvm_vcpu, arch.dec_timer);
        tasklet_schedule(&vcpu->arch.tasklet);

        return HRTIMER_NORESTART;
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
        hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
        tasklet_init(&vcpu->arch.tasklet, kvmppc_decrementer_func, (ulong)vcpu);
        vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup;
        vcpu->arch.dec_expires = ~(u64)0;

#ifdef CONFIG_KVM_EXIT_TIMING
        mutex_init(&vcpu->arch.exit_timing_lock);
#endif

        return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
        kvmppc_mmu_destroy(vcpu);
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
#ifdef CONFIG_BOOKE
        /*
         * vrsave (formerly usprg0) isn't used by Linux, but may
         * be used by the guest.
         *
         * On non-booke this is associated with Altivec and
         * is handled by code in book3s.c.
         */
        mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
#endif
        kvmppc_core_vcpu_load(vcpu, cpu);
        vcpu->cpu = smp_processor_id();
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
        kvmppc_core_vcpu_put(vcpu);
#ifdef CONFIG_BOOKE
        vcpu->arch.vrsave = mfspr(SPRN_VRSAVE);
#endif
        vcpu->cpu = -1;
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
                                        struct kvm_guest_debug *dbg)
{
        return -EINVAL;
}

static void kvmppc_complete_dcr_load(struct kvm_vcpu *vcpu,
                                     struct kvm_run *run)
{
        kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, run->dcr.data);
}

static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
                                      struct kvm_run *run)
{
        u64 uninitialized_var(gpr);

        if (run->mmio.len > sizeof(gpr)) {
                printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len);
                return;
        }

        if (vcpu->arch.mmio_is_bigendian) {
                switch (run->mmio.len) {
                case 8: gpr = *(u64 *)run->mmio.data; break;
                case 4: gpr = *(u32 *)run->mmio.data; break;
                case 2: gpr = *(u16 *)run->mmio.data; break;
                case 1: gpr = *(u8 *)run->mmio.data; break;
                }
        } else {
                /* Convert BE data from userland back to LE. */
                switch (run->mmio.len) {
                case 4: gpr = ld_le32((u32 *)run->mmio.data); break;
                case 2: gpr = ld_le16((u16 *)run->mmio.data); break;
                case 1: gpr = *(u8 *)run->mmio.data; break;
                }
        }

        if (vcpu->arch.mmio_sign_extend) {
                switch (run->mmio.len) {
#ifdef CONFIG_PPC64
                case 4:
                        gpr = (s64)(s32)gpr;
                        break;
#endif
                case 2:
                        gpr = (s64)(s16)gpr;
                        break;
                case 1:
                        gpr = (s64)(s8)gpr;
                        break;
                }
        }

        kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);

        switch (vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) {
        case KVM_MMIO_REG_GPR:
                kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
                break;
        case KVM_MMIO_REG_FPR:
                vcpu->arch.fpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
                break;
#ifdef CONFIG_PPC_BOOK3S
        case KVM_MMIO_REG_QPR:
                vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
                break;
        case KVM_MMIO_REG_FQPR:
                vcpu->arch.fpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
                vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
                break;
#endif
        default:
                BUG();
        }
}
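
/*
 * Worked example (editor's sketch, not part of the kernel source): the
 * sign-extension path above mirrors what a "load algebraic" instruction
 * (e.g. lha) does in hardware. A standalone userspace demonstration of
 * the same cast chain:
 */
#if 0   /* illustrative only; not compiled into the kernel */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t gpr = 0xff80;              /* raw 2-byte MMIO value */

        gpr = (int64_t)(int16_t)gpr;        /* same widening as above */
        printf("0x%016llx\n", (unsigned long long)gpr);
        /* prints 0xffffffffffffff80: bit 15 was the sign bit */
        return 0;
}
#endif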

int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
                       unsigned int rt, unsigned int bytes, int is_bigendian)
{
        if (bytes > sizeof(run->mmio.data)) {
                printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
                       run->mmio.len);
        }

        run->mmio.phys_addr = vcpu->arch.paddr_accessed;
        run->mmio.len = bytes;
        run->mmio.is_write = 0;

        vcpu->arch.io_gpr = rt;
        vcpu->arch.mmio_is_bigendian = is_bigendian;
        vcpu->mmio_needed = 1;
        vcpu->mmio_is_write = 0;
        vcpu->arch.mmio_sign_extend = 0;

        return EMULATE_DO_MMIO;
}

/* Same as above, but sign extends */
int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
                        unsigned int rt, unsigned int bytes, int is_bigendian)
{
        int r;

        r = kvmppc_handle_load(run, vcpu, rt, bytes, is_bigendian);
        vcpu->arch.mmio_sign_extend = 1;

        return r;
}

int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
                        u64 val, unsigned int bytes, int is_bigendian)
{
        void *data = run->mmio.data;

        if (bytes > sizeof(run->mmio.data)) {
                printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
                       run->mmio.len);
        }

        run->mmio.phys_addr = vcpu->arch.paddr_accessed;
        run->mmio.len = bytes;
        run->mmio.is_write = 1;
        vcpu->mmio_needed = 1;
        vcpu->mmio_is_write = 1;

        /* Store the value at the lowest bytes in 'data'. */
        if (is_bigendian) {
                switch (bytes) {
                case 8: *(u64 *)data = val; break;
                case 4: *(u32 *)data = val; break;
                case 2: *(u16 *)data = val; break;
                case 1: *(u8 *)data = val; break;
                }
        } else {
                /* Store LE value into 'data'. */
                switch (bytes) {
                case 4: st_le32(data, val); break;
                case 2: st_le16(data, val); break;
                case 1: *(u8 *)data = val; break;
                }
        }

        return EMULATE_DO_MMIO;
}
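
/*
 * Usage sketch (editor's illustration): the instruction emulator calls
 * these helpers when a guest load/store touches unmapped I/O space, e.g.
 * for a 4-byte big-endian load into guest register rt:
 *
 *         return kvmppc_handle_load(run, vcpu, rt, 4, 1);
 *
 * EMULATE_DO_MMIO then propagates out to userspace as KVM_EXIT_MMIO; once
 * the VMM fills run->mmio.data and re-enters the vcpu,
 * kvmppc_complete_mmio_load() above writes the value back into the
 * register selected by vcpu->arch.io_gpr.
 */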

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        int r;
        sigset_t sigsaved;

        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

        if (vcpu->mmio_needed) {
                if (!vcpu->mmio_is_write)
                        kvmppc_complete_mmio_load(vcpu, run);
                vcpu->mmio_needed = 0;
        } else if (vcpu->arch.dcr_needed) {
                if (!vcpu->arch.dcr_is_write)
                        kvmppc_complete_dcr_load(vcpu, run);
                vcpu->arch.dcr_needed = 0;
        } else if (vcpu->arch.osi_needed) {
                u64 *gprs = run->osi.gprs;
                int i;

                for (i = 0; i < 32; i++)
                        kvmppc_set_gpr(vcpu, i, gprs[i]);
                vcpu->arch.osi_needed = 0;
        } else if (vcpu->arch.hcall_needed) {
                int i;

                kvmppc_set_gpr(vcpu, 3, run->papr_hcall.ret);
                for (i = 0; i < 9; ++i)
                        kvmppc_set_gpr(vcpu, 4 + i, run->papr_hcall.args[i]);
                vcpu->arch.hcall_needed = 0;
        }

        r = kvmppc_vcpu_run(run, vcpu);

        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &sigsaved, NULL);

        return r;
}

int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
{
        if (irq->irq == KVM_INTERRUPT_UNSET) {
                kvmppc_core_dequeue_external(vcpu, irq);
                return 0;
        }

        kvmppc_core_queue_external(vcpu, irq);

        kvm_vcpu_kick(vcpu);

        return 0;
}

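/*
 * Userspace sketch (editor's illustration, not from this file): a VMM
 * raises or clears the external interrupt line through the KVM_INTERRUPT
 * vcpu ioctl, which lands in the handler above. "vcpu_fd" is assumed to
 * be an open vcpu file descriptor:
 *
 *         struct kvm_interrupt irq = { .irq = vector };
 *         ioctl(vcpu_fd, KVM_INTERRUPT, &irq);        // queue external irq
 *
 *         irq.irq = KVM_INTERRUPT_UNSET;
 *         ioctl(vcpu_fd, KVM_INTERRUPT, &irq);        // dequeue it again
 */
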
static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
                                     struct kvm_enable_cap *cap)
{
        int r;

        if (cap->flags)
                return -EINVAL;

        switch (cap->cap) {
        case KVM_CAP_PPC_OSI:
                r = 0;
                vcpu->arch.osi_enabled = true;
                break;
        case KVM_CAP_PPC_PAPR:
                r = 0;
                vcpu->arch.papr_enabled = true;
                break;
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
        case KVM_CAP_SW_TLB: {
                struct kvm_config_tlb cfg;
                void __user *user_ptr = (void __user *)(uintptr_t)cap->args[0];

                r = -EFAULT;
                if (copy_from_user(&cfg, user_ptr, sizeof(cfg)))
                        break;

                r = kvm_vcpu_ioctl_config_tlb(vcpu, &cfg);
                break;
        }
#endif
        default:
                r = -EINVAL;
                break;
        }

        if (!r)
                r = kvmppc_sanity_check(vcpu);

        return r;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        return -EINVAL;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg)
{
        struct kvm_vcpu *vcpu = filp->private_data;
        void __user *argp = (void __user *)arg;
        long r;

        switch (ioctl) {
        case KVM_INTERRUPT: {
                struct kvm_interrupt irq;
                r = -EFAULT;
                if (copy_from_user(&irq, argp, sizeof(irq)))
                        goto out;
                r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
                goto out;
        }

        case KVM_ENABLE_CAP:
        {
                struct kvm_enable_cap cap;
                r = -EFAULT;
                if (copy_from_user(&cap, argp, sizeof(cap)))
                        goto out;
                r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
                break;
        }

        case KVM_SET_ONE_REG:
        case KVM_GET_ONE_REG:
        {
                struct kvm_one_reg reg;
                r = -EFAULT;
                if (copy_from_user(&reg, argp, sizeof(reg)))
                        goto out;
                if (ioctl == KVM_SET_ONE_REG)
                        r = kvm_vcpu_ioctl_set_one_reg(vcpu, &reg);
                else
                        r = kvm_vcpu_ioctl_get_one_reg(vcpu, &reg);
                break;
        }

#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
        case KVM_DIRTY_TLB: {
                struct kvm_dirty_tlb dirty;
                r = -EFAULT;
                if (copy_from_user(&dirty, argp, sizeof(dirty)))
                        goto out;
                r = kvm_vcpu_ioctl_dirty_tlb(vcpu, &dirty);
                break;
        }
#endif
        default:
                r = -EINVAL;
        }

out:
        return r;
}

int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
        return VM_FAULT_SIGBUS;
}

static int kvm_vm_ioctl_get_pvinfo(struct kvm_ppc_pvinfo *pvinfo)
{
        u32 inst_lis = 0x3c000000;
        u32 inst_ori = 0x60000000;
        u32 inst_nop = 0x60000000;
        u32 inst_sc = 0x44000002;
        u32 inst_imm_mask = 0xffff;

        /*
         * The hypercall to get into KVM from within guest context is as
         * follows:
         *
         *      lis r0, KVM_SC_MAGIC_R0@h
         *      ori r0, r0, KVM_SC_MAGIC_R0@l
         *      sc
         *      nop
         */
        pvinfo->hcall[0] = inst_lis | ((KVM_SC_MAGIC_R0 >> 16) & inst_imm_mask);
        pvinfo->hcall[1] = inst_ori | (KVM_SC_MAGIC_R0 & inst_imm_mask);
        pvinfo->hcall[2] = inst_sc;
        pvinfo->hcall[3] = inst_nop;

        return 0;
}
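
/*
 * Worked encoding (editor's sketch): assuming KVM_SC_MAGIC_R0 is
 * 0x4b564d21 ("KVM!" in ASCII, as defined in asm/kvm_para.h), the
 * sequence built above assembles to:
 *
 *         hcall[0] = 0x3c000000 | 0x4b56 = 0x3c004b56   lis r0, 0x4b56
 *         hcall[1] = 0x60000000 | 0x4d21 = 0x60004d21   ori r0, r0, 0x4d21
 *         hcall[2] = 0x44000002                         sc
 *         hcall[3] = 0x60000000                         nop (ori 0,0,0)
 *
 * so a guest that copies these four words into place and executes them
 * enters the host's syscall-exit path with r0 holding the magic value.
 */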

long kvm_arch_vm_ioctl(struct file *filp,
                       unsigned int ioctl, unsigned long arg)
{
        void __user *argp = (void __user *)arg;
        long r;

        switch (ioctl) {
        case KVM_PPC_GET_PVINFO: {
                struct kvm_ppc_pvinfo pvinfo;
                memset(&pvinfo, 0, sizeof(pvinfo));
                r = kvm_vm_ioctl_get_pvinfo(&pvinfo);
                if (copy_to_user(argp, &pvinfo, sizeof(pvinfo))) {
                        r = -EFAULT;
                        goto out;
                }

                break;
        }
#ifdef CONFIG_PPC_BOOK3S_64
        case KVM_CREATE_SPAPR_TCE: {
                struct kvm_create_spapr_tce create_tce;
                struct kvm *kvm = filp->private_data;

                r = -EFAULT;
                if (copy_from_user(&create_tce, argp, sizeof(create_tce)))
                        goto out;
                r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce);
                goto out;
        }
#endif /* CONFIG_PPC_BOOK3S_64 */

#ifdef CONFIG_KVM_BOOK3S_64_HV
        case KVM_ALLOCATE_RMA: {
                struct kvm *kvm = filp->private_data;
                struct kvm_allocate_rma rma;

                r = kvm_vm_ioctl_allocate_rma(kvm, &rma);
                if (r >= 0 && copy_to_user(argp, &rma, sizeof(rma)))
                        r = -EFAULT;
                break;
        }

        case KVM_PPC_ALLOCATE_HTAB: {
                struct kvm *kvm = filp->private_data;
                u32 htab_order;

                r = -EFAULT;
                if (get_user(htab_order, (u32 __user *)argp))
                        break;
                r = kvmppc_alloc_reset_hpt(kvm, &htab_order);
                if (r)
                        break;
                r = -EFAULT;
                if (put_user(htab_order, (u32 __user *)argp))
                        break;
                r = 0;
                break;
        }
#endif /* CONFIG_KVM_BOOK3S_64_HV */

#ifdef CONFIG_PPC_BOOK3S_64
        case KVM_PPC_GET_SMMU_INFO: {
                struct kvm *kvm = filp->private_data;
                struct kvm_ppc_smmu_info info;

                memset(&info, 0, sizeof(info));
                r = kvm_vm_ioctl_get_smmu_info(kvm, &info);
                if (r >= 0 && copy_to_user(argp, &info, sizeof(info)))
                        r = -EFAULT;
                break;
        }
#endif /* CONFIG_PPC_BOOK3S_64 */
        default:
                r = -ENOTTY;
        }

out:
        return r;
}

static unsigned long lpid_inuse[BITS_TO_LONGS(KVMPPC_NR_LPIDS)];
static unsigned long nr_lpids;

long kvmppc_alloc_lpid(void)
{
        long lpid;

        do {
                lpid = find_first_zero_bit(lpid_inuse, KVMPPC_NR_LPIDS);
                if (lpid >= nr_lpids) {
                        pr_err("%s: No LPIDs free\n", __func__);
                        return -ENOMEM;
                }
        } while (test_and_set_bit(lpid, lpid_inuse));

        return lpid;
}

void kvmppc_claim_lpid(long lpid)
{
        set_bit(lpid, lpid_inuse);
}

void kvmppc_free_lpid(long lpid)
{
        clear_bit(lpid, lpid_inuse);
}

void kvmppc_init_lpid(unsigned long nr_lpids_param)
{
        nr_lpids = min_t(unsigned long, KVMPPC_NR_LPIDS, nr_lpids_param);
        memset(lpid_inuse, 0, sizeof(lpid_inuse));
}

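/*
 * Usage sketch (editor's illustration): the allocator above is a simple
 * bitmap of logical partition IDs. A KVM backend typically initializes it
 * once with the count the hardware supports, reserves any IDs the host
 * itself uses, and then hands one LPID to each VM:
 *
 *         kvmppc_init_lpid(64);            // e.g. 64 LPIDs (illustrative)
 *         kvmppc_claim_lpid(0);            // LPID 0 reserved for the host
 *
 *         long lpid = kvmppc_alloc_lpid(); // at VM creation
 *         if (lpid < 0)
 *                 return lpid;             // -ENOMEM: none free
 *         ...
 *         kvmppc_free_lpid(lpid);          // at VM teardown
 *
 * The test_and_set_bit() retry loop makes allocation safe against
 * concurrent callers without taking a separate lock.
 */
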
int kvm_arch_init(void *opaque)
{
        return 0;
}

void kvm_arch_exit(void)
{
}