Linux Kernel  3.7.1
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros Groups Pages
book3s_emulate.c
Go to the documentation of this file.
1 /*
2  * This program is free software; you can redistribute it and/or modify
3  * it under the terms of the GNU General Public License, version 2, as
4  * published by the Free Software Foundation.
5  *
6  * This program is distributed in the hope that it will be useful,
7  * but WITHOUT ANY WARRANTY; without even the implied warranty of
8  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9  * GNU General Public License for more details.
10  *
11  * You should have received a copy of the GNU General Public License
12  * along with this program; if not, write to the Free Software
13  * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
14  *
15  * Copyright SUSE Linux Products GmbH 2009
16  *
17  * Authors: Alexander Graf <[email protected]>
18  */
19 
20 #include <asm/kvm_ppc.h>
21 #include <asm/disassemble.h>
22 #include <asm/kvm_book3s.h>
23 #include <asm/reg.h>
24 #include <asm/switch_to.h>
25 
26 #define OP_19_XOP_RFID 18
27 #define OP_19_XOP_RFI 50
28 
29 #define OP_31_XOP_MFMSR 83
30 #define OP_31_XOP_MTMSR 146
31 #define OP_31_XOP_MTMSRD 178
32 #define OP_31_XOP_MTSR 210
33 #define OP_31_XOP_MTSRIN 242
34 #define OP_31_XOP_TLBIEL 274
35 #define OP_31_XOP_TLBIE 306
36 #define OP_31_XOP_SLBMTE 402
37 #define OP_31_XOP_SLBIE 434
38 #define OP_31_XOP_SLBIA 498
39 #define OP_31_XOP_MFSR 595
40 #define OP_31_XOP_MFSRIN 659
41 #define OP_31_XOP_DCBA 758
42 #define OP_31_XOP_SLBMFEV 851
43 #define OP_31_XOP_EIOIO 854
44 #define OP_31_XOP_SLBMFEE 915
45 
46 /* DCBZ is actually 1014, but we patch it to 1010 so we get a trap */
47 #define OP_31_XOP_DCBZ 1010
48 
49 #define OP_LFS 48
50 #define OP_LFD 50
51 #define OP_STFS 52
52 #define OP_STFD 54
53 
54 #define SPRN_GQR0 912
55 #define SPRN_GQR1 913
56 #define SPRN_GQR2 914
57 #define SPRN_GQR3 915
58 #define SPRN_GQR4 916
59 #define SPRN_GQR5 917
60 #define SPRN_GQR6 918
61 #define SPRN_GQR7 919
62 
63 /* Book3S_32 defines mfsrin(v) - but that messes up our abstract
64  * function pointers, so let's just disable the define. */
65 #undef mfsrin
66 
/*
 * Privilege level an SPR access requires.  Ordering matters: a larger
 * value means more privilege, and spr_allowed() compares with '>'.
 */
enum priv_level {
	PRIV_PROBLEM = 0,
	PRIV_SUPER = 1,
	PRIV_HYPER = 2,
};
72 
73 static bool spr_allowed(struct kvm_vcpu *vcpu, enum priv_level level)
74 {
75  /* PAPR VMs only access supervisor SPRs */
76  if (vcpu->arch.papr_enabled && (level > PRIV_SUPER))
77  return false;
78 
79  /* Limit user space to its own small SPR set */
80  if ((vcpu->arch.shared->msr & MSR_PR) && level > PRIV_PROBLEM)
81  return false;
82 
83  return true;
84 }
85 
86 int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
87  unsigned int inst, int *advance)
88 {
89  int emulated = EMULATE_DONE;
90  int rt = get_rt(inst);
91  int rs = get_rs(inst);
92  int ra = get_ra(inst);
93  int rb = get_rb(inst);
94 
95  switch (get_op(inst)) {
96  case 19:
97  switch (get_xop(inst)) {
98  case OP_19_XOP_RFID:
99  case OP_19_XOP_RFI:
100  kvmppc_set_pc(vcpu, vcpu->arch.shared->srr0);
101  kvmppc_set_msr(vcpu, vcpu->arch.shared->srr1);
102  *advance = 0;
103  break;
104 
105  default:
106  emulated = EMULATE_FAIL;
107  break;
108  }
109  break;
110  case 31:
111  switch (get_xop(inst)) {
112  case OP_31_XOP_MFMSR:
113  kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->msr);
114  break;
115  case OP_31_XOP_MTMSRD:
116  {
117  ulong rs_val = kvmppc_get_gpr(vcpu, rs);
118  if (inst & 0x10000) {
119  ulong new_msr = vcpu->arch.shared->msr;
120  new_msr &= ~(MSR_RI | MSR_EE);
121  new_msr |= rs_val & (MSR_RI | MSR_EE);
122  vcpu->arch.shared->msr = new_msr;
123  } else
124  kvmppc_set_msr(vcpu, rs_val);
125  break;
126  }
127  case OP_31_XOP_MTMSR:
128  kvmppc_set_msr(vcpu, kvmppc_get_gpr(vcpu, rs));
129  break;
130  case OP_31_XOP_MFSR:
131  {
132  int srnum;
133 
134  srnum = kvmppc_get_field(inst, 12 + 32, 15 + 32);
135  if (vcpu->arch.mmu.mfsrin) {
136  u32 sr;
137  sr = vcpu->arch.mmu.mfsrin(vcpu, srnum);
138  kvmppc_set_gpr(vcpu, rt, sr);
139  }
140  break;
141  }
142  case OP_31_XOP_MFSRIN:
143  {
144  int srnum;
145 
146  srnum = (kvmppc_get_gpr(vcpu, rb) >> 28) & 0xf;
147  if (vcpu->arch.mmu.mfsrin) {
148  u32 sr;
149  sr = vcpu->arch.mmu.mfsrin(vcpu, srnum);
150  kvmppc_set_gpr(vcpu, rt, sr);
151  }
152  break;
153  }
154  case OP_31_XOP_MTSR:
155  vcpu->arch.mmu.mtsrin(vcpu,
156  (inst >> 16) & 0xf,
157  kvmppc_get_gpr(vcpu, rs));
158  break;
159  case OP_31_XOP_MTSRIN:
160  vcpu->arch.mmu.mtsrin(vcpu,
161  (kvmppc_get_gpr(vcpu, rb) >> 28) & 0xf,
162  kvmppc_get_gpr(vcpu, rs));
163  break;
164  case OP_31_XOP_TLBIE:
165  case OP_31_XOP_TLBIEL:
166  {
167  bool large = (inst & 0x00200000) ? true : false;
168  ulong addr = kvmppc_get_gpr(vcpu, rb);
169  vcpu->arch.mmu.tlbie(vcpu, addr, large);
170  break;
171  }
172  case OP_31_XOP_EIOIO:
173  break;
174  case OP_31_XOP_SLBMTE:
175  if (!vcpu->arch.mmu.slbmte)
176  return EMULATE_FAIL;
177 
178  vcpu->arch.mmu.slbmte(vcpu,
179  kvmppc_get_gpr(vcpu, rs),
180  kvmppc_get_gpr(vcpu, rb));
181  break;
182  case OP_31_XOP_SLBIE:
183  if (!vcpu->arch.mmu.slbie)
184  return EMULATE_FAIL;
185 
186  vcpu->arch.mmu.slbie(vcpu,
187  kvmppc_get_gpr(vcpu, rb));
188  break;
189  case OP_31_XOP_SLBIA:
190  if (!vcpu->arch.mmu.slbia)
191  return EMULATE_FAIL;
192 
193  vcpu->arch.mmu.slbia(vcpu);
194  break;
195  case OP_31_XOP_SLBMFEE:
196  if (!vcpu->arch.mmu.slbmfee) {
197  emulated = EMULATE_FAIL;
198  } else {
199  ulong t, rb_val;
200 
201  rb_val = kvmppc_get_gpr(vcpu, rb);
202  t = vcpu->arch.mmu.slbmfee(vcpu, rb_val);
203  kvmppc_set_gpr(vcpu, rt, t);
204  }
205  break;
206  case OP_31_XOP_SLBMFEV:
207  if (!vcpu->arch.mmu.slbmfev) {
208  emulated = EMULATE_FAIL;
209  } else {
210  ulong t, rb_val;
211 
212  rb_val = kvmppc_get_gpr(vcpu, rb);
213  t = vcpu->arch.mmu.slbmfev(vcpu, rb_val);
214  kvmppc_set_gpr(vcpu, rt, t);
215  }
216  break;
217  case OP_31_XOP_DCBA:
218  /* Gets treated as NOP */
219  break;
220  case OP_31_XOP_DCBZ:
221  {
222  ulong rb_val = kvmppc_get_gpr(vcpu, rb);
223  ulong ra_val = 0;
224  ulong addr, vaddr;
225  u32 zeros[8] = { 0, 0, 0, 0, 0, 0, 0, 0 };
226  u32 dsisr;
227  int r;
228 
229  if (ra)
230  ra_val = kvmppc_get_gpr(vcpu, ra);
231 
232  addr = (ra_val + rb_val) & ~31ULL;
233  if (!(vcpu->arch.shared->msr & MSR_SF))
234  addr &= 0xffffffff;
235  vaddr = addr;
236 
237  r = kvmppc_st(vcpu, &addr, 32, zeros, true);
238  if ((r == -ENOENT) || (r == -EPERM)) {
239  struct kvmppc_book3s_shadow_vcpu *svcpu;
240 
241  svcpu = svcpu_get(vcpu);
242  *advance = 0;
243  vcpu->arch.shared->dar = vaddr;
244  svcpu->fault_dar = vaddr;
245 
246  dsisr = DSISR_ISSTORE;
247  if (r == -ENOENT)
248  dsisr |= DSISR_NOHPTE;
249  else if (r == -EPERM)
250  dsisr |= DSISR_PROTFAULT;
251 
252  vcpu->arch.shared->dsisr = dsisr;
253  svcpu->fault_dsisr = dsisr;
254  svcpu_put(svcpu);
255 
258  }
259 
260  break;
261  }
262  default:
263  emulated = EMULATE_FAIL;
264  }
265  break;
266  default:
267  emulated = EMULATE_FAIL;
268  }
269 
270  if (emulated == EMULATE_FAIL)
271  emulated = kvmppc_emulate_paired_single(run, vcpu);
272 
273  return emulated;
274 }
275 
276 void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat, bool upper,
277  u32 val)
278 {
279  if (upper) {
280  /* Upper BAT */
281  u32 bl = (val >> 2) & 0x7ff;
282  bat->bepi_mask = (~bl << 17);
283  bat->bepi = val & 0xfffe0000;
284  bat->vs = (val & 2) ? 1 : 0;
285  bat->vp = (val & 1) ? 1 : 0;
286  bat->raw = (bat->raw & 0xffffffff00000000ULL) | val;
287  } else {
288  /* Lower BAT */
289  bat->brpn = val & 0xfffe0000;
290  bat->wimg = (val >> 3) & 0xf;
291  bat->pp = val & 3;
292  bat->raw = (bat->raw & 0x00000000ffffffffULL) | ((u64)val << 32);
293  }
294 }
295 
296 static struct kvmppc_bat *kvmppc_find_bat(struct kvm_vcpu *vcpu, int sprn)
297 {
298  struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
299  struct kvmppc_bat *bat;
300 
301  switch (sprn) {
302  case SPRN_IBAT0U ... SPRN_IBAT3L:
303  bat = &vcpu_book3s->ibat[(sprn - SPRN_IBAT0U) / 2];
304  break;
305  case SPRN_IBAT4U ... SPRN_IBAT7L:
306  bat = &vcpu_book3s->ibat[4 + ((sprn - SPRN_IBAT4U) / 2)];
307  break;
308  case SPRN_DBAT0U ... SPRN_DBAT3L:
309  bat = &vcpu_book3s->dbat[(sprn - SPRN_DBAT0U) / 2];
310  break;
311  case SPRN_DBAT4U ... SPRN_DBAT7L:
312  bat = &vcpu_book3s->dbat[4 + ((sprn - SPRN_DBAT4U) / 2)];
313  break;
314  default:
315  BUG();
316  }
317 
318  return bat;
319 }
320 
/*
 * Emulate a guest mtspr.  Privileged SPRs are gated through spr_allowed();
 * a disallowed or unknown SPR fails emulation (unless DEBUG_SPR is set,
 * in which case it is only logged).  Returns EMULATE_DONE or EMULATE_FAIL.
 */
int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
{
	int emulated = EMULATE_DONE;

	switch (sprn) {
	case SPRN_SDR1:
		/* Hash page table base - hypervisor privileged */
		if (!spr_allowed(vcpu, PRIV_HYPER))
			goto unprivileged;
		to_book3s(vcpu)->sdr1 = spr_val;
		break;
	case SPRN_DSISR:
		vcpu->arch.shared->dsisr = spr_val;
		break;
	case SPRN_DAR:
		vcpu->arch.shared->dar = spr_val;
		break;
	case SPRN_HIOR:
		to_book3s(vcpu)->hior = spr_val;
		break;
	case SPRN_IBAT0U ... SPRN_IBAT3L:
	case SPRN_IBAT4U ... SPRN_IBAT7L:
	case SPRN_DBAT0U ... SPRN_DBAT3L:
	case SPRN_DBAT4U ... SPRN_DBAT7L:
	{
		struct kvmppc_bat *bat = kvmppc_find_bat(vcpu, sprn);

		/* Even SPR numbers are the upper BAT word, odd the lower */
		kvmppc_set_bat(vcpu, bat, !(sprn % 2), (u32)spr_val);
		/* BAT writes happen so rarely that we're ok to flush
		 * everything here */
		kvmppc_mmu_pte_flush(vcpu, 0, 0);
		break;
	}
	case SPRN_HID0:
		to_book3s(vcpu)->hid[0] = spr_val;
		break;
	case SPRN_HID1:
		to_book3s(vcpu)->hid[1] = spr_val;
		break;
	case SPRN_HID2:
		to_book3s(vcpu)->hid[2] = spr_val;
		break;
	case SPRN_HID2_GEKKO:
		to_book3s(vcpu)->hid[2] = spr_val;
		/* HID2.PSE controls paired single on gekko */
		switch (vcpu->arch.pvr) {
		case 0x00080200:	/* lonestar 2.0 */
		case 0x00088202:	/* lonestar 2.2 */
		case 0x70000100:	/* gekko 1.0 */
		case 0x00080100:	/* gekko 2.0 */
		case 0x00083203:	/* gekko 2.3a */
		case 0x00083213:	/* gekko 2.3b */
		case 0x00083204:	/* gekko 2.4 */
		case 0x00083214:	/* gekko 2.4e (8SE) - retail HW2 */
		case 0x00087200:	/* broadway */
			if (vcpu->arch.hflags & BOOK3S_HFLAG_NATIVE_PS) {
				/* Native paired singles */
			} else if (spr_val & (1 << 29)) { /* HID2.PSE */
				/* Guest enabled paired singles: emulate them
				 * and drop our FP state so it gets reloaded */
				vcpu->arch.hflags |= BOOK3S_HFLAG_PAIRED_SINGLE;
				kvmppc_giveup_ext(vcpu, MSR_FP);
			} else {
				vcpu->arch.hflags &= ~BOOK3S_HFLAG_PAIRED_SINGLE;
			}
			break;
		}
		break;
	case SPRN_HID4:
	case SPRN_HID4_GEKKO:
		to_book3s(vcpu)->hid[4] = spr_val;
		break;
	case SPRN_HID5:
		to_book3s(vcpu)->hid[5] = spr_val;
		/* guest HID5 set can change is_dcbz32 */
		if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
		    (mfmsr() & MSR_HV))
			vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;
		break;
	case SPRN_GQR0:
	case SPRN_GQR1:
	case SPRN_GQR2:
	case SPRN_GQR3:
	case SPRN_GQR4:
	case SPRN_GQR5:
	case SPRN_GQR6:
	case SPRN_GQR7:
		/* Gekko graphics quantization registers */
		to_book3s(vcpu)->gqr[sprn - SPRN_GQR0] = spr_val;
		break;
	case SPRN_ICTC:
	case SPRN_THRM1:
	case SPRN_THRM2:
	case SPRN_THRM3:
	case SPRN_CTRLF:
	case SPRN_CTRLT:
	case SPRN_L2CR:
	case SPRN_MMCR0_GEKKO:
	case SPRN_MMCR1_GEKKO:
	case SPRN_PMC1_GEKKO:
	case SPRN_PMC2_GEKKO:
	case SPRN_PMC3_GEKKO:
	case SPRN_PMC4_GEKKO:
	case SPRN_WPAR_GEKKO:
		/* Writes to these are silently ignored */
		break;
unprivileged:
	default:
		printk(KERN_INFO "KVM: invalid SPR write: %d\n", sprn);
#ifndef DEBUG_SPR
		emulated = EMULATE_FAIL;
#endif
		break;
	}

	return emulated;
}
434 
/*
 * Emulate a guest mfspr.  Writes the SPR value into *spr_val; privileged
 * SPRs are gated through spr_allowed().  Unknown or disallowed SPRs fail
 * emulation (unless DEBUG_SPR is set).  Returns EMULATE_DONE/EMULATE_FAIL.
 */
int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
{
	int emulated = EMULATE_DONE;

	switch (sprn) {
	case SPRN_IBAT0U ... SPRN_IBAT3L:
	case SPRN_IBAT4U ... SPRN_IBAT7L:
	case SPRN_DBAT0U ... SPRN_DBAT3L:
	case SPRN_DBAT4U ... SPRN_DBAT7L:
	{
		struct kvmppc_bat *bat = kvmppc_find_bat(vcpu, sprn);

		/* Odd SPR numbers read the raw lower word (stored in the
		 * high half of bat->raw), even ones the upper word */
		if (sprn % 2)
			*spr_val = bat->raw >> 32;
		else
			*spr_val = bat->raw;

		break;
	}
	case SPRN_SDR1:
		/* Hash page table base - hypervisor privileged */
		if (!spr_allowed(vcpu, PRIV_HYPER))
			goto unprivileged;
		*spr_val = to_book3s(vcpu)->sdr1;
		break;
	case SPRN_DSISR:
		*spr_val = vcpu->arch.shared->dsisr;
		break;
	case SPRN_DAR:
		*spr_val = vcpu->arch.shared->dar;
		break;
	case SPRN_HIOR:
		*spr_val = to_book3s(vcpu)->hior;
		break;
	case SPRN_HID0:
		*spr_val = to_book3s(vcpu)->hid[0];
		break;
	case SPRN_HID1:
		*spr_val = to_book3s(vcpu)->hid[1];
		break;
	case SPRN_HID2:
	case SPRN_HID2_GEKKO:
		*spr_val = to_book3s(vcpu)->hid[2];
		break;
	case SPRN_HID4:
	case SPRN_HID4_GEKKO:
		*spr_val = to_book3s(vcpu)->hid[4];
		break;
	case SPRN_HID5:
		*spr_val = to_book3s(vcpu)->hid[5];
		break;
	case SPRN_CFAR:
	case SPRN_PURR:
		/* Not virtualized; read as zero */
		*spr_val = 0;
		break;
	case SPRN_GQR0:
	case SPRN_GQR1:
	case SPRN_GQR2:
	case SPRN_GQR3:
	case SPRN_GQR4:
	case SPRN_GQR5:
	case SPRN_GQR6:
	case SPRN_GQR7:
		/* Gekko graphics quantization registers */
		*spr_val = to_book3s(vcpu)->gqr[sprn - SPRN_GQR0];
		break;
	case SPRN_THRM1:
	case SPRN_THRM2:
	case SPRN_THRM3:
	case SPRN_CTRLF:
	case SPRN_CTRLT:
	case SPRN_L2CR:
	case SPRN_MMCR0_GEKKO:
	case SPRN_MMCR1_GEKKO:
	case SPRN_PMC1_GEKKO:
	case SPRN_PMC2_GEKKO:
	case SPRN_PMC3_GEKKO:
	case SPRN_PMC4_GEKKO:
	case SPRN_WPAR_GEKKO:
		/* Not emulated; read as zero */
		*spr_val = 0;
		break;
	default:
unprivileged:
		printk(KERN_INFO "KVM: invalid SPR read: %d\n", sprn);
#ifndef DEBUG_SPR
		emulated = EMULATE_FAIL;
#endif
		break;
	}

	return emulated;
}
525 
526 u32 kvmppc_alignment_dsisr(struct kvm_vcpu *vcpu, unsigned int inst)
527 {
528  u32 dsisr = 0;
529 
530  /*
531  * This is what the spec says about DSISR bits (not mentioned = 0):
532  *
533  * 12:13 [DS] Set to bits 30:31
534  * 15:16 [X] Set to bits 29:30
535  * 17 [X] Set to bit 25
536  * [D/DS] Set to bit 5
537  * 18:21 [X] Set to bits 21:24
538  * [D/DS] Set to bits 1:4
539  * 22:26 Set to bits 6:10 (RT/RS/FRT/FRS)
540  * 27:31 Set to bits 11:15 (RA)
541  */
542 
543  switch (get_op(inst)) {
544  /* D-form */
545  case OP_LFS:
546  case OP_LFD:
547  case OP_STFD:
548  case OP_STFS:
549  dsisr |= (inst >> 12) & 0x4000; /* bit 17 */
550  dsisr |= (inst >> 17) & 0x3c00; /* bits 18:21 */
551  break;
552  /* X-form */
553  case 31:
554  dsisr |= (inst << 14) & 0x18000; /* bits 15:16 */
555  dsisr |= (inst << 8) & 0x04000; /* bit 17 */
556  dsisr |= (inst << 3) & 0x03c00; /* bits 18:21 */
557  break;
558  default:
559  printk(KERN_INFO "KVM: Unaligned instruction 0x%x\n", inst);
560  break;
561  }
562 
563  dsisr |= (inst >> 16) & 0x03ff; /* bits 22:31 */
564 
565  return dsisr;
566 }
567 
568 ulong kvmppc_alignment_dar(struct kvm_vcpu *vcpu, unsigned int inst)
569 {
570  ulong dar = 0;
571  ulong ra = get_ra(inst);
572  ulong rb = get_rb(inst);
573 
574  switch (get_op(inst)) {
575  case OP_LFS:
576  case OP_LFD:
577  case OP_STFD:
578  case OP_STFS:
579  if (ra)
580  dar = kvmppc_get_gpr(vcpu, ra);
581  dar += (s32)((s16)inst);
582  break;
583  case 31:
584  if (ra)
585  dar = kvmppc_get_gpr(vcpu, ra);
586  dar += kvmppc_get_gpr(vcpu, rb);
587  break;
588  default:
589  printk(KERN_INFO "KVM: Unaligned instruction 0x%x\n", inst);
590  break;
591  }
592 
593  return dar;
594 }