Linux Kernel 3.7.1
kvm.c
/*
 * Copyright (C) 2010 SUSE Linux Products GmbH. All rights reserved.
 * Copyright 2010-2011 Freescale Semiconductor, Inc.
 *
 * Authors:
 *     Alexander Graf <[email protected]>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

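/*
 * Guest-side paravirtualization for KVM on PowerPC: when the kernel runs
 * as a KVM guest, it asks the hypervisor to map a "magic page" of shared
 * register state at the top of the effective address space and then
 * rewrites selected privileged instructions in its own text so that they
 * access that page directly (or branch into small emulation trampolines)
 * instead of trapping into the hypervisor.
 */
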
#include <linux/kvm_host.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/kvm_para.h>
#include <linux/slab.h>
#include <linux/of.h>

#include <asm/reg.h>
#include <asm/sections.h>
#include <asm/cacheflush.h>
#include <asm/disassemble.h>
#include <asm/ppc-opcode.h>
#include <asm/epapr_hcalls.h>

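/*
 * The magic page is mapped at effective address -4096, the last page of
 * the address space.  With RA=0, a single load or store with a negative
 * 16-bit displacement reaches any field of the shared
 * struct kvm_vcpu_arch_shared; magic_var() computes that displacement
 * for a given field.
 */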
#define KVM_MAGIC_PAGE		(-4096L)
#define magic_var(x) KVM_MAGIC_PAGE + offsetof(struct kvm_vcpu_arch_shared, x)

#define KVM_INST_LWZ		0x80000000
#define KVM_INST_STW		0x90000000
#define KVM_INST_LD		0xe8000000
#define KVM_INST_STD		0xf8000000
#define KVM_INST_NOP		0x60000000
#define KVM_INST_B		0x48000000
#define KVM_INST_B_MASK		0x03ffffff
#define KVM_INST_B_MAX		0x01ffffff
#define KVM_INST_LI		0x38000000

#define KVM_MASK_RT		0x03e00000
#define KVM_RT_30		0x03c00000
#define KVM_MASK_RB		0x0000f800
#define KVM_INST_MFMSR		0x7c0000a6

#define SPR_FROM		0
#define SPR_TO			0x100

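/*
 * mfspr and mtspr encode the 10-bit SPR number as two swapped 5-bit
 * fields, so the number is split before being OR'd into the instruction
 * image; SPR_FROM/SPR_TO select the mfspr or mtspr form of the opcode.
 */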
#define KVM_INST_SPR(sprn, moveto) (0x7c0002a6 | \
				    (((sprn) & 0x1f) << 16) | \
				    (((sprn) & 0x3e0) << 6) | \
				    (moveto))

#define KVM_INST_MFSPR(sprn)	KVM_INST_SPR(sprn, SPR_FROM)
#define KVM_INST_MTSPR(sprn)	KVM_INST_SPR(sprn, SPR_TO)

#define KVM_INST_TLBSYNC	0x7c00046c
#define KVM_INST_MTMSRD_L0	0x7c000164
#define KVM_INST_MTMSRD_L1	0x7c010164
#define KVM_INST_MTMSR		0x7c000124

#define KVM_INST_WRTEE		0x7c000106
#define KVM_INST_WRTEEI_0	0x7c000146
#define KVM_INST_WRTEEI_1	0x7c008146

#define KVM_INST_MTSRIN		0x7c0001e4

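/*
 * kvm_tmp is scratch space inside the kernel image that the patching code
 * carves emulation trampolines out of; whatever ends up unused is handed
 * back to the page allocator by kvm_free_tmp().
 */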
static bool kvm_patching_worked = true;
static char kvm_tmp[1024 * 1024];
static int kvm_tmp_index;

static inline void kvm_patch_ins(u32 *inst, u32 new_inst)
{
	*inst = new_inst;
	flush_icache_range((ulong)inst, (ulong)inst + 4);
}

static void kvm_patch_ins_ll(u32 *inst, long addr, u32 rt)
{
#ifdef CONFIG_64BIT
	kvm_patch_ins(inst, KVM_INST_LD | rt | (addr & 0x0000fffc));
#else
	kvm_patch_ins(inst, KVM_INST_LWZ | rt | (addr & 0x0000fffc));
#endif
}

static void kvm_patch_ins_ld(u32 *inst, long addr, u32 rt)
{
#ifdef CONFIG_64BIT
	kvm_patch_ins(inst, KVM_INST_LD | rt | (addr & 0x0000fffc));
#else
	kvm_patch_ins(inst, KVM_INST_LWZ | rt | ((addr + 4) & 0x0000fffc));
#endif
}

static void kvm_patch_ins_lwz(u32 *inst, long addr, u32 rt)
{
	kvm_patch_ins(inst, KVM_INST_LWZ | rt | (addr & 0x0000ffff));
}

static void kvm_patch_ins_std(u32 *inst, long addr, u32 rt)
{
#ifdef CONFIG_64BIT
	kvm_patch_ins(inst, KVM_INST_STD | rt | (addr & 0x0000fffc));
#else
	kvm_patch_ins(inst, KVM_INST_STW | rt | ((addr + 4) & 0x0000fffc));
#endif
}

static void kvm_patch_ins_stw(u32 *inst, long addr, u32 rt)
{
	kvm_patch_ins(inst, KVM_INST_STW | rt | (addr & 0x0000fffc));
}

static void kvm_patch_ins_nop(u32 *inst)
{
	kvm_patch_ins(inst, KVM_INST_NOP);
}

static void kvm_patch_ins_b(u32 *inst, int addr)
{
#if defined(CONFIG_RELOCATABLE) && defined(CONFIG_PPC_BOOK3S)
	/* On relocatable kernels the interrupt handlers and our code
	   can be in different regions, so we don't patch them */

	if ((ulong)inst < (ulong)&__end_interrupts)
		return;
#endif

	kvm_patch_ins(inst, KVM_INST_B | (addr & KVM_INST_B_MASK));
}

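/*
 * Trivial bump allocator: hand out the next len bytes of kvm_tmp for a
 * trampoline and never free them individually.
 */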
static u32 *kvm_alloc(int len)
{
	u32 *p;

	if ((kvm_tmp_index + len) > ARRAY_SIZE(kvm_tmp)) {
		printk(KERN_ERR "KVM: No more space (%d + %d)\n",
				kvm_tmp_index, len);
		kvm_patching_worked = false;
		return NULL;
	}

	p = (void*)&kvm_tmp[kvm_tmp_index];
	kvm_tmp_index += len;

	return p;
}
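
/*
 * The mtmsr/mtmsrd/wrtee rewrites below all work the same way: the
 * original instruction is replaced with a branch into a copy of an
 * assembly template (from kvm_emulate.S), and the copy is fixed up with
 * the right register, the original instruction and a branch back to the
 * instruction following the patch site.  The *_offs/*_len symbols
 * exported by the templates say which words need fixing up.  r30 and r31
 * are special: the templates clobber them and park their values in the
 * magic page's scratch slots, so accesses to those registers are
 * redirected there.
 */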

extern u32 kvm_emulate_mtmsrd_branch_offs;
extern u32 kvm_emulate_mtmsrd_reg_offs;
extern u32 kvm_emulate_mtmsrd_orig_ins_offs;
extern u32 kvm_emulate_mtmsrd_len;
extern u32 kvm_emulate_mtmsrd[];

static void kvm_patch_ins_mtmsrd(u32 *inst, u32 rt)
{
	u32 *p;
	int distance_start;
	int distance_end;
	ulong next_inst;

	p = kvm_alloc(kvm_emulate_mtmsrd_len * 4);
	if (!p)
		return;

	/* Find out where we are and put everything there */
	distance_start = (ulong)p - (ulong)inst;
	next_inst = ((ulong)inst + 4);
	distance_end = next_inst - (ulong)&p[kvm_emulate_mtmsrd_branch_offs];

	/* Make sure we only write valid b instructions */
	if (distance_start > KVM_INST_B_MAX) {
		kvm_patching_worked = false;
		return;
	}

	/* Modify the chunk to fit the invocation */
	memcpy(p, kvm_emulate_mtmsrd, kvm_emulate_mtmsrd_len * 4);
	p[kvm_emulate_mtmsrd_branch_offs] |= distance_end & KVM_INST_B_MASK;
	switch (get_rt(rt)) {
	case 30:
		kvm_patch_ins_ll(&p[kvm_emulate_mtmsrd_reg_offs],
				 magic_var(scratch2), KVM_RT_30);
		break;
	case 31:
		kvm_patch_ins_ll(&p[kvm_emulate_mtmsrd_reg_offs],
				 magic_var(scratch1), KVM_RT_30);
		break;
	default:
		p[kvm_emulate_mtmsrd_reg_offs] |= rt;
		break;
	}

	p[kvm_emulate_mtmsrd_orig_ins_offs] = *inst;
	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_mtmsrd_len * 4);

	/* Patch the invocation */
	kvm_patch_ins_b(inst, distance_start);
}

extern u32 kvm_emulate_mtmsr_branch_offs;
extern u32 kvm_emulate_mtmsr_reg1_offs;
extern u32 kvm_emulate_mtmsr_reg2_offs;
extern u32 kvm_emulate_mtmsr_orig_ins_offs;
extern u32 kvm_emulate_mtmsr_len;
extern u32 kvm_emulate_mtmsr[];

static void kvm_patch_ins_mtmsr(u32 *inst, u32 rt)
{
	u32 *p;
	int distance_start;
	int distance_end;
	ulong next_inst;

	p = kvm_alloc(kvm_emulate_mtmsr_len * 4);
	if (!p)
		return;

	/* Find out where we are and put everything there */
	distance_start = (ulong)p - (ulong)inst;
	next_inst = ((ulong)inst + 4);
	distance_end = next_inst - (ulong)&p[kvm_emulate_mtmsr_branch_offs];

	/* Make sure we only write valid b instructions */
	if (distance_start > KVM_INST_B_MAX) {
		kvm_patching_worked = false;
		return;
	}

	/* Modify the chunk to fit the invocation */
	memcpy(p, kvm_emulate_mtmsr, kvm_emulate_mtmsr_len * 4);
	p[kvm_emulate_mtmsr_branch_offs] |= distance_end & KVM_INST_B_MASK;

	/* Make clobbered registers work too */
	switch (get_rt(rt)) {
	case 30:
		kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg1_offs],
				 magic_var(scratch2), KVM_RT_30);
		kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg2_offs],
				 magic_var(scratch2), KVM_RT_30);
		break;
	case 31:
		kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg1_offs],
				 magic_var(scratch1), KVM_RT_30);
		kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg2_offs],
				 magic_var(scratch1), KVM_RT_30);
		break;
	default:
		p[kvm_emulate_mtmsr_reg1_offs] |= rt;
		p[kvm_emulate_mtmsr_reg2_offs] |= rt;
		break;
	}

	p[kvm_emulate_mtmsr_orig_ins_offs] = *inst;
	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_mtmsr_len * 4);

	/* Patch the invocation */
	kvm_patch_ins_b(inst, distance_start);
}

#ifdef CONFIG_BOOKE

extern u32 kvm_emulate_wrtee_branch_offs;
extern u32 kvm_emulate_wrtee_reg_offs;
extern u32 kvm_emulate_wrtee_orig_ins_offs;
extern u32 kvm_emulate_wrtee_len;
extern u32 kvm_emulate_wrtee[];

static void kvm_patch_ins_wrtee(u32 *inst, u32 rt, int imm_one)
{
	u32 *p;
	int distance_start;
	int distance_end;
	ulong next_inst;

	p = kvm_alloc(kvm_emulate_wrtee_len * 4);
	if (!p)
		return;

	/* Find out where we are and put everything there */
	distance_start = (ulong)p - (ulong)inst;
	next_inst = ((ulong)inst + 4);
	distance_end = next_inst - (ulong)&p[kvm_emulate_wrtee_branch_offs];

	/* Make sure we only write valid b instructions */
	if (distance_start > KVM_INST_B_MAX) {
		kvm_patching_worked = false;
		return;
	}

	/* Modify the chunk to fit the invocation */
	memcpy(p, kvm_emulate_wrtee, kvm_emulate_wrtee_len * 4);
	p[kvm_emulate_wrtee_branch_offs] |= distance_end & KVM_INST_B_MASK;

	if (imm_one) {
		p[kvm_emulate_wrtee_reg_offs] =
			KVM_INST_LI | __PPC_RT(R30) | MSR_EE;
	} else {
		/* Make clobbered registers work too */
		switch (get_rt(rt)) {
		case 30:
			kvm_patch_ins_ll(&p[kvm_emulate_wrtee_reg_offs],
					 magic_var(scratch2), KVM_RT_30);
			break;
		case 31:
			kvm_patch_ins_ll(&p[kvm_emulate_wrtee_reg_offs],
					 magic_var(scratch1), KVM_RT_30);
			break;
		default:
			p[kvm_emulate_wrtee_reg_offs] |= rt;
			break;
		}
	}

	p[kvm_emulate_wrtee_orig_ins_offs] = *inst;
	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_wrtee_len * 4);

	/* Patch the invocation */
	kvm_patch_ins_b(inst, distance_start);
}

extern u32 kvm_emulate_wrteei_0_branch_offs;
extern u32 kvm_emulate_wrteei_0_len;
extern u32 kvm_emulate_wrteei_0[];

static void kvm_patch_ins_wrteei_0(u32 *inst)
{
	u32 *p;
	int distance_start;
	int distance_end;
	ulong next_inst;

	p = kvm_alloc(kvm_emulate_wrteei_0_len * 4);
	if (!p)
		return;

	/* Find out where we are and put everything there */
	distance_start = (ulong)p - (ulong)inst;
	next_inst = ((ulong)inst + 4);
	distance_end = next_inst - (ulong)&p[kvm_emulate_wrteei_0_branch_offs];

	/* Make sure we only write valid b instructions */
	if (distance_start > KVM_INST_B_MAX) {
		kvm_patching_worked = false;
		return;
	}

	memcpy(p, kvm_emulate_wrteei_0, kvm_emulate_wrteei_0_len * 4);
	p[kvm_emulate_wrteei_0_branch_offs] |= distance_end & KVM_INST_B_MASK;
	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_wrteei_0_len * 4);

	/* Patch the invocation */
	kvm_patch_ins_b(inst, distance_start);
}

#endif

#ifdef CONFIG_PPC_BOOK3S_32

extern u32 kvm_emulate_mtsrin_branch_offs;
extern u32 kvm_emulate_mtsrin_reg1_offs;
extern u32 kvm_emulate_mtsrin_reg2_offs;
extern u32 kvm_emulate_mtsrin_orig_ins_offs;
extern u32 kvm_emulate_mtsrin_len;
extern u32 kvm_emulate_mtsrin[];

static void kvm_patch_ins_mtsrin(u32 *inst, u32 rt, u32 rb)
{
	u32 *p;
	int distance_start;
	int distance_end;
	ulong next_inst;

	p = kvm_alloc(kvm_emulate_mtsrin_len * 4);
	if (!p)
		return;

	/* Find out where we are and put everything there */
	distance_start = (ulong)p - (ulong)inst;
	next_inst = ((ulong)inst + 4);
	distance_end = next_inst - (ulong)&p[kvm_emulate_mtsrin_branch_offs];

	/* Make sure we only write valid b instructions */
	if (distance_start > KVM_INST_B_MAX) {
		kvm_patching_worked = false;
		return;
	}

	/* Modify the chunk to fit the invocation */
	memcpy(p, kvm_emulate_mtsrin, kvm_emulate_mtsrin_len * 4);
	p[kvm_emulate_mtsrin_branch_offs] |= distance_end & KVM_INST_B_MASK;
	p[kvm_emulate_mtsrin_reg1_offs] |= (rb << 10);
	p[kvm_emulate_mtsrin_reg2_offs] |= rt;
	p[kvm_emulate_mtsrin_orig_ins_offs] = *inst;
	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_mtsrin_len * 4);

	/* Patch the invocation */
	kvm_patch_ins_b(inst, distance_start);
}

#endif

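/*
 * Runs on each CPU via on_each_cpu(): ask the host to map the magic page
 * at KVM_MAGIC_PAGE for this vcpu and collect the feature bits the host
 * reports back in out[0].
 */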
static void kvm_map_magic_page(void *data)
{
	u32 *features = data;

	ulong in[8];
	ulong out[8];

	in[0] = KVM_MAGIC_PAGE;
	in[1] = KVM_MAGIC_PAGE;

	kvm_hypercall(in, out, KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE));

	*features = out[0];
}

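/*
 * Look at a single kernel instruction and, if it is one of the privileged
 * instructions we know how to handle, patch it in place: mfmsr/mfspr of
 * shared registers become loads from the magic page, the corresponding
 * mtspr become stores, tlbsync becomes a nop, and MSR-modifying
 * instructions (mtmsr/mtmsrd/wrtee) are redirected to emulation
 * trampolines.  The three switch statements match the instruction with
 * the rt field masked out, with rt and rb masked out, and verbatim,
 * respectively.
 */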
static void kvm_check_ins(u32 *inst, u32 features)
{
	u32 _inst = *inst;
	u32 inst_no_rt = _inst & ~KVM_MASK_RT;
	u32 inst_rt = _inst & KVM_MASK_RT;

	switch (inst_no_rt) {
	/* Loads */
	case KVM_INST_MFMSR:
		kvm_patch_ins_ld(inst, magic_var(msr), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG0):
		kvm_patch_ins_ld(inst, magic_var(sprg0), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG1):
		kvm_patch_ins_ld(inst, magic_var(sprg1), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG2):
		kvm_patch_ins_ld(inst, magic_var(sprg2), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG3):
		kvm_patch_ins_ld(inst, magic_var(sprg3), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SRR0):
		kvm_patch_ins_ld(inst, magic_var(srr0), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SRR1):
		kvm_patch_ins_ld(inst, magic_var(srr1), inst_rt);
		break;
#ifdef CONFIG_BOOKE
	case KVM_INST_MFSPR(SPRN_DEAR):
#else
	case KVM_INST_MFSPR(SPRN_DAR):
#endif
		kvm_patch_ins_ld(inst, magic_var(dar), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_DSISR):
		kvm_patch_ins_lwz(inst, magic_var(dsisr), inst_rt);
		break;

#ifdef CONFIG_PPC_BOOK3E_MMU
	case KVM_INST_MFSPR(SPRN_MAS0):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(mas0), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_MAS1):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(mas1), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_MAS2):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_ld(inst, magic_var(mas2), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_MAS3):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(mas7_3) + 4, inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_MAS4):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(mas4), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_MAS6):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(mas6), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_MAS7):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(mas7_3), inst_rt);
		break;
#endif /* CONFIG_PPC_BOOK3E_MMU */

	case KVM_INST_MFSPR(SPRN_SPRG4):
#ifdef CONFIG_BOOKE
	case KVM_INST_MFSPR(SPRN_SPRG4R):
#endif
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_ld(inst, magic_var(sprg4), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG5):
#ifdef CONFIG_BOOKE
	case KVM_INST_MFSPR(SPRN_SPRG5R):
#endif
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_ld(inst, magic_var(sprg5), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG6):
#ifdef CONFIG_BOOKE
	case KVM_INST_MFSPR(SPRN_SPRG6R):
#endif
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_ld(inst, magic_var(sprg6), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG7):
#ifdef CONFIG_BOOKE
	case KVM_INST_MFSPR(SPRN_SPRG7R):
#endif
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_ld(inst, magic_var(sprg7), inst_rt);
		break;

#ifdef CONFIG_BOOKE
	case KVM_INST_MFSPR(SPRN_ESR):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(esr), inst_rt);
		break;
#endif

	case KVM_INST_MFSPR(SPRN_PIR):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(pir), inst_rt);
		break;


	/* Stores */
	case KVM_INST_MTSPR(SPRN_SPRG0):
		kvm_patch_ins_std(inst, magic_var(sprg0), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SPRG1):
		kvm_patch_ins_std(inst, magic_var(sprg1), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SPRG2):
		kvm_patch_ins_std(inst, magic_var(sprg2), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SPRG3):
		kvm_patch_ins_std(inst, magic_var(sprg3), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SRR0):
		kvm_patch_ins_std(inst, magic_var(srr0), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SRR1):
		kvm_patch_ins_std(inst, magic_var(srr1), inst_rt);
		break;
#ifdef CONFIG_BOOKE
	case KVM_INST_MTSPR(SPRN_DEAR):
#else
	case KVM_INST_MTSPR(SPRN_DAR):
#endif
		kvm_patch_ins_std(inst, magic_var(dar), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_DSISR):
		kvm_patch_ins_stw(inst, magic_var(dsisr), inst_rt);
		break;
#ifdef CONFIG_PPC_BOOK3E_MMU
	case KVM_INST_MTSPR(SPRN_MAS0):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(mas0), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_MAS1):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(mas1), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_MAS2):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_std(inst, magic_var(mas2), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_MAS3):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(mas7_3) + 4, inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_MAS4):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(mas4), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_MAS6):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(mas6), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_MAS7):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(mas7_3), inst_rt);
		break;
#endif /* CONFIG_PPC_BOOK3E_MMU */

	case KVM_INST_MTSPR(SPRN_SPRG4):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_std(inst, magic_var(sprg4), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SPRG5):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_std(inst, magic_var(sprg5), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SPRG6):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_std(inst, magic_var(sprg6), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SPRG7):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_std(inst, magic_var(sprg7), inst_rt);
		break;

#ifdef CONFIG_BOOKE
	case KVM_INST_MTSPR(SPRN_ESR):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(esr), inst_rt);
		break;
#endif

	/* Nops */
	case KVM_INST_TLBSYNC:
		kvm_patch_ins_nop(inst);
		break;

	/* Rewrites */
	case KVM_INST_MTMSRD_L1:
		kvm_patch_ins_mtmsrd(inst, inst_rt);
		break;
	case KVM_INST_MTMSR:
	case KVM_INST_MTMSRD_L0:
		kvm_patch_ins_mtmsr(inst, inst_rt);
		break;
#ifdef CONFIG_BOOKE
	case KVM_INST_WRTEE:
		kvm_patch_ins_wrtee(inst, inst_rt, 0);
		break;
#endif
	}

	switch (inst_no_rt & ~KVM_MASK_RB) {
#ifdef CONFIG_PPC_BOOK3S_32
	case KVM_INST_MTSRIN:
		if (features & KVM_MAGIC_FEAT_SR) {
			u32 inst_rb = _inst & KVM_MASK_RB;
			kvm_patch_ins_mtsrin(inst, inst_rt, inst_rb);
		}
		break;
#endif
	}

	switch (_inst) {
#ifdef CONFIG_BOOKE
	case KVM_INST_WRTEEI_0:
		kvm_patch_ins_wrteei_0(inst);
		break;

	case KVM_INST_WRTEEI_1:
		kvm_patch_ins_wrtee(inst, 0, 1);
		break;
#endif
	}
}

extern u32 kvm_template_start[];
extern u32 kvm_template_end[];

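/*
 * Main driver for the patching: map the magic page on all CPUs, make sure
 * the mapping actually works, then walk every instruction between _stext
 * and _etext (skipping the template code we copy from) and patch whatever
 * qualifies.
 */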
static void kvm_use_magic_page(void)
{
	u32 *p;
	u32 *start, *end;
	u32 tmp;
	u32 features;

	/* Tell the host to map the magic page to -4096 on all CPUs */
	on_each_cpu(kvm_map_magic_page, &features, 1);

	/* Quick self-test to see if the mapping works */
	if (__get_user(tmp, (u32*)KVM_MAGIC_PAGE)) {
		kvm_patching_worked = false;
		return;
	}

	/* Now loop through all code and find instructions */
	start = (void*)_stext;
	end = (void*)_etext;

	/*
	 * Being interrupted in the middle of patching would
	 * be bad for SPRG4-7, which KVM can't keep in sync
	 * with emulated accesses because reads don't trap.
	 */
	local_irq_disable();

	for (p = start; p < end; p++) {
		/* Avoid patching the template code */
		if (p >= kvm_template_start && p < kvm_template_end) {
			p = kvm_template_end - 1;
			continue;
		}
		kvm_check_ins(p, features);
	}

	local_irq_enable();

	printk(KERN_INFO "KVM: Live patching for a fast VM %s\n",
			 kvm_patching_worked ? "worked" : "failed");
}

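/*
 * ePAPR-style hypercall: the call number goes in r11, up to eight
 * arguments in r3-r10, the return status comes back in r3 and up to eight
 * output values in r4-r11.  epapr_hypercall_start is the hcall
 * instruction sequence set up earlier during boot (see epapr_paravirt.c).
 */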
unsigned long kvm_hypercall(unsigned long *in,
			    unsigned long *out,
			    unsigned long nr)
{
	unsigned long register r0 asm("r0");
	unsigned long register r3 asm("r3") = in[0];
	unsigned long register r4 asm("r4") = in[1];
	unsigned long register r5 asm("r5") = in[2];
	unsigned long register r6 asm("r6") = in[3];
	unsigned long register r7 asm("r7") = in[4];
	unsigned long register r8 asm("r8") = in[5];
	unsigned long register r9 asm("r9") = in[6];
	unsigned long register r10 asm("r10") = in[7];
	unsigned long register r11 asm("r11") = nr;
	unsigned long register r12 asm("r12");

	asm volatile("bl epapr_hypercall_start"
		     : "=r"(r0), "=r"(r3), "=r"(r4), "=r"(r5), "=r"(r6),
		       "=r"(r7), "=r"(r8), "=r"(r9), "=r"(r10), "=r"(r11),
		       "=r"(r12)
		     : "r"(r3), "r"(r4), "r"(r5), "r"(r6), "r"(r7), "r"(r8),
		       "r"(r9), "r"(r10), "r"(r11)
		     : "memory", "cc", "xer", "ctr", "lr");

	out[0] = r4;
	out[1] = r5;
	out[2] = r6;
	out[3] = r7;
	out[4] = r8;
	out[5] = r9;
	out[6] = r10;
	out[7] = r11;

	return r3;
}
EXPORT_SYMBOL_GPL(kvm_hypercall);

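/*
 * Give the unused tail of kvm_tmp back to the page allocator once all
 * patching is done.
 */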
static __init void kvm_free_tmp(void)
{
	unsigned long start, end;

	start = (ulong)&kvm_tmp[kvm_tmp_index + (PAGE_SIZE - 1)] & PAGE_MASK;
	end = (ulong)&kvm_tmp[ARRAY_SIZE(kvm_tmp)] & PAGE_MASK;

	/* Free the tmp space we don't need */
	for (; start < end; start += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(start));
		init_page_count(virt_to_page(start));
		free_page(start);
		totalram_pages++;
	}
}

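/*
 * Boot-time entry point: only patch the kernel if it is actually running
 * as a KVM guest and the host advertises the magic page feature; on
 * 64-bit Book3S guests also enable nap power savings.
 */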
static int __init kvm_guest_init(void)
{
	if (!kvm_para_available())
		goto free_tmp;

	if (!epapr_paravirt_enabled)
		goto free_tmp;

	if (kvm_para_has_feature(KVM_FEATURE_MAGIC_PAGE))
		kvm_use_magic_page();

#ifdef CONFIG_PPC_BOOK3S_64
	/* Enable napping */
	powersave_nap = 1;
#endif

free_tmp:
	kvm_free_tmp();

	return 0;
}

postcore_initcall(kvm_guest_init);