Linux Kernel 3.7.1
paravirt.h
1 #ifndef _ASM_X86_PARAVIRT_H
2 #define _ASM_X86_PARAVIRT_H
3 /* Various instructions on x86 need to be replaced for
4  * para-virtualization: those hooks are defined here. */
5 
6 #ifdef CONFIG_PARAVIRT
7 #include <asm/pgtable_types.h>
8 #include <asm/asm.h>
9 
10 #include <asm/paravirt_types.h>
11 
12 #ifndef __ASSEMBLY__
13 #include <linux/bug.h>
14 #include <linux/types.h>
15 #include <linux/cpumask.h>
16 
17 static inline int paravirt_enabled(void)
18 {
19  return pv_info.paravirt_enabled;
20 }
21 
22 static inline void load_sp0(struct tss_struct *tss,
23  struct thread_struct *thread)
24 {
25  PVOP_VCALL2(pv_cpu_ops.load_sp0, tss, thread);
26 }
27 
28 /* The paravirtualized CPUID instruction. */
29 static inline void __cpuid(unsigned int *eax, unsigned int *ebx,
30  unsigned int *ecx, unsigned int *edx)
31 {
32  PVOP_VCALL4(pv_cpu_ops.cpuid, eax, ebx, ecx, edx);
33 }
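As a usage sketch (the helper name and the memcpy() calls below are illustrative, not part of this header), this is roughly how the cpuid() wrappers in <asm/processor.h> drive __cpuid(): the leaf number goes in through *eax and the results come back through all four register pointers.

static inline void example_cpuid_vendor(char vendor[13])
{
	/* Leaf 0 returns the CPU vendor string in EBX, EDX, ECX (in that order). */
	unsigned int eax = 0, ebx = 0, ecx = 0, edx = 0;

	__cpuid(&eax, &ebx, &ecx, &edx);
	memcpy(vendor + 0, &ebx, 4);
	memcpy(vendor + 4, &edx, 4);
	memcpy(vendor + 8, &ecx, 4);
	vendor[12] = '\0';
}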
34 
35 /*
36  * These special macros can be used to get or set a debugging register
37  */
38 static inline unsigned long paravirt_get_debugreg(int reg)
39 {
40  return PVOP_CALL1(unsigned long, pv_cpu_ops.get_debugreg, reg);
41 }
42 #define get_debugreg(var, reg) var = paravirt_get_debugreg(reg)
43 static inline void set_debugreg(unsigned long val, int reg)
44 {
45  PVOP_VCALL2(pv_cpu_ops.set_debugreg, reg, val);
46 }
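A minimal sketch of how these accessors are typically used (the function below is illustrative and not kernel code): snapshot DR7, clear its breakpoint-enable bits around a critical region, then restore it.

static inline void example_suspend_breakpoints(void)
{
	unsigned long dr7;

	get_debugreg(dr7, 7);		/* expands to paravirt_get_debugreg(7) */
	set_debugreg(dr7 & ~0xffUL, 7);	/* drop the L0-L3/G0-G3 enable bits */
	/* ... work with hardware breakpoints disabled ... */
	set_debugreg(dr7, 7);		/* restore the previous DR7 contents */
}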
47 
48 static inline void clts(void)
49 {
50  PVOP_VCALL0(pv_cpu_ops.clts);
51 }
52 
53 static inline unsigned long read_cr0(void)
54 {
55  return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr0);
56 }
57 
58 static inline void write_cr0(unsigned long x)
59 {
60  PVOP_VCALL1(pv_cpu_ops.write_cr0, x);
61 }
62 
63 static inline unsigned long read_cr2(void)
64 {
65  return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr2);
66 }
67 
68 static inline void write_cr2(unsigned long x)
69 {
70  PVOP_VCALL1(pv_mmu_ops.write_cr2, x);
71 }
72 
73 static inline unsigned long read_cr3(void)
74 {
75  return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr3);
76 }
77 
78 static inline void write_cr3(unsigned long x)
79 {
80  PVOP_VCALL1(pv_mmu_ops.write_cr3, x);
81 }
82 
83 static inline unsigned long read_cr4(void)
84 {
85  return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr4);
86 }
87 static inline unsigned long read_cr4_safe(void)
88 {
89  return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr4_safe);
90 }
91 
92 static inline void write_cr4(unsigned long x)
93 {
94  PVOP_VCALL1(pv_cpu_ops.write_cr4, x);
95 }
96 
97 #ifdef CONFIG_X86_64
98 static inline unsigned long read_cr8(void)
99 {
100  return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr8);
101 }
102 
103 static inline void write_cr8(unsigned long x)
104 {
105  PVOP_VCALL1(pv_cpu_ops.write_cr8, x);
106 }
107 #endif
108 
109 static inline void arch_safe_halt(void)
110 {
111  PVOP_VCALL0(pv_irq_ops.safe_halt);
112 }
113 
114 static inline void halt(void)
115 {
116  PVOP_VCALL0(pv_irq_ops.halt);
117 }
118 
119 static inline void wbinvd(void)
120 {
121  PVOP_VCALL0(pv_cpu_ops.wbinvd);
122 }
123 
124 #define get_kernel_rpl() (pv_info.kernel_rpl)
125 
126 static inline u64 paravirt_read_msr(unsigned msr, int *err)
127 {
128  return PVOP_CALL2(u64, pv_cpu_ops.read_msr, msr, err);
129 }
130 
131 static inline int paravirt_write_msr(unsigned msr, unsigned low, unsigned high)
132 {
133  return PVOP_CALL3(int, pv_cpu_ops.write_msr, msr, low, high);
134 }
135 
136 /* These should all do BUG_ON(_err), but our headers are too tangled. */
137 #define rdmsr(msr, val1, val2) \
138 do { \
139  int _err; \
140  u64 _l = paravirt_read_msr(msr, &_err); \
141  val1 = (u32)_l; \
142  val2 = _l >> 32; \
143 } while (0)
144 
145 #define wrmsr(msr, val1, val2) \
146 do { \
147  paravirt_write_msr(msr, val1, val2); \
148 } while (0)
149 
150 #define rdmsrl(msr, val) \
151 do { \
152  int _err; \
153  val = paravirt_read_msr(msr, &_err); \
154 } while (0)
155 
156 #define wrmsrl(msr, val) wrmsr(msr, (u32)((u64)(val)), ((u64)(val))>>32)
157 #define wrmsr_safe(msr, a, b) paravirt_write_msr(msr, a, b)
158 
159 /* rdmsr with exception handling */
160 #define rdmsr_safe(msr, a, b) \
161 ({ \
162  int _err; \
163  u64 _l = paravirt_read_msr(msr, &_err); \
164  (*a) = (u32)_l; \
165  (*b) = _l >> 32; \
166  _err; \
167 })
168 
169 static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
170 {
171  int err;
172 
173  *p = paravirt_read_msr(msr, &err);
174  return err;
175 }
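To illustrate the difference between the checked and unchecked forms (the helper below is a sketch, not kernel code): rdmsrl() discards the fault indication, while rdmsr_safe()/rdmsrl_safe() return it, so reading a possibly absent MSR cannot take the kernel down with a #GP.

static inline int example_read_efer(u64 *efer)
{
	/* 0xc0000080 is MSR_EFER from <asm/msr-index.h>. */
	return rdmsrl_safe(0xc0000080, efer);	/* 0 on success, non-zero if the read faulted */
}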
176 
177 static inline u64 paravirt_read_tsc(void)
178 {
179  return PVOP_CALL0(u64, pv_cpu_ops.read_tsc);
180 }
181 
182 #define rdtscl(low) \
183 do { \
184  u64 _l = paravirt_read_tsc(); \
185  low = (int)_l; \
186 } while (0)
187 
188 #define rdtscll(val) (val = paravirt_read_tsc())
189 
190 static inline unsigned long long paravirt_sched_clock(void)
191 {
192  return PVOP_CALL0(unsigned long long, pv_time_ops.sched_clock);
193 }
194 
195 struct static_key;
196 extern struct static_key paravirt_steal_enabled;
197 extern struct static_key paravirt_steal_rq_enabled;
198 
199 static inline u64 paravirt_steal_clock(int cpu)
200 {
201  return PVOP_CALL1(u64, pv_time_ops.steal_clock, cpu);
202 }
203 
204 static inline unsigned long long paravirt_read_pmc(int counter)
205 {
206  return PVOP_CALL1(u64, pv_cpu_ops.read_pmc, counter);
207 }
208 
209 #define rdpmc(counter, low, high) \
210 do { \
211  u64 _l = paravirt_read_pmc(counter); \
212  low = (u32)_l; \
213  high = _l >> 32; \
214 } while (0)
215 
216 #define rdpmcl(counter, val) ((val) = paravirt_read_pmc(counter))
217 
218 static inline unsigned long long paravirt_rdtscp(unsigned int *aux)
219 {
220  return PVOP_CALL1(u64, pv_cpu_ops.read_tscp, aux);
221 }
222 
223 #define rdtscp(low, high, aux) \
224 do { \
225  int __aux; \
226  unsigned long __val = paravirt_rdtscp(&__aux); \
227  (low) = (u32)__val; \
228  (high) = (u32)(__val >> 32); \
229  (aux) = __aux; \
230 } while (0)
231 
232 #define rdtscpll(val, aux) \
233 do { \
234  unsigned long __aux; \
235  val = paravirt_rdtscp(&__aux); \
236  (aux) = __aux; \
237 } while (0)
238 
239 static inline void paravirt_alloc_ldt(struct desc_struct *ldt, unsigned entries)
240 {
241  PVOP_VCALL2(pv_cpu_ops.alloc_ldt, ldt, entries);
242 }
243 
244 static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
245 {
246  PVOP_VCALL2(pv_cpu_ops.free_ldt, ldt, entries);
247 }
248 
249 static inline void load_TR_desc(void)
250 {
251  PVOP_VCALL0(pv_cpu_ops.load_tr_desc);
252 }
253 static inline void load_gdt(const struct desc_ptr *dtr)
254 {
255  PVOP_VCALL1(pv_cpu_ops.load_gdt, dtr);
256 }
257 static inline void load_idt(const struct desc_ptr *dtr)
258 {
259  PVOP_VCALL1(pv_cpu_ops.load_idt, dtr);
260 }
261 static inline void set_ldt(const void *addr, unsigned entries)
262 {
263  PVOP_VCALL2(pv_cpu_ops.set_ldt, addr, entries);
264 }
265 static inline void store_gdt(struct desc_ptr *dtr)
266 {
267  PVOP_VCALL1(pv_cpu_ops.store_gdt, dtr);
268 }
269 static inline void store_idt(struct desc_ptr *dtr)
270 {
271  PVOP_VCALL1(pv_cpu_ops.store_idt, dtr);
272 }
273 static inline unsigned long paravirt_store_tr(void)
274 {
275  return PVOP_CALL0(unsigned long, pv_cpu_ops.store_tr);
276 }
277 #define store_tr(tr) ((tr) = paravirt_store_tr())
278 static inline void load_TLS(struct thread_struct *t, unsigned cpu)
279 {
280  PVOP_VCALL2(pv_cpu_ops.load_tls, t, cpu);
281 }
282 
283 #ifdef CONFIG_X86_64
284 static inline void load_gs_index(unsigned int gs)
285 {
286  PVOP_VCALL1(pv_cpu_ops.load_gs_index, gs);
287 }
288 #endif
289 
290 static inline void write_ldt_entry(struct desc_struct *dt, int entry,
291  const void *desc)
292 {
293  PVOP_VCALL3(pv_cpu_ops.write_ldt_entry, dt, entry, desc);
294 }
295 
296 static inline void write_gdt_entry(struct desc_struct *dt, int entry,
297  void *desc, int type)
298 {
299  PVOP_VCALL4(pv_cpu_ops.write_gdt_entry, dt, entry, desc, type);
300 }
301 
302 static inline void write_idt_entry(gate_desc *dt, int entry, const gate_desc *g)
303 {
304  PVOP_VCALL3(pv_cpu_ops.write_idt_entry, dt, entry, g);
305 }
306 static inline void set_iopl_mask(unsigned mask)
307 {
308  PVOP_VCALL1(pv_cpu_ops.set_iopl_mask, mask);
309 }
310 
311 /* The paravirtualized I/O functions */
312 static inline void slow_down_io(void)
313 {
314  pv_cpu_ops.io_delay();
315 #ifdef REALLY_SLOW_IO
316  pv_cpu_ops.io_delay();
317  pv_cpu_ops.io_delay();
318  pv_cpu_ops.io_delay();
319 #endif
320 }
321 
322 #ifdef CONFIG_SMP
323 static inline void startup_ipi_hook(int phys_apicid, unsigned long start_eip,
324  unsigned long start_esp)
325 {
326  PVOP_VCALL3(pv_apic_ops.startup_ipi_hook,
327  phys_apicid, start_eip, start_esp);
328 }
329 #endif
330 
331 static inline void paravirt_activate_mm(struct mm_struct *prev,
332  struct mm_struct *next)
333 {
334  PVOP_VCALL2(pv_mmu_ops.activate_mm, prev, next);
335 }
336 
337 static inline void arch_dup_mmap(struct mm_struct *oldmm,
338  struct mm_struct *mm)
339 {
340  PVOP_VCALL2(pv_mmu_ops.dup_mmap, oldmm, mm);
341 }
342 
343 static inline void arch_exit_mmap(struct mm_struct *mm)
344 {
345  PVOP_VCALL1(pv_mmu_ops.exit_mmap, mm);
346 }
347 
348 static inline void __flush_tlb(void)
349 {
350  PVOP_VCALL0(pv_mmu_ops.flush_tlb_user);
351 }
352 static inline void __flush_tlb_global(void)
353 {
354  PVOP_VCALL0(pv_mmu_ops.flush_tlb_kernel);
355 }
356 static inline void __flush_tlb_single(unsigned long addr)
357 {
358  PVOP_VCALL1(pv_mmu_ops.flush_tlb_single, addr);
359 }
360 
361 static inline void flush_tlb_others(const struct cpumask *cpumask,
362  struct mm_struct *mm,
363  unsigned long start,
364  unsigned long end)
365 {
366  PVOP_VCALL4(pv_mmu_ops.flush_tlb_others, cpumask, mm, start, end);
367 }
368 
369 static inline int paravirt_pgd_alloc(struct mm_struct *mm)
370 {
371  return PVOP_CALL1(int, pv_mmu_ops.pgd_alloc, mm);
372 }
373 
374 static inline void paravirt_pgd_free(struct mm_struct *mm, pgd_t *pgd)
375 {
376  PVOP_VCALL2(pv_mmu_ops.pgd_free, mm, pgd);
377 }
378 
379 static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned long pfn)
380 {
381  PVOP_VCALL2(pv_mmu_ops.alloc_pte, mm, pfn);
382 }
383 static inline void paravirt_release_pte(unsigned long pfn)
384 {
385  PVOP_VCALL1(pv_mmu_ops.release_pte, pfn);
386 }
387 
388 static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
389 {
390  PVOP_VCALL2(pv_mmu_ops.alloc_pmd, mm, pfn);
391 }
392 
393 static inline void paravirt_release_pmd(unsigned long pfn)
394 {
395  PVOP_VCALL1(pv_mmu_ops.release_pmd, pfn);
396 }
397 
398 static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned long pfn)
399 {
400  PVOP_VCALL2(pv_mmu_ops.alloc_pud, mm, pfn);
401 }
402 static inline void paravirt_release_pud(unsigned long pfn)
403 {
404  PVOP_VCALL1(pv_mmu_ops.release_pud, pfn);
405 }
406 
407 static inline void pte_update(struct mm_struct *mm, unsigned long addr,
408  pte_t *ptep)
409 {
410  PVOP_VCALL3(pv_mmu_ops.pte_update, mm, addr, ptep);
411 }
412 static inline void pmd_update(struct mm_struct *mm, unsigned long addr,
413  pmd_t *pmdp)
414 {
415  PVOP_VCALL3(pv_mmu_ops.pmd_update, mm, addr, pmdp);
416 }
417 
418 static inline void pte_update_defer(struct mm_struct *mm, unsigned long addr,
419  pte_t *ptep)
420 {
421  PVOP_VCALL3(pv_mmu_ops.pte_update_defer, mm, addr, ptep);
422 }
423 
424 static inline void pmd_update_defer(struct mm_struct *mm, unsigned long addr,
425  pmd_t *pmdp)
426 {
427  PVOP_VCALL3(pv_mmu_ops.pmd_update_defer, mm, addr, pmdp);
428 }
429 
430 static inline pte_t __pte(pteval_t val)
431 {
432  pteval_t ret;
433 
434  if (sizeof(pteval_t) > sizeof(long))
435  ret = PVOP_CALLEE2(pteval_t,
436  pv_mmu_ops.make_pte,
437  val, (u64)val >> 32);
438  else
439  ret = PVOP_CALLEE1(pteval_t,
440  pv_mmu_ops.make_pte,
441  val);
442 
443  return (pte_t) { .pte = ret };
444 }
445 
446 static inline pteval_t pte_val(pte_t pte)
447 {
448  pteval_t ret;
449 
450  if (sizeof(pteval_t) > sizeof(long))
451  ret = PVOP_CALLEE2(pteval_t, pv_mmu_ops.pte_val,
452  pte.pte, (u64)pte.pte >> 32);
453  else
454  ret = PVOP_CALLEE1(pteval_t, pv_mmu_ops.pte_val,
455  pte.pte);
456 
457  return ret;
458 }
459 
460 static inline pgd_t __pgd(pgdval_t val)
461 {
462  pgdval_t ret;
463 
464  if (sizeof(pgdval_t) > sizeof(long))
465  ret = PVOP_CALLEE2(pgdval_t, pv_mmu_ops.make_pgd,
466  val, (u64)val >> 32);
467  else
468  ret = PVOP_CALLEE1(pgdval_t, pv_mmu_ops.make_pgd,
469  val);
470 
471  return (pgd_t) { ret };
472 }
473 
474 static inline pgdval_t pgd_val(pgd_t pgd)
475 {
476  pgdval_t ret;
477 
478  if (sizeof(pgdval_t) > sizeof(long))
479  ret = PVOP_CALLEE2(pgdval_t, pv_mmu_ops.pgd_val,
480  pgd.pgd, (u64)pgd.pgd >> 32);
481  else
482  ret = PVOP_CALLEE1(pgdval_t, pv_mmu_ops.pgd_val,
483  pgd.pgd);
484 
485  return ret;
486 }
487 
488 #define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
489 static inline pte_t ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr,
490  pte_t *ptep)
491 {
492  pteval_t ret;
493 
494  ret = PVOP_CALL3(pteval_t, pv_mmu_ops.ptep_modify_prot_start,
495  mm, addr, ptep);
496 
497  return (pte_t) { .pte = ret };
498 }
499 
500 static inline void ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
501  pte_t *ptep, pte_t pte)
502 {
503  if (sizeof(pteval_t) > sizeof(long))
504  /* 5 arg words */
505  pv_mmu_ops.ptep_modify_prot_commit(mm, addr, ptep, pte);
506  else
507  PVOP_VCALL4(pv_mmu_ops.ptep_modify_prot_commit,
508  mm, addr, ptep, pte.pte);
509 }
510 
511 static inline void set_pte(pte_t *ptep, pte_t pte)
512 {
513  if (sizeof(pteval_t) > sizeof(long))
514  PVOP_VCALL3(pv_mmu_ops.set_pte, ptep,
515  pte.pte, (u64)pte.pte >> 32);
516  else
517  PVOP_VCALL2(pv_mmu_ops.set_pte, ptep,
518  pte.pte);
519 }
520 
521 static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
522  pte_t *ptep, pte_t pte)
523 {
524  if (sizeof(pteval_t) > sizeof(long))
525  /* 5 arg words */
526  pv_mmu_ops.set_pte_at(mm, addr, ptep, pte);
527  else
528  PVOP_VCALL4(pv_mmu_ops.set_pte_at, mm, addr, ptep, pte.pte);
529 }
530 
531 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
532 static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
533  pmd_t *pmdp, pmd_t pmd)
534 {
535  if (sizeof(pmdval_t) > sizeof(long))
536  /* 5 arg words */
537  pv_mmu_ops.set_pmd_at(mm, addr, pmdp, pmd);
538  else
539  PVOP_VCALL4(pv_mmu_ops.set_pmd_at, mm, addr, pmdp,
540  native_pmd_val(pmd));
541 }
542 #endif
543 
544 static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
545 {
546  pmdval_t val = native_pmd_val(pmd);
547 
548  if (sizeof(pmdval_t) > sizeof(long))
549  PVOP_VCALL3(pv_mmu_ops.set_pmd, pmdp, val, (u64)val >> 32);
550  else
551  PVOP_VCALL2(pv_mmu_ops.set_pmd, pmdp, val);
552 }
553 
554 #if PAGETABLE_LEVELS >= 3
555 static inline pmd_t __pmd(pmdval_t val)
556 {
557  pmdval_t ret;
558 
559  if (sizeof(pmdval_t) > sizeof(long))
560  ret = PVOP_CALLEE2(pmdval_t, pv_mmu_ops.make_pmd,
561  val, (u64)val >> 32);
562  else
563  ret = PVOP_CALLEE1(pmdval_t, pv_mmu_ops.make_pmd,
564  val);
565 
566  return (pmd_t) { ret };
567 }
568 
569 static inline pmdval_t pmd_val(pmd_t pmd)
570 {
571  pmdval_t ret;
572 
573  if (sizeof(pmdval_t) > sizeof(long))
574  ret = PVOP_CALLEE2(pmdval_t, pv_mmu_ops.pmd_val,
575  pmd.pmd, (u64)pmd.pmd >> 32);
576  else
577  ret = PVOP_CALLEE1(pmdval_t, pv_mmu_ops.pmd_val,
578  pmd.pmd);
579 
580  return ret;
581 }
582 
583 static inline void set_pud(pud_t *pudp, pud_t pud)
584 {
585  pudval_t val = native_pud_val(pud);
586 
587  if (sizeof(pudval_t) > sizeof(long))
588  PVOP_VCALL3(pv_mmu_ops.set_pud, pudp,
589  val, (u64)val >> 32);
590  else
591  PVOP_VCALL2(pv_mmu_ops.set_pud, pudp,
592  val);
593 }
594 #if PAGETABLE_LEVELS == 4
595 static inline pud_t __pud(pudval_t val)
596 {
597  pudval_t ret;
598 
599  if (sizeof(pudval_t) > sizeof(long))
600  ret = PVOP_CALLEE2(pudval_t, pv_mmu_ops.make_pud,
601  val, (u64)val >> 32);
602  else
603  ret = PVOP_CALLEE1(pudval_t, pv_mmu_ops.make_pud,
604  val);
605 
606  return (pud_t) { ret };
607 }
608 
609 static inline pudval_t pud_val(pud_t pud)
610 {
611  pudval_t ret;
612 
613  if (sizeof(pudval_t) > sizeof(long))
614  ret = PVOP_CALLEE2(pudval_t, pv_mmu_ops.pud_val,
615  pud.pud, (u64)pud.pud >> 32);
616  else
617  ret = PVOP_CALLEE1(pudval_t, pv_mmu_ops.pud_val,
618  pud.pud);
619 
620  return ret;
621 }
622 
623 static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
624 {
625  pgdval_t val = native_pgd_val(pgd);
626 
627  if (sizeof(pgdval_t) > sizeof(long))
628  PVOP_VCALL3(pv_mmu_ops.set_pgd, pgdp,
629  val, (u64)val >> 32);
630  else
631  PVOP_VCALL2(pv_mmu_ops.set_pgd, pgdp,
632  val);
633 }
634 
635 static inline void pgd_clear(pgd_t *pgdp)
636 {
637  set_pgd(pgdp, __pgd(0));
638 }
639 
640 static inline void pud_clear(pud_t *pudp)
641 {
642  set_pud(pudp, __pud(0));
643 }
644 
645 #endif /* PAGETABLE_LEVELS == 4 */
646 
647 #endif /* PAGETABLE_LEVELS >= 3 */
648 
649 #ifdef CONFIG_X86_PAE
650 /* Special-case pte-setting operations for PAE, which can't update a
651  64-bit pte atomically */
652 static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
653 {
654  PVOP_VCALL3(pv_mmu_ops.set_pte_atomic, ptep,
655  pte.pte, pte.pte >> 32);
656 }
657 
658 static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
659  pte_t *ptep)
660 {
661  PVOP_VCALL3(pv_mmu_ops.pte_clear, mm, addr, ptep);
662 }
663 
664 static inline void pmd_clear(pmd_t *pmdp)
665 {
666  PVOP_VCALL1(pv_mmu_ops.pmd_clear, pmdp);
667 }
668 #else /* !CONFIG_X86_PAE */
669 static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
670 {
671  set_pte(ptep, pte);
672 }
673 
674 static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
675  pte_t *ptep)
676 {
677  set_pte_at(mm, addr, ptep, __pte(0));
678 }
679 
680 static inline void pmd_clear(pmd_t *pmdp)
681 {
682  set_pmd(pmdp, __pmd(0));
683 }
684 #endif /* CONFIG_X86_PAE */
685 
686 #define __HAVE_ARCH_START_CONTEXT_SWITCH
687 static inline void arch_start_context_switch(struct task_struct *prev)
688 {
689  PVOP_VCALL1(pv_cpu_ops.start_context_switch, prev);
690 }
691 
692 static inline void arch_end_context_switch(struct task_struct *next)
693 {
694  PVOP_VCALL1(pv_cpu_ops.end_context_switch, next);
695 }
696 
697 #define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
698 static inline void arch_enter_lazy_mmu_mode(void)
699 {
700  PVOP_VCALL0(pv_mmu_ops.lazy_mode.enter);
701 }
702 
703 static inline void arch_leave_lazy_mmu_mode(void)
704 {
705  PVOP_VCALL0(pv_mmu_ops.lazy_mode.leave);
706 }
707 
708 void arch_flush_lazy_mmu_mode(void);
709 
710 static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
711  phys_addr_t phys, pgprot_t flags)
712 {
713  pv_mmu_ops.set_fixmap(idx, phys, flags);
714 }
715 
716 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
717 
718 static inline int arch_spin_is_locked(struct arch_spinlock *lock)
719 {
720  return PVOP_CALL1(int, pv_lock_ops.spin_is_locked, lock);
721 }
722 
723 static inline int arch_spin_is_contended(struct arch_spinlock *lock)
724 {
725  return PVOP_CALL1(int, pv_lock_ops.spin_is_contended, lock);
726 }
727 #define arch_spin_is_contended arch_spin_is_contended
728 
729 static __always_inline void arch_spin_lock(struct arch_spinlock *lock)
730 {
731  PVOP_VCALL1(pv_lock_ops.spin_lock, lock);
732 }
733 
734 static __always_inline void arch_spin_lock_flags(struct arch_spinlock *lock,
735  unsigned long flags)
736 {
737  PVOP_VCALL2(pv_lock_ops.spin_lock_flags, lock, flags);
738 }
739 
740 static __always_inline int arch_spin_trylock(struct arch_spinlock *lock)
741 {
742  return PVOP_CALL1(int, pv_lock_ops.spin_trylock, lock);
743 }
744 
745 static __always_inline void arch_spin_unlock(struct arch_spinlock *lock)
746 {
747  PVOP_VCALL1(pv_lock_ops.spin_unlock, lock);
748 }
749 
750 #endif
751 
752 #ifdef CONFIG_X86_32
753 #define PV_SAVE_REGS "pushl %ecx; pushl %edx;"
754 #define PV_RESTORE_REGS "popl %edx; popl %ecx;"
755 
756 /* save and restore all caller-save registers, except return value */
757 #define PV_SAVE_ALL_CALLER_REGS "pushl %ecx;"
758 #define PV_RESTORE_ALL_CALLER_REGS "popl %ecx;"
759 
760 #define PV_FLAGS_ARG "0"
761 #define PV_EXTRA_CLOBBERS
762 #define PV_VEXTRA_CLOBBERS
763 #else
764 /* save and restore all caller-save registers, except return value */
765 #define PV_SAVE_ALL_CALLER_REGS \
766  "push %rcx;" \
767  "push %rdx;" \
768  "push %rsi;" \
769  "push %rdi;" \
770  "push %r8;" \
771  "push %r9;" \
772  "push %r10;" \
773  "push %r11;"
774 #define PV_RESTORE_ALL_CALLER_REGS \
775  "pop %r11;" \
776  "pop %r10;" \
777  "pop %r9;" \
778  "pop %r8;" \
779  "pop %rdi;" \
780  "pop %rsi;" \
781  "pop %rdx;" \
782  "pop %rcx;"
783 
784 /* We save some registers, but saving all of them would be too much. We
785  * clobber all caller-saved registers except the argument register. */
786 #define PV_SAVE_REGS "pushq %%rdi;"
787 #define PV_RESTORE_REGS "popq %%rdi;"
788 #define PV_EXTRA_CLOBBERS EXTRA_CLOBBERS, "rcx" , "rdx", "rsi"
789 #define PV_VEXTRA_CLOBBERS EXTRA_CLOBBERS, "rdi", "rcx" , "rdx", "rsi"
790 #define PV_FLAGS_ARG "D"
791 #endif
792 
793 /*
794  * Generate a thunk around a function which saves all caller-save
795  * registers except for the return value. This allows C functions to
796  * be called from assembler code where fewer than normal registers are
797  * available. It may also help code generation around calls from C
798  * code if the common case doesn't use many registers.
799  *
800  * When a callee is wrapped in a thunk, the caller can assume that all
801  * arg regs and all scratch registers are preserved across the
802  * call. The return value in rax/eax will not be saved, even for void
803  * functions.
804  */
805 #define PV_CALLEE_SAVE_REGS_THUNK(func) \
806  extern typeof(func) __raw_callee_save_##func; \
807  static void *__##func##__ __used = func; \
808  \
809  asm(".pushsection .text;" \
810  "__raw_callee_save_" #func ": " \
811  PV_SAVE_ALL_CALLER_REGS \
812  "call " #func ";" \
813  PV_RESTORE_ALL_CALLER_REGS \
814  "ret;" \
815  ".popsection")
816 
817 /* Get a reference to a callee-save function */
818 #define PV_CALLEE_SAVE(func) \
819  ((struct paravirt_callee_save) { __raw_callee_save_##func })
820 
821 /* Promise that "func" already uses the right calling convention */
822 #define __PV_IS_CALLEE_SAVE(func) \
823  ((struct paravirt_callee_save) { func })
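As an illustration of the intended pattern (the my_guest_* names are hypothetical; guests such as lguest wire up pv_irq_ops.save_fl in roughly this way): implement the op as ordinary C, let PV_CALLEE_SAVE_REGS_THUNK() emit the register-preserving wrapper, and install the wrapped pointer with PV_CALLEE_SAVE().

static unsigned long my_guest_save_fl(void)
{
	return my_guest_irq_state();	/* hypothetical: the guest's cached IF state */
}
PV_CALLEE_SAVE_REGS_THUNK(my_guest_save_fl);

static void __init my_guest_init_irq_ops(void)
{
	/* The op now points at the generated __raw_callee_save_my_guest_save_fl thunk. */
	pv_irq_ops.save_fl = PV_CALLEE_SAVE(my_guest_save_fl);
}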
824 
825 static inline notrace unsigned long arch_local_save_flags(void)
826 {
827  return PVOP_CALLEE0(unsigned long, pv_irq_ops.save_fl);
828 }
829 
830 static inline notrace void arch_local_irq_restore(unsigned long f)
831 {
832  PVOP_VCALLEE1(pv_irq_ops.restore_fl, f);
833 }
834 
835 static inline notrace void arch_local_irq_disable(void)
836 {
837  PVOP_VCALLEE0(pv_irq_ops.irq_disable);
838 }
839 
840 static inline notrace void arch_local_irq_enable(void)
841 {
842  PVOP_VCALLEE0(pv_irq_ops.irq_enable);
843 }
844 
845 static inline notrace unsigned long arch_local_irq_save(void)
846 {
847  unsigned long f;
848 
849  f = arch_local_save_flags();
850  arch_local_irq_disable();
851  return f;
852 }
853 
854 
855 /* Make sure as little as possible of this mess escapes. */
856 #undef PARAVIRT_CALL
857 #undef __PVOP_CALL
858 #undef __PVOP_VCALL
859 #undef PVOP_VCALL0
860 #undef PVOP_CALL0
861 #undef PVOP_VCALL1
862 #undef PVOP_CALL1
863 #undef PVOP_VCALL2
864 #undef PVOP_CALL2
865 #undef PVOP_VCALL3
866 #undef PVOP_CALL3
867 #undef PVOP_VCALL4
868 #undef PVOP_CALL4
869 
870 extern void default_banner(void);
871 
872 #else /* __ASSEMBLY__ */
873 
874 #define _PVSITE(ptype, clobbers, ops, word, algn) \
875 771:; \
876  ops; \
877 772:; \
878  .pushsection .parainstructions,"a"; \
879  .align algn; \
880  word 771b; \
881  .byte ptype; \
882  .byte 772b-771b; \
883  .short clobbers; \
884  .popsection
885 
886 
887 #define COND_PUSH(set, mask, reg) \
888  .if ((~(set)) & mask); push %reg; .endif
889 #define COND_POP(set, mask, reg) \
890  .if ((~(set)) & mask); pop %reg; .endif
891 
892 #ifdef CONFIG_X86_64
893 
894 #define PV_SAVE_REGS(set) \
895  COND_PUSH(set, CLBR_RAX, rax); \
896  COND_PUSH(set, CLBR_RCX, rcx); \
897  COND_PUSH(set, CLBR_RDX, rdx); \
898  COND_PUSH(set, CLBR_RSI, rsi); \
899  COND_PUSH(set, CLBR_RDI, rdi); \
900  COND_PUSH(set, CLBR_R8, r8); \
901  COND_PUSH(set, CLBR_R9, r9); \
902  COND_PUSH(set, CLBR_R10, r10); \
903  COND_PUSH(set, CLBR_R11, r11)
904 #define PV_RESTORE_REGS(set) \
905  COND_POP(set, CLBR_R11, r11); \
906  COND_POP(set, CLBR_R10, r10); \
907  COND_POP(set, CLBR_R9, r9); \
908  COND_POP(set, CLBR_R8, r8); \
909  COND_POP(set, CLBR_RDI, rdi); \
910  COND_POP(set, CLBR_RSI, rsi); \
911  COND_POP(set, CLBR_RDX, rdx); \
912  COND_POP(set, CLBR_RCX, rcx); \
913  COND_POP(set, CLBR_RAX, rax)
914 
915 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 8)
916 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .quad, 8)
917 #define PARA_INDIRECT(addr) *addr(%rip)
918 #else
919 #define PV_SAVE_REGS(set) \
920  COND_PUSH(set, CLBR_EAX, eax); \
921  COND_PUSH(set, CLBR_EDI, edi); \
922  COND_PUSH(set, CLBR_ECX, ecx); \
923  COND_PUSH(set, CLBR_EDX, edx)
924 #define PV_RESTORE_REGS(set) \
925  COND_POP(set, CLBR_EDX, edx); \
926  COND_POP(set, CLBR_ECX, ecx); \
927  COND_POP(set, CLBR_EDI, edi); \
928  COND_POP(set, CLBR_EAX, eax)
929 
930 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
931 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
932 #define PARA_INDIRECT(addr) *%cs:addr
933 #endif
934 
935 #define INTERRUPT_RETURN \
936  PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_iret), CLBR_NONE, \
937  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_iret))
938 
939 #define DISABLE_INTERRUPTS(clobbers) \
940  PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_disable), clobbers, \
941  PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE); \
942  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_disable); \
943  PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)
944 
945 #define ENABLE_INTERRUPTS(clobbers) \
946  PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_enable), clobbers, \
947  PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE); \
948  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_enable); \
949  PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)
950 
951 #define USERGS_SYSRET32 \
952  PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret32), \
953  CLBR_NONE, \
954  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret32))
955 
956 #ifdef CONFIG_X86_32
957 #define GET_CR0_INTO_EAX \
958  push %ecx; push %edx; \
959  call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
960  pop %edx; pop %ecx
961 
962 #define ENABLE_INTERRUPTS_SYSEXIT \
963  PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
964  CLBR_NONE, \
965  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
966 
967 
968 #else /* !CONFIG_X86_32 */
969 
970 /*
971  * If swapgs is used while the userspace stack is still current,
972  * there's no way to call a pvop. The PV replacement *must* be
973  * inlined, or the swapgs instruction must be trapped and emulated.
974  */
975 #define SWAPGS_UNSAFE_STACK \
976  PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE, \
977  swapgs)
978 
979 /*
980  * Note: swapgs is very special, and in practice is either going to be
981  * implemented with a single "swapgs" instruction or something very
982  * special. Either way, we don't need to save any registers for
983  * it.
984  */
985 #define SWAPGS \
986  PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE, \
987  call PARA_INDIRECT(pv_cpu_ops+PV_CPU_swapgs) \
988  )
989 
990 #define GET_CR2_INTO_RAX \
991  call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr2)
992 
993 #define PARAVIRT_ADJUST_EXCEPTION_FRAME \
994  PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_adjust_exception_frame), \
995  CLBR_NONE, \
996  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_adjust_exception_frame))
997 
998 #define USERGS_SYSRET64 \
999  PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret64), \
1000  CLBR_NONE, \
1001  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret64))
1002 
1003 #define ENABLE_INTERRUPTS_SYSEXIT32 \
1004  PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
1005  CLBR_NONE, \
1006  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
1007 #endif /* CONFIG_X86_32 */
1008 
1009 #endif /* __ASSEMBLY__ */
1010 #else /* CONFIG_PARAVIRT */
1011 # define default_banner x86_init_noop
1012 #endif /* !CONFIG_PARAVIRT */
1013 #endif /* _ASM_X86_PARAVIRT_H */