#ifndef _ASM_X86_PARAVIRT_H
#define _ASM_X86_PARAVIRT_H
/* Various instructions on x86 need to be replaced for
 * para-virtualization: those hooks are defined here. */

#ifdef CONFIG_PARAVIRT

#ifndef __ASSEMBLY__
#include <linux/types.h>
/* The paravirtualized CPUID instruction. */
static inline void __cpuid(unsigned int *eax, unsigned int *ebx,
			   unsigned int *ecx, unsigned int *edx)
{
	PVOP_VCALL4(pv_cpu_ops.cpuid, eax, ebx, ecx, edx);
}
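/*
 * Example (illustrative, not part of this header; variable names are
 * hypothetical): fetching CPUID leaf 0 through the paravirt hook.
 *
 *	unsigned int eax = 0, ebx, ecx, edx;
 *	__cpuid(&eax, &ebx, &ecx, &edx);
 *	// eax now holds the highest standard leaf; ebx/edx/ecx the vendor id
 */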
/*
 * These special macros can be used to get or set a debugging register
 */
static inline unsigned long paravirt_get_debugreg(int reg)
{
	return PVOP_CALL1(unsigned long, pv_cpu_ops.get_debugreg, reg);
}
#define get_debugreg(var, reg) var = paravirt_get_debugreg(reg)
static inline void set_debugreg(unsigned long val, int reg)
{
	PVOP_VCALL2(pv_cpu_ops.set_debugreg, reg, val);
}
static inline void clts(void)
{
	PVOP_VCALL0(pv_cpu_ops.clts);
}

static inline unsigned long read_cr0(void)
{
	return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr0);
}

static inline void write_cr0(unsigned long x)
{
	PVOP_VCALL1(pv_cpu_ops.write_cr0, x);
}

static inline unsigned long read_cr2(void)
{
	return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr2);
}

static inline void write_cr2(unsigned long x)
{
	PVOP_VCALL1(pv_mmu_ops.write_cr2, x);
}

static inline unsigned long read_cr3(void)
{
	return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr3);
}

static inline void write_cr3(unsigned long x)
{
	PVOP_VCALL1(pv_mmu_ops.write_cr3, x);
}

static inline unsigned long read_cr4(void)
{
	return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr4);
}

static inline unsigned long read_cr4_safe(void)
{
	return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr4_safe);
}

static inline void write_cr4(unsigned long x)
{
	PVOP_VCALL1(pv_cpu_ops.write_cr4, x);
}
#ifdef CONFIG_X86_64
static inline unsigned long read_cr8(void)
{
	return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr8);
}

static inline void write_cr8(unsigned long x)
{
	PVOP_VCALL1(pv_cpu_ops.write_cr8, x);
}
#endif
static inline void arch_safe_halt(void)
{
	PVOP_VCALL0(pv_irq_ops.safe_halt);
}

static inline void halt(void)
{
	PVOP_VCALL0(pv_irq_ops.halt);
}
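/*
 * Note: in the native case safe_halt is the atomic "sti; hlt" pair,
 * while halt() leaves the interrupt flag alone; both route through
 * pv_irq_ops so a hypervisor can yield the vCPU instead of halting.
 */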
static inline void wbinvd(void)
{
	PVOP_VCALL0(pv_cpu_ops.wbinvd);
}
#define get_kernel_rpl()	(pv_info.kernel_rpl)
static inline u64 paravirt_read_msr(unsigned msr, int *err)
{
	return PVOP_CALL2(u64, pv_cpu_ops.read_msr, msr, err);
}

static inline int paravirt_write_msr(unsigned msr, unsigned low, unsigned high)
{
	return PVOP_CALL3(int, pv_cpu_ops.write_msr, msr, low, high);
}
/* These should all do BUG_ON(_err), but our headers are too tangled. */
#define rdmsr(msr, val1, val2)			\
do {						\
	int _err;				\
	u64 _l = paravirt_read_msr(msr, &_err);	\
	val1 = (u32)_l;				\
	val2 = _l >> 32;			\
} while (0)

#define wrmsr(msr, val1, val2)			\
do {						\
	paravirt_write_msr(msr, val1, val2);	\
} while (0)

#define rdmsrl(msr, val)			\
do {						\
	int _err;				\
	val = paravirt_read_msr(msr, &_err);	\
} while (0)

#define wrmsrl(msr, val)	wrmsr(msr, (u32)((u64)(val)), ((u64)(val))>>32)
#define wrmsr_safe(msr, a, b)	paravirt_write_msr(msr, a, b)
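/*
 * Example (illustrative; MSR_EFER is just a convenient choice of
 * register): the macros split or join the 64-bit MSR value.
 *
 *	unsigned low, high;
 *	u64 whole;
 *	rdmsr(MSR_EFER, low, high);
 *	rdmsrl(MSR_EFER, whole);
 *	wrmsrl(MSR_EFER, whole);
 */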
/* rdmsr with exception handling */
#define rdmsr_safe(msr, a, b)			\
({						\
	int _err;				\
	u64 _l = paravirt_read_msr(msr, &_err);	\
	(*a) = (u32)_l;				\
	(*b) = _l >> 32;			\
	_err;					\
})

static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
{
	int err;

	*p = paravirt_read_msr(msr, &err);
	return err;
}
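/*
 * Example (illustrative): unlike rdmsrl(), rdmsrl_safe() reports the
 * trap status, so callers can probe MSRs that may not exist:
 *
 *	unsigned long long val;
 *	if (rdmsrl_safe(msr, &val))
 *		return -EIO;	// hypothetical error path
 */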
static inline u64 paravirt_read_tsc(void)
{
	return PVOP_CALL0(u64, pv_cpu_ops.read_tsc);
}

#define rdtscl(low)				\
do {						\
	u64 _l = paravirt_read_tsc();		\
	low = (int)_l;				\
} while (0)

#define rdtscll(val) (val = paravirt_read_tsc())
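/*
 * Example (illustrative; the helper and variable names are
 * hypothetical): a raw cycle delta via the paravirt TSC read.  Under a
 * hypervisor the hook may trap or scale the value.
 *
 *	u64 t0, t1;
 *	rdtscll(t0);
 *	do_work();
 *	rdtscll(t1);
 *	cycles = t1 - t0;
 */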
static inline unsigned long long paravirt_sched_clock(void)
{
	return PVOP_CALL0(unsigned long long, pv_time_ops.sched_clock);
}

struct static_key;
extern struct static_key paravirt_steal_enabled;
extern struct static_key paravirt_steal_rq_enabled;

static inline u64 paravirt_steal_clock(int cpu)
{
	return PVOP_CALL1(u64, pv_time_ops.steal_clock, cpu);
}
static inline unsigned long long paravirt_read_pmc(int counter)
{
	return PVOP_CALL1(u64, pv_cpu_ops.read_pmc, counter);
}

#define rdpmc(counter, low, high)		\
do {						\
	u64 _l = paravirt_read_pmc(counter);	\
	low = (u32)_l;				\
	high = _l >> 32;			\
} while (0)

#define rdpmcl(counter, val) ((val) = paravirt_read_pmc(counter))
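/*
 * Example (illustrative): reading performance counter 0 either split
 * into 32-bit halves or as one 64-bit value.
 *
 *	u32 lo, hi;
 *	u64 whole;
 *	rdpmc(0, lo, hi);
 *	rdpmcl(0, whole);
 */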
static inline unsigned long long paravirt_rdtscp(unsigned int *aux)
{
	return PVOP_CALL1(u64, pv_cpu_ops.read_tscp, aux);
}

#define rdtscp(low, high, aux)				\
do {							\
	unsigned int __aux;				\
	unsigned long __val = paravirt_rdtscp(&__aux);	\
	(low) = (u32)__val;				\
	(high) = (u32)(__val >> 32);			\
	(aux) = __aux;					\
} while (0)

#define rdtscpll(val, aux)				\
do {							\
	unsigned int __aux;				\
	val = paravirt_rdtscp(&__aux);			\
	(aux) = __aux;					\
} while (0)
static inline unsigned long paravirt_store_tr(void)
{
	return PVOP_CALL0(unsigned long, pv_cpu_ops.store_tr);
}
#define store_tr(tr)	((tr) = paravirt_store_tr())
#ifdef CONFIG_X86_64
static inline void load_gs_index(unsigned int gs)
{
	PVOP_VCALL1(pv_cpu_ops.load_gs_index, gs);
}
#endif
static inline void slow_down_io(void)
{
	pv_cpu_ops.io_delay();
#ifdef REALLY_SLOW_IO
	pv_cpu_ops.io_delay();
	pv_cpu_ops.io_delay();
	pv_cpu_ops.io_delay();
#endif
}
#ifdef CONFIG_SMP
static inline void startup_ipi_hook(int phys_apicid, unsigned long start_eip,
				    unsigned long start_esp)
{
	PVOP_VCALL3(pv_apic_ops.startup_ipi_hook,
		    phys_apicid, start_eip, start_esp);
}
#endif
static inline void paravirt_activate_mm(struct mm_struct *prev,
					struct mm_struct *next)
{
	PVOP_VCALL2(pv_mmu_ops.activate_mm, prev, next);
}
static inline void arch_dup_mmap(struct mm_struct *oldmm,
				 struct mm_struct *mm)
{
	PVOP_VCALL2(pv_mmu_ops.dup_mmap, oldmm, mm);
}
static inline void paravirt_pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	PVOP_VCALL2(pv_mmu_ops.pgd_free, mm, pgd);
}

static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned long pfn)
{
	PVOP_VCALL2(pv_mmu_ops.alloc_pte, mm, pfn);
}

static inline void paravirt_release_pte(unsigned long pfn)
{
	PVOP_VCALL1(pv_mmu_ops.release_pte, pfn);
}

static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
{
	PVOP_VCALL2(pv_mmu_ops.alloc_pmd, mm, pfn);
}

static inline void paravirt_release_pmd(unsigned long pfn)
{
	PVOP_VCALL1(pv_mmu_ops.release_pmd, pfn);
}

static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned long pfn)
{
	PVOP_VCALL2(pv_mmu_ops.alloc_pud, mm, pfn);
}

static inline void paravirt_release_pud(unsigned long pfn)
{
	PVOP_VCALL1(pv_mmu_ops.release_pud, pfn);
}
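/*
 * Note: the alloc/release hooks above bracket the lifetime of a page
 * used as a page table, so a hypervisor (e.g. Xen) can pin, unpin or
 * write-protect the frame identified by pfn.
 */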
static inline pte_t __pte(pteval_t val)
{
	pteval_t ret;

	if (sizeof(pteval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pteval_t, pv_mmu_ops.make_pte,
				   val, (u64)val >> 32);
	else
		ret = PVOP_CALLEE1(pteval_t, pv_mmu_ops.make_pte, val);

	return (pte_t) { .pte = ret };
}
static inline pteval_t pte_val(pte_t pte)
{
	pteval_t ret;

	if (sizeof(pteval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pteval_t, pv_mmu_ops.pte_val,
				   pte.pte, (u64)pte.pte >> 32);
	else
		ret = PVOP_CALLEE1(pteval_t, pv_mmu_ops.pte_val, pte.pte);

	return ret;
}
static inline pgd_t __pgd(pgdval_t val)
{
	pgdval_t ret;

	if (sizeof(pgdval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pgdval_t, pv_mmu_ops.make_pgd,
				   val, (u64)val >> 32);
	else
		ret = PVOP_CALLEE1(pgdval_t, pv_mmu_ops.make_pgd, val);

	return (pgd_t) { ret };
}
static inline pgdval_t pgd_val(pgd_t pgd)
{
	pgdval_t ret;

	if (sizeof(pgdval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pgdval_t, pv_mmu_ops.pgd_val,
				   pgd.pgd, (u64)pgd.pgd >> 32);
	else
		ret = PVOP_CALLEE1(pgdval_t, pv_mmu_ops.pgd_val, pgd.pgd);

	return ret;
}
#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
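/*
 * ptep_modify_prot_start/commit let generic mm code rewrite a pte's
 * protection bits as one logical transaction.  A caller does, roughly
 * (sketch, with a hypothetical modification in the middle):
 *
 *	pte = ptep_modify_prot_start(mm, addr, ptep);
 *	pte = pte_mkwrite(pte);			// example change
 *	ptep_modify_prot_commit(mm, addr, ptep, pte);
 */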
static inline pte_t ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr,
					   pte_t *ptep)
{
	pteval_t ret;

	ret = PVOP_CALL3(pteval_t, pv_mmu_ops.ptep_modify_prot_start,
			 mm, addr, ptep);

	return (pte_t) { .pte = ret };
}
static inline void ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
					   pte_t *ptep, pte_t pte)
{
	if (sizeof(pteval_t) > sizeof(long))
		/* 5 arg words */
		pv_mmu_ops.ptep_modify_prot_commit(mm, addr, ptep, pte);
	else
		PVOP_VCALL4(pv_mmu_ops.ptep_modify_prot_commit,
			    mm, addr, ptep, pte.pte);
}
static inline void set_pte(pte_t *ptep, pte_t pte)
{
	if (sizeof(pteval_t) > sizeof(long))
		PVOP_VCALL3(pv_mmu_ops.set_pte, ptep,
			    pte.pte, (u64)pte.pte >> 32);
	else
		PVOP_VCALL2(pv_mmu_ops.set_pte, ptep, pte.pte);
}
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pte)
{
	if (sizeof(pteval_t) > sizeof(long))
		/* 5 arg words */
		pv_mmu_ops.set_pte_at(mm, addr, ptep, pte);
	else
		PVOP_VCALL4(pv_mmu_ops.set_pte_at, mm, addr, ptep, pte.pte);
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
			      pmd_t *pmdp, pmd_t pmd)
{
	if (sizeof(pmdval_t) > sizeof(long))
		/* 5 arg words */
		pv_mmu_ops.set_pmd_at(mm, addr, pmdp, pmd);
	else
		PVOP_VCALL4(pv_mmu_ops.set_pmd_at, mm, addr, pmdp,
			    native_pmd_val(pmd));
}
#endif
static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	pmdval_t val = native_pmd_val(pmd);

	if (sizeof(pmdval_t) > sizeof(long))
		PVOP_VCALL3(pv_mmu_ops.set_pmd, pmdp, val, (u64)val >> 32);
	else
		PVOP_VCALL2(pv_mmu_ops.set_pmd, pmdp, val);
}
#if PAGETABLE_LEVELS >= 3
static inline pmd_t __pmd(pmdval_t val)
{
	pmdval_t ret;

	if (sizeof(pmdval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pmdval_t, pv_mmu_ops.make_pmd,
				   val, (u64)val >> 32);
	else
		ret = PVOP_CALLEE1(pmdval_t, pv_mmu_ops.make_pmd, val);

	return (pmd_t) { ret };
}
static inline pmdval_t pmd_val(pmd_t pmd)
{
	pmdval_t ret;

	if (sizeof(pmdval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pmdval_t, pv_mmu_ops.pmd_val,
				   pmd.pmd, (u64)pmd.pmd >> 32);
	else
		ret = PVOP_CALLEE1(pmdval_t, pv_mmu_ops.pmd_val, pmd.pmd);

	return ret;
}
static inline void set_pud(pud_t *pudp, pud_t pud)
{
	pudval_t val = native_pud_val(pud);

	if (sizeof(pudval_t) > sizeof(long))
		PVOP_VCALL3(pv_mmu_ops.set_pud, pudp,
			    val, (u64)val >> 32);
	else
		PVOP_VCALL2(pv_mmu_ops.set_pud, pudp, val);
}
#if PAGETABLE_LEVELS == 4
static inline pud_t __pud(pudval_t val)
{
	pudval_t ret;

	if (sizeof(pudval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pudval_t, pv_mmu_ops.make_pud,
				   val, (u64)val >> 32);
	else
		ret = PVOP_CALLEE1(pudval_t, pv_mmu_ops.make_pud, val);

	return (pud_t) { ret };
}
static inline pudval_t pud_val(pud_t pud)
{
	pudval_t ret;

	if (sizeof(pudval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pudval_t, pv_mmu_ops.pud_val,
				   pud.pud, (u64)pud.pud >> 32);
	else
		ret = PVOP_CALLEE1(pudval_t, pv_mmu_ops.pud_val, pud.pud);

	return ret;
}
static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
{
	pgdval_t val = native_pgd_val(pgd);

	if (sizeof(pgdval_t) > sizeof(long))
		PVOP_VCALL3(pv_mmu_ops.set_pgd, pgdp,
			    val, (u64)val >> 32);
	else
		PVOP_VCALL2(pv_mmu_ops.set_pgd, pgdp, val);
}
#endif	/* PAGETABLE_LEVELS == 4 */
#endif	/* PAGETABLE_LEVELS >= 3 */
#ifdef CONFIG_X86_PAE
/* Special-case pte-setting operations for PAE, which can't update a
   64-bit pte atomically */
static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
{
	PVOP_VCALL3(pv_mmu_ops.set_pte_atomic, ptep, pte.pte, pte.pte >> 32);
}
#endif	/* CONFIG_X86_PAE */
#define __HAVE_ARCH_START_CONTEXT_SWITCH
static inline void arch_start_context_switch(struct task_struct *prev)
{
	PVOP_VCALL1(pv_cpu_ops.start_context_switch, prev);
}

static inline void arch_end_context_switch(struct task_struct *next)
{
	PVOP_VCALL1(pv_cpu_ops.end_context_switch, next);
}

#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
static inline void arch_enter_lazy_mmu_mode(void)
{
	PVOP_VCALL0(pv_mmu_ops.lazy_mode.enter);
}

static inline void arch_leave_lazy_mmu_mode(void)
{
	PVOP_VCALL0(pv_mmu_ops.lazy_mode.leave);
}
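/*
 * The lazy-mode hooks above let a hypervisor batch a run of page-table
 * updates (or the state changes of a context switch) and flush them
 * with a single hypercall on leave, instead of trapping each write.
 */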
#if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)

static inline int arch_spin_is_locked(struct arch_spinlock *lock)
{
	return PVOP_CALL1(int, pv_lock_ops.spin_is_locked, lock);
}

static inline int arch_spin_is_contended(struct arch_spinlock *lock)
{
	return PVOP_CALL1(int, pv_lock_ops.spin_is_contended, lock);
}
#define arch_spin_is_contended	arch_spin_is_contended

#endif	/* SMP && PARAVIRT_SPINLOCKS */
#ifdef CONFIG_X86_32
#define PV_SAVE_REGS "pushl %ecx; pushl %edx;"
#define PV_RESTORE_REGS "popl %edx; popl %ecx;"

/* save and restore all caller-save registers, except return value */
#define PV_SAVE_ALL_CALLER_REGS		"pushl %ecx;"
#define PV_RESTORE_ALL_CALLER_REGS	"popl %ecx;"

#define PV_FLAGS_ARG "0"
#define PV_EXTRA_CLOBBERS
#define PV_VEXTRA_CLOBBERS
#else
/* save and restore all caller-save registers, except return value */
#define PV_SAVE_ALL_CALLER_REGS						\
	"push %rcx;"							\
	"push %rdx;"							\
	"push %rsi;"							\
	"push %rdi;"							\
	"push %r8;"							\
	"push %r9;"							\
	"push %r10;"							\
	"push %r11;"
#define PV_RESTORE_ALL_CALLER_REGS					\
	"pop %r11;"							\
	"pop %r10;"							\
	"pop %r9;"							\
	"pop %r8;"							\
	"pop %rdi;"							\
	"pop %rsi;"							\
	"pop %rdx;"							\
	"pop %rcx;"

#define PV_SAVE_REGS "pushq %%rdi;"
#define PV_RESTORE_REGS "popq %%rdi;"
#define PV_EXTRA_CLOBBERS EXTRA_CLOBBERS, "rcx" , "rdx", "rsi"
#define PV_VEXTRA_CLOBBERS EXTRA_CLOBBERS, "rdi", "rcx" , "rdx", "rsi"
#define PV_FLAGS_ARG "D"
#endif
/*
 * Generate a thunk around a function which saves all caller-save
 * registers except for the return value.
 */
#define PV_CALLEE_SAVE_REGS_THUNK(func)					\
	extern typeof(func) __raw_callee_save_##func;			\
	static void *__##func##__ __used = func;			\
									\
	asm(".pushsection .text;"					\
	    "__raw_callee_save_" #func ": "				\
	    PV_SAVE_ALL_CALLER_REGS					\
	    "call " #func ";"						\
	    PV_RESTORE_ALL_CALLER_REGS					\
	    "ret;"							\
	    ".popsection")
/* Get a reference to a callee-save function */
#define PV_CALLEE_SAVE(func)						\
	((struct paravirt_callee_save) { __raw_callee_save_##func })

#define __PV_IS_CALLEE_SAVE(func)					\
	((struct paravirt_callee_save) { func })
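/*
 * Example (illustrative; mirrors how backends wire these up):
 * generate the thunk for a flag-save helper, then install it as a
 * callee-save pv op.
 *
 *	PV_CALLEE_SAVE_REGS_THUNK(xen_save_fl);
 *	pv_irq_ops.save_fl = PV_CALLEE_SAVE(xen_save_fl);
 */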
#else	/* __ASSEMBLY__ */

#define _PVSITE(ptype, clobbers, ops, word, algn)	\
771:;						\
	ops;					\
772:;						\
	.pushsection .parainstructions,"a";	\
	 .align	algn;				\
	 word 771b;				\
	 .byte ptype;				\
	 .byte 772b-771b;			\
	 .short clobbers;			\
	.popsection
#define COND_PUSH(set, mask, reg)			\
	.if ((~(set)) & mask); push %reg; .endif
#define COND_POP(set, mask, reg)			\
	.if ((~(set)) & mask); pop %reg; .endif
#ifdef CONFIG_X86_64
#define PV_SAVE_REGS(set)			\
	COND_PUSH(set, CLBR_RAX, rax);		\
	COND_PUSH(set, CLBR_RCX, rcx);		\
	COND_PUSH(set, CLBR_RDX, rdx);		\
	COND_PUSH(set, CLBR_RSI, rsi);		\
	COND_PUSH(set, CLBR_RDI, rdi);		\
	COND_PUSH(set, CLBR_R8, r8);		\
	COND_PUSH(set, CLBR_R9, r9);		\
	COND_PUSH(set, CLBR_R10, r10);		\
	COND_PUSH(set, CLBR_R11, r11)
#define PV_RESTORE_REGS(set)			\
	COND_POP(set, CLBR_R11, r11);		\
	COND_POP(set, CLBR_R10, r10);		\
	COND_POP(set, CLBR_R9, r9);		\
	COND_POP(set, CLBR_R8, r8);		\
	COND_POP(set, CLBR_RDI, rdi);		\
	COND_POP(set, CLBR_RSI, rsi);		\
	COND_POP(set, CLBR_RDX, rdx);		\
	COND_POP(set, CLBR_RCX, rcx);		\
	COND_POP(set, CLBR_RAX, rax)

#define PARA_PATCH(struct, off)		((PARAVIRT_PATCH_##struct + (off)) / 8)
#define PARA_SITE(ptype, clobbers, ops)	_PVSITE(ptype, clobbers, ops, .quad, 8)
#define PARA_INDIRECT(addr)	*addr(%rip)
#else
#define PV_SAVE_REGS(set)			\
	COND_PUSH(set, CLBR_EAX, eax);		\
	COND_PUSH(set, CLBR_EDI, edi);		\
	COND_PUSH(set, CLBR_ECX, ecx);		\
	COND_PUSH(set, CLBR_EDX, edx)
#define PV_RESTORE_REGS(set)			\
	COND_POP(set, CLBR_EDX, edx);		\
	COND_POP(set, CLBR_ECX, ecx);		\
	COND_POP(set, CLBR_EDI, edi);		\
	COND_POP(set, CLBR_EAX, eax)

#define PARA_PATCH(struct, off)		((PARAVIRT_PATCH_##struct + (off)) / 4)
#define PARA_SITE(ptype, clobbers, ops)	_PVSITE(ptype, clobbers, ops, .long, 4)
#define PARA_INDIRECT(addr)	*%cs:addr
#endif
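/*
 * PARA_SITE emits the default ops inline and records the site in the
 * .parainstructions section; at boot, apply_paravirt() can rewrite the
 * site with a native instruction sequence, removing the indirection.
 */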
#define INTERRUPT_RETURN						\
	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_iret), CLBR_NONE,	\
		  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_iret))
#define DISABLE_INTERRUPTS(clobbers)					\
	PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_disable), clobbers,	\
		  PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE);		\
		  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_disable);	\
		  PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)
#define ENABLE_INTERRUPTS(clobbers)					\
	PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_enable), clobbers,	\
		  PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE);		\
		  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_enable);	\
		  PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)
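/*
 * Example (illustrative; the entry_*.S call sites look like this):
 * the clobber argument tells PV_SAVE_REGS which registers the call
 * site can spare.
 *
 *	DISABLE_INTERRUPTS(CLBR_NONE)
 *	# ...code that must run with irqs off...
 *	ENABLE_INTERRUPTS(CLBR_NONE)
 */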
#define USERGS_SYSRET32							\
	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret32),	\
		  CLBR_NONE,						\
		  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret32))
#ifdef CONFIG_X86_32
#define GET_CR0_INTO_EAX				\
	push %ecx; push %edx;				\
	call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);	\
	pop %edx; pop %ecx
#define ENABLE_INTERRUPTS_SYSEXIT					\
	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit),	\
		  CLBR_NONE,						\
		  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
#else	/* !CONFIG_X86_32 */

/*
 * If swapgs is used while the userspace stack is still current,
 * there's no way to call a pvop.  The PV replacement *must* be
 * inlined, or the swapgs instruction must be trapped and emulated.
 */
#define SWAPGS_UNSAFE_STACK						\
	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE,	\
		  swapgs)
#define SWAPGS								\
	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE,	\
		  call PARA_INDIRECT(pv_cpu_ops+PV_CPU_swapgs)		\
		 )
#define GET_CR2_INTO_RAX				\
	call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr2)
#define PARAVIRT_ADJUST_EXCEPTION_FRAME					\
	PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_adjust_exception_frame), \
		  CLBR_NONE,						\
		  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_adjust_exception_frame))
#define USERGS_SYSRET64							\
	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret64),	\
		  CLBR_NONE,						\
		  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret64))
#define ENABLE_INTERRUPTS_SYSEXIT32					\
	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit),	\
		  CLBR_NONE,						\
		  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
#endif	/* CONFIG_X86_32 */
#endif /* __ASSEMBLY__ */
#else  /* CONFIG_PARAVIRT */
# define default_banner x86_init_noop
#endif /* !CONFIG_PARAVIRT */
#endif /* _ASM_X86_PARAVIRT_H */