#ifndef _ASM_X86_PROCESSOR_H
#define _ASM_X86_PROCESSOR_H
#include <asm/segment.h>
#include <asm/types.h>
#include <asm/sigcontext.h>
#include <asm/current.h>
#include <asm/cpufeature.h>
#include <asm/percpu.h>
#include <asm/special_insns.h>

#include <linux/personality.h>
/*
 * x86 handles unaligned accesses in hardware, so no extra IP-header
 * alignment is needed in network drivers.
 */
#define NET_IP_ALIGN	0
/*
 * Return the current instruction pointer: the asm stores the address
 * of the local label "1:" (i.e. the next instruction) into pc.
 */
static inline void *current_text_addr(void)
{
	void *pc;

	asm volatile("mov $1f, %0; 1:" : "=r" (pc));

	return pc;
}
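/*
 * vSMP systems run on clusters whose internode "cache line" is much
 * larger than a CPU cache line (INTERNODE_CACHE_SHIFT is 12, i.e.
 * 4096 bytes, under CONFIG_X86_VSMP; see asm/cache.h). Task and mm
 * structures are aligned to it to avoid cross-node false sharing.
 */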
#ifdef CONFIG_X86_VSMP
# define ARCH_MIN_TASKALIGN		(1 << INTERNODE_CACHE_SHIFT)
# define ARCH_MIN_MMSTRUCT_ALIGN	(1 << INTERNODE_CACHE_SHIFT)
#else
# define ARCH_MIN_TASKALIGN		16
# define ARCH_MIN_MMSTRUCT_ALIGN	0
#endif
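/*
 * Values for cpuinfo_x86.x86_vendor. The gaps at 4 and 6 belonged to
 * the since-removed NexGen and Rise entries; the slots stay unused so
 * the remaining numbering is stable.
 */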
#define X86_VENDOR_INTEL	0
#define X86_VENDOR_CYRIX	1
#define X86_VENDOR_AMD		2
#define X86_VENDOR_UMC		3
#define X86_VENDOR_CENTAUR	5
#define X86_VENDOR_TRANSMETA	7
#define X86_VENDOR_NSC		8
#define X86_VENDOR_NUM		9

#define X86_VENDOR_UNKNOWN	0xff
#ifdef CONFIG_SMP
#define cpu_data(cpu)		per_cpu(cpu_info, cpu)
#else
#define cpu_info		boot_cpu_data
#define cpu_data(cpu)		boot_cpu_data
#endif
static inline int hlt_works(int cpu)
{
#ifdef CONFIG_X86_32
	return cpu_data(cpu).hlt_works_ok;
#else
	return 1;
#endif
}
#define cache_line_size()	(boot_cpu_data.x86_cache_alignment)
static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
				unsigned int *ecx, unsigned int *edx)
{
	/* ecx is often an input as well as an output. */
	asm volatile("cpuid"
	    : "=a" (*eax),
	      "=b" (*ebx),
	      "=c" (*ecx),
	      "=d" (*edx)
	    : "0" (*eax), "2" (*ecx));
}
static inline void load_cr3(pgd_t *pgdir)
{
	write_cr3(__pa(pgdir));
}
/* The 32-bit hardware task-state segment, layout defined by the CPU: */
struct x86_hw_tss {
	unsigned short		back_link, __blh;
	unsigned long		sp0;
	unsigned short		ss0, __ss0h;
	unsigned long		sp1;
	/* ss1 caches MSR_IA32_SYSENTER_CS: */
	unsigned short		ss1, __ss1h;
	unsigned long		sp2;
	unsigned short		ss2, __ss2h;
	unsigned long		__cr3;
	unsigned long		ip;
	unsigned long		flags;
	unsigned long		ax;
	unsigned long		cx;
	unsigned long		dx;
	unsigned long		bx;
	unsigned long		sp;
	unsigned long		bp;
	unsigned long		si;
	unsigned long		di;
	unsigned short		es, __esh;
	unsigned short		cs, __csh;
	unsigned short		ss, __ssh;
	unsigned short		ds, __dsh;
	unsigned short		fs, __fsh;
	unsigned short		gs, __gsh;
	unsigned short		ldt, __ldth;
	unsigned short		trace;
	unsigned short		io_bitmap_base;
} __attribute__((packed));
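/*
 * The I/O permission bitmap covers all 65536 ports, one bit each, so
 * IO_BITMAP_BYTES is 8 KiB. Pointing io_bitmap_base past the TSS
 * segment limit (INVALID_IO_BITMAP_OFFSET) makes every userspace
 * IN/OUT fault instead of consulting a bitmap.
 */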
#define IO_BITMAP_BITS			65536
#define IO_BITMAP_BYTES			(IO_BITMAP_BITS/8)
#define IO_BITMAP_LONGS			(IO_BITMAP_BYTES/sizeof(long))
#define IO_BITMAP_OFFSET		offsetof(struct tss_struct, io_bitmap)
#define INVALID_IO_BITMAP_OFFSET	0x8000
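/*
 * Power-on default for MXCSR: all SSE exceptions masked (bits 7-12),
 * round to nearest, flush-to-zero off.
 */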
#define MXCSR_DEFAULT		0x1f80
union irq_stack_union {
	char irq_stack[IRQ_STACK_SIZE];
	/*
	 * GCC hardcodes the stack canary as %gs:40; since the irq stack
	 * is the object at %gs:0, the bottom 48 bytes are reserved for
	 * the canary.
	 */
	struct {
		char gs_base[40];
		unsigned long stack_canary;
	};
};
#ifdef CONFIG_CC_STACKPROTECTOR
/*
 * On 32-bit, GCC expects the canary at %gs:20, so pad it to that
 * offset within the per-cpu segment.
 */
struct stack_canary {
	char __pad[20];		/* canary at %gs:20 */
	unsigned long canary;
};
#endif
	/* struct thread_struct (32-bit): per-thread SYSENTER CS value */
	unsigned long		sysenter_cs;
	/* struct thread_struct (32-bit): virtual 86 mode info */
	unsigned long		v86flags;
	unsigned long		v86mask;
	unsigned long		saved_sp0;
	unsigned int		saved_fs;
	unsigned int		saved_gs;
static inline void native_set_iopl_mask(unsigned mask)
{
#ifdef CONFIG_X86_32
	unsigned int reg;

	/* Rewrite the IOPL field of EFLAGS via pushfl/popfl. */
	asm volatile ("pushfl; popl %0; andl %1, %0; orl %2, %0;"
		      "pushl %0; popfl"
		      : "=&r" (reg)
		      : "i" (~X86_EFLAGS_IOPL), "r" (mask));
#endif
}
static inline void
native_load_sp0(struct tss_struct *tss, struct thread_struct *thread)
{
	tss->x86_tss.sp0 = thread->sp0;
#ifdef CONFIG_X86_32
	/* Only happens when SEP is enabled: */
	if (unlikely(tss->x86_tss.ss1 != thread->sysenter_cs)) {
		tss->x86_tss.ss1 = thread->sysenter_cs;
		wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
	}
#endif
}
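/*
 * SWAPGS exchanges the current GS base with MSR_KERNEL_GS_BASE. The
 * 64-bit kernel runs it on entry from and exit to user mode so that
 * %gs-relative per-cpu accesses hit the kernel's per-cpu area.
 */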
static inline void native_swapgs(void)
{
#ifdef CONFIG_X86_64
	asm volatile("swapgs" ::: "memory");
#endif
}
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define __cpuid			native_cpuid
#define paravirt_enabled()	0
static inline void load_sp0(struct tss_struct *tss,
			    struct thread_struct *thread)
{
	native_load_sp0(tss, thread);
}

#define set_iopl_mask native_set_iopl_mask
#endif /* CONFIG_PARAVIRT */
static inline void set_in_cr4(unsigned long mask)
{
	unsigned long cr4;

	mmu_cr4_features |= mask;
	if (trampoline_cr4_features)
		*trampoline_cr4_features = mmu_cr4_features;
	cr4 = read_cr4();
	cr4 |= mask;
	write_cr4(cr4);
}

static inline void clear_in_cr4(unsigned long mask)
{
	unsigned long cr4;

	mmu_cr4_features &= ~mask;
	if (trampoline_cr4_features)
		*trampoline_cr4_features = mmu_cr4_features;
	cr4 = read_cr4();
	cr4 &= ~mask;
	write_cr4(cr4);
}
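/*
 * Generic CPUID wrappers. cpuid() zeroes ECX before the instruction
 * because some leaves (e.g. 0x4 and 0x7) treat ECX as a sub-leaf
 * index; callers that need a specific sub-leaf use cpuid_count().
 */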
static inline void cpuid(unsigned int op,
			 unsigned int *eax, unsigned int *ebx,
			 unsigned int *ecx, unsigned int *edx)
{
	*eax = op;
	*ecx = 0;
	__cpuid(eax, ebx, ecx, edx);
}
/* Some CPUID calls want 'count' to be placed in ecx */
static inline void cpuid_count(unsigned int op, int count,
			       unsigned int *eax, unsigned int *ebx,
			       unsigned int *ecx, unsigned int *edx)
{
	*eax = op;
	*ecx = count;
	__cpuid(eax, ebx, ecx, edx);
}
/*
 * CPUID functions returning a single datum:
 */
static inline unsigned int cpuid_eax(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);

	return eax;
}
static inline unsigned int cpuid_ebx(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);

	return ebx;
}
static inline unsigned int cpuid_ecx(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);

	return ecx;
}
static inline unsigned int cpuid_edx(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);

	return edx;
}
/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
static inline void rep_nop(void)
{
	asm volatile("rep; nop" ::: "memory");
}
/*
 * Force the CPU to re-synchronize its instruction stream, e.g. after
 * cross-modifying code; CPUID is a serializing instruction.
 */
static inline void sync_core(void)
{
	int tmp;

#if defined(CONFIG_M386) || defined(CONFIG_M486)
	if (boot_cpu_data.x86 < 5)
		/* There is no speculative execution.
		 * jmp is a barrier to prefetching. */
		asm volatile("jmp 1f\n1:\n" ::: "memory");
	else
#endif
	/* cpuid is a barrier to speculative execution.
	 * Prefetched instructions are automatically
	 * invalidated when modified. */
	asm volatile("cpuid" : "=a" (tmp) : "0" (1)
		     : "ebx", "ecx", "edx", "memory");
}
static inline void __monitor(const void *eax, unsigned long ecx,
			     unsigned long edx)
{
	/* "monitor %eax, %ecx, %edx;" */
	asm volatile(".byte 0x0f, 0x01, 0xc8;"
		     :: "a" (eax), "c" (ecx), "d"(edx));
}
static inline void __mwait(unsigned long eax, unsigned long ecx)
{
	/* "mwait %eax, %ecx;" */
	asm volatile(".byte 0x0f, 0x01, 0xc9;"
		     :: "a" (eax), "c" (ecx));
}
static inline void __sti_mwait(unsigned long eax, unsigned long ecx)
{
	trace_hardirqs_on();
	/* "mwait %eax, %ecx;" */
	asm volatile("sti; .byte 0x0f, 0x01, 0xc9;"
		     :: "a" (eax), "c" (ecx));
}
static inline unsigned long get_debugctlmsr(void)
{
	unsigned long debugctlmsr = 0;

#ifndef CONFIG_X86_DEBUGCTLMSR
	if (boot_cpu_data.x86 < 6)
		return 0;
#endif
	rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr);

	return debugctlmsr;
}

static inline void update_debugctlmsr(unsigned long debugctlmsr)
{
#ifndef CONFIG_X86_DEBUGCTLMSR
	if (boot_cpu_data.x86 < 6)
		return;
#endif
	wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr);
}
#define HAVE_ARCH_PICK_MMAP_LAYOUT 1
#define ARCH_HAS_PREFETCHW
#define ARCH_HAS_SPINLOCK_PREFETCH

#ifdef CONFIG_X86_32
# define BASE_PREFETCH		ASM_NOP4
# define ARCH_HAS_PREFETCH
#else
# define BASE_PREFETCH		"prefetcht0 (%1)"
#endif
/*
 * Prefetch instructions for Pentium III (+) and AMD Athlon (+).
 * Patched at boot: CPUs with SSE get prefetchnta, everything else the
 * BASE_PREFETCH fallback above.
 */
static inline void prefetch(const void *x)
{
	alternative_input(BASE_PREFETCH,
			  "prefetchnta (%1)",
			  X86_FEATURE_XMM,
			  "r" (x));
}
#ifdef CONFIG_X86_32
/*
 * User space process size: 3GB (default).
 */
#define TASK_SIZE		PAGE_OFFSET
#define TASK_SIZE_MAX		TASK_SIZE
#define STACK_TOP		TASK_SIZE
#define STACK_TOP_MAX		STACK_TOP
#define INIT_THREAD  {							  \
	.sp0			= sizeof(init_stack) + (long)&init_stack, \
	.vm86_info		= NULL,					  \
	.sysenter_cs		= __KERNEL_CS,				  \
	.io_bitmap_ptr		= NULL,					  \
}
/*
 * Note that the .io_bitmap member must be extra-big. This is because
 * the CPU will access an additional byte beyond the end of the IO
 * permission bitmap. The extra byte must be all 1 bits, and must
 * be within the limit.
 */
#define INIT_TSS  {							  \
	.x86_tss = {							  \
		.sp0		= sizeof(init_stack) + (long)&init_stack, \
		.ss0		= __KERNEL_DS,				  \
		.ss1		= __KERNEL_CS,				  \
		.io_bitmap_base	= INVALID_IO_BITMAP_OFFSET,		  \
	},								  \
	.io_bitmap		= { [0 ... IO_BITMAP_LONGS] = ~0 },	  \
}
#define THREAD_SIZE_LONGS	(THREAD_SIZE/sizeof(unsigned long))
#define KSTK_TOP(info)						\
({								\
	unsigned long *__ptr = (unsigned long *)(info);		\
	(unsigned long)(&__ptr[THREAD_SIZE_LONGS]);		\
})
/*
 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
 * This is necessary to guarantee that the entire "struct pt_regs"
 * is accessible even if the CPU hasn't stored the SS/ESP registers
 * on the stack (the interrupt gate does not save these registers
 * when switching to the same priv ring).
 */
#define task_pt_regs(task)						\
({									\
	struct pt_regs *__regs__;					\
	__regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
	__regs__ - 1;							\
})
#define KSTK_ESP(task)		(task_pt_regs(task)->sp)

#else /* CONFIG_X86_64 */

/*
 * User space process size: 47 bits minus one guard page.
 */
#define TASK_SIZE_MAX	((1UL << 47) - PAGE_SIZE)
#define IA32_PAGE_OFFSET	((current->personality & ADDR_LIMIT_3GB) ? \
					0xc0000000 : 0xFFFFe000)

#define TASK_SIZE		(test_thread_flag(TIF_ADDR32) ? \
					IA32_PAGE_OFFSET : TASK_SIZE_MAX)
#define TASK_SIZE_OF(child)	((test_tsk_thread_flag(child, TIF_ADDR32)) ? \
					IA32_PAGE_OFFSET : TASK_SIZE_MAX)
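/*
 * TASK_SIZE follows the current thread (compat tasks see the 32-bit
 * limit above), so STACK_TOP is personality-dependent while
 * STACK_TOP_MAX is the absolute 47-bit ceiling.
 */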
#define STACK_TOP		TASK_SIZE
#define STACK_TOP_MAX		TASK_SIZE_MAX
#define INIT_THREAD  { \
	.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
}

#define INIT_TSS  { \
	.x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
}
/*
 * Return the saved PC of a blocked thread; in practice it is always
 * inside the scheduler or ret_from_fork.
 */
#define thread_saved_pc(t)	(*(unsigned long *)((t)->thread.sp - 8))

#define task_pt_regs(tsk)	((struct pt_regs *)(tsk)->thread.sp0 - 1)
#endif /* CONFIG_X86_64 */

extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
					       unsigned long new_sp);
/*
 * This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define TASK_UNMAPPED_BASE	(PAGE_ALIGN(TASK_SIZE / 3))

#define KSTK_EIP(task)		(task_pt_regs(task)->ip)
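/*
 * These back the PR_GET_TSC/PR_SET_TSC prctl() calls: a task may have
 * RDTSC disabled (via CR4.TSD), turning userspace RDTSC into SIGSEGV.
 */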
#define GET_TSC_CTL(adr)	get_tsc_mode((adr))
#define SET_TSC_CTL(val)	set_tsc_mode((val))
static inline void get_aperfmperf(struct aperfmperf *am)
{
	WARN_ON_ONCE(!boot_cpu_has(X86_FEATURE_APERFMPERF));

	rdmsrl(MSR_IA32_APERF, am->aperf);
	rdmsrl(MSR_IA32_MPERF, am->mperf);
}
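/*
 * APERF counts at the actual clock, MPERF at the TSC reference clock,
 * and both tick only in C0, so aperf/mperf over an interval is the
 * average frequency as a fraction of the reference frequency. The
 * ratio below is fixed-point with APERFMPERF_SHIFT fractional bits.
 */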
#define APERFMPERF_SHIFT 10

static inline
unsigned long calc_aperfmperf_ratio(struct aperfmperf *old,
				    struct aperfmperf *new)
{
	u64 aperf = new->aperf - old->aperf;
	u64 mperf = new->mperf - old->mperf;
	unsigned long ratio = aperf;

	mperf >>= APERFMPERF_SHIFT;
	if (mperf)
		ratio = div64_u64(aperf, mperf);

	return ratio;
}
/*
 * AMD errata checking: an erratum is matched either by OSVW id or,
 * for legacy errata, by a list of family/model/stepping ranges.
 */
#ifdef CONFIG_CPU_SUP_AMD
extern bool cpu_has_amd_erratum(const int *);

#define AMD_LEGACY_ERRATUM(...)		{ -1, __VA_ARGS__, 0 }
#define AMD_OSVW_ERRATUM(osvw_id, ...)	{ osvw_id, __VA_ARGS__, 0 }
#define AMD_MODEL_RANGE(f, m_start, s_start, m_end, s_end) \
	((f << 24) | (m_start << 16) | (s_start << 12) | (m_end << 4) | (s_end))
#define AMD_MODEL_RANGE_FAMILY(range)	(((range) >> 24) & 0xff)
#define AMD_MODEL_RANGE_START(range)	(((range) >> 12) & 0xfff)
#define AMD_MODEL_RANGE_END(range)	((range) & 0xfff)
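/*
 * Example (from the erratum 400 table in arch/x86/kernel/cpu/amd.c):
 *
 *	AMD_OSVW_ERRATUM(1, AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0xff, 0xf))
 *
 * encodes OSVW id 1 with a fallback range of family 0x10, model 0x02
 * stepping 1 through model 0xff stepping 0xf; AMD_MODEL_RANGE_START()
 * recovers 0x021 and AMD_MODEL_RANGE_END() 0xfff, both in
 * (model << 4 | stepping) form.
 */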
#else
#define cpu_has_amd_erratum(x)	(false)

#endif /* CONFIG_CPU_SUP_AMD */

#endif /* _ASM_X86_PROCESSOR_H */