#ifndef _ASM_IA64_PROCESSOR_H
#define _ASM_IA64_PROCESSOR_H

#include <asm/intrinsics.h>
#include <asm/ptrace.h>
#include <asm/ustack.h>

#define __ARCH_WANT_UNLOCKED_CTXSW
#define ARCH_HAS_PREFETCH_SWITCH_STACK

#define IA64_NUM_PHYS_STACK_REG	96
#define IA64_NUM_DBG_REGS	8
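/*
 * DEFAULT_MAP_BASE is the default search base for get_unmapped_area()
 * (kept per task in thread.map_base); TASK_SIZE is really the maximum
 * user-space address plus one.
 */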
#define DEFAULT_MAP_BASE	__IA64_UL_CONST(0x2000000000000000)
#define DEFAULT_TASK_SIZE	__IA64_UL_CONST(0xa000000000000000)

#define TASK_SIZE		DEFAULT_TASK_SIZE

#define TASK_UNMAPPED_BASE	(current->thread.map_base)
#define IA64_THREAD_FPH_VALID	(__IA64_UL(1) << 0)	/* floating-point high state valid? */
#define IA64_THREAD_DBG_VALID	(__IA64_UL(1) << 1)	/* debug registers valid? */
#define IA64_THREAD_PM_VALID	(__IA64_UL(1) << 2)	/* performance registers valid? */
#define IA64_THREAD_UAC_NOPRINT	(__IA64_UL(1) << 3)	/* don't log unaligned accesses */
#define IA64_THREAD_UAC_SIGBUS	(__IA64_UL(1) << 4)	/* generate SIGBUS on unaligned access */
#define IA64_THREAD_MIGRATION	(__IA64_UL(1) << 5)	/* require migration sync at ctx switch */
#define IA64_THREAD_FPEMU_NOPRINT (__IA64_UL(1) << 6)	/* don't log any fpswa faults */
#define IA64_THREAD_FPEMU_SIGFPE  (__IA64_UL(1) << 7)	/* send a SIGFPE for fpswa faults */
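/*
 * The UAC (unaligned-access control) and FPEMU (fp-emulation control)
 * settings each occupy a two-bit field in thread.flags: UAC in bits 3-4,
 * FPEMU in bits 6-7.  The *_SHIFT/*_MASK pairs below extract and update
 * those fields.
 */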
#define IA64_THREAD_UAC_SHIFT	3
#define IA64_THREAD_UAC_MASK	(IA64_THREAD_UAC_NOPRINT | IA64_THREAD_UAC_SIGBUS)
#define IA64_THREAD_FPEMU_SHIFT	6
#define IA64_THREAD_FPEMU_MASK	(IA64_THREAD_FPEMU_NOPRINT | IA64_THREAD_FPEMU_SIGFPE)
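/*
 * Fixed-point shift used when scaling ITC cycles to nanoseconds.  It must be
 * large enough to represent 1000000000/itc_freq with good accuracy while
 * keeping 10*1000000000 << IA64_NSEC_PER_CYC_SHIFT representable in 64 bits.
 */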
#define IA64_NSEC_PER_CYC_SHIFT	30
#include <linux/compiler.h>
#include <linux/types.h>

#include <asm/percpu.h>
#include <asm/unwind.h>
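/* CPU topology information, kept per CPU in struct cpuinfo_ia64: */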
	unsigned int socket_id;		/* physical processor socket id */
	unsigned short core_id;		/* core id */
	unsigned short thread_id;	/* thread id */
	unsigned short num_log;		/* total number of logical processors on this socket */
	unsigned char cores_per_socket;	/* cores per processor socket */
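/*
 * local_cpu_data refers to the calling CPU's cpuinfo_ia64 via the per-CPU
 * area; cpu_data(cpu) reaches the copy belonging to another CPU.
 */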
#define local_cpu_data		(&__ia64_per_cpu_var(ia64_cpu_info))
#define cpu_data(cpu)		(&per_cpu(ia64_cpu_info, cpu))
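/*
 * SET/GET_UNALIGN_CTL and SET/GET_FPEMU_CTL back the prctl() operations
 * PR_SET_UNALIGN/PR_GET_UNALIGN and PR_SET_FPEMU/PR_GET_FPEMU: the two-bit
 * control value lives in the UAC/FPEMU fields of task->thread.flags.
 */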
#define SET_UNALIGN_CTL(task,value) \
({ \
	(task)->thread.flags = (((task)->thread.flags & ~IA64_THREAD_UAC_MASK) \
				| (((value) << IA64_THREAD_UAC_SHIFT) & IA64_THREAD_UAC_MASK)); \
	0; \
})
#define GET_UNALIGN_CTL(task,addr) \
({ \
	put_user(((task)->thread.flags & IA64_THREAD_UAC_MASK) >> IA64_THREAD_UAC_SHIFT, \
		 (int __user *) (addr)); \
})
#define SET_FPEMU_CTL(task,value) \
({ \
	(task)->thread.flags = (((task)->thread.flags & ~IA64_THREAD_FPEMU_MASK) \
				| (((value) << IA64_THREAD_FPEMU_SHIFT) & IA64_THREAD_FPEMU_MASK)); \
	0; \
})
#define GET_FPEMU_CTL(task,addr) \
({ \
	put_user(((task)->thread.flags & IA64_THREAD_FPEMU_MASK) >> IA64_THREAD_FPEMU_SHIFT, \
		 (int __user *) (addr)); \
})
#ifdef CONFIG_PERFMON
	void *pfm_context;		  /* pointer to detailed PMU context */
	unsigned long pfm_needs_checking; /* when >0, pending perfmon work on kernel exit */
# define INIT_THREAD_PM		.pfm_context = NULL, \
				.pfm_needs_checking = 0UL,
#else
# define INIT_THREAD_PM
#endif
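/*
 * Initial values for a newly created thread_struct; INIT_THREAD_PM above
 * contributes the perfmon members when CONFIG_PERFMON is enabled.
 */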
#define INIT_THREAD { \
	.map_base =	DEFAULT_MAP_BASE, \
	.rbs_bot =	STACK_TOP - DEFAULT_USER_STACK_SIZE, \
	.last_fph_cpu =	-1, \
	INIT_THREAD_PM \
}
#define start_thread(regs,new_ip,new_sp) do { \
	regs->cr_ipsr = ((regs->cr_ipsr | (IA64_PSR_BITS_TO_SET | IA64_PSR_CPL)) \
			 & ~(IA64_PSR_BITS_TO_CLEAR | IA64_PSR_RI | IA64_PSR_IS)); \
	regs->cr_iip = new_ip; \
	regs->ar_rsc = 0xf;		/* eager mode, privilege level 3 */ \
	regs->ar_bspstore = current->thread.rbs_bot; \
	regs->ar_fpsr = FPSR_DEFAULT; \
	regs->r8 = get_dumpable(current->mm);	/* set "don't zap registers" flag */ \
	regs->r12 = new_sp - 16;	/* allocate 16 byte scratch area */ \
	if (unlikely(!get_dumpable(current->mm))) { \
		/* Zap scratch regs to avoid leaking bits between processes with different uid/privileges. */ \
		regs->ar_pfs = 0; regs->b0 = 0; regs->pr = 0; \
		regs->r1 = 0; regs->r9 = 0; regs->r11 = 0; regs->r13 = 0; regs->r15 = 0; \
	} \
} while (0)
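/*
 * Free all resources held by a thread.  This is called after the parent of
 * DEAD_TASK has collected its exit status via wait(); on ia64 there is
 * nothing extra to free, so the macro expands to nothing.
 */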
#define release_thread(dead_task)
/* Return instruction (KSTK_EIP) and stack (KSTK_ESP) pointers of blocked task TSK. */
#define KSTK_EIP(tsk) \
({ \
	struct pt_regs *_regs = task_pt_regs(tsk); \
	_regs->cr_iip + ia64_psr(_regs)->ri; \
})
#define KSTK_ESP(tsk)	((tsk)->thread.ksp)
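/*
 * Accessors for the ia64 kernel registers ar.k0-ar.k7.  regnum selects the
 * register; anything else falls through to the *_unknown_kr() helpers.
 */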
#define ia64_get_kr(regnum) \
({ \
	unsigned long r = 0; \
	switch (regnum) { \
	    case 0: r = ia64_getreg(_IA64_REG_AR_KR0); break; \
	    case 1: r = ia64_getreg(_IA64_REG_AR_KR1); break; \
	    case 2: r = ia64_getreg(_IA64_REG_AR_KR2); break; \
	    case 3: r = ia64_getreg(_IA64_REG_AR_KR3); break; \
	    case 4: r = ia64_getreg(_IA64_REG_AR_KR4); break; \
	    case 5: r = ia64_getreg(_IA64_REG_AR_KR5); break; \
	    case 6: r = ia64_getreg(_IA64_REG_AR_KR6); break; \
	    case 7: r = ia64_getreg(_IA64_REG_AR_KR7); break; \
	    default: ia64_getreg_unknown_kr(); break; \
	} \
	r; \
})
#define ia64_set_kr(regnum, r) \
({ \
	switch (regnum) { \
	    case 0: ia64_setreg(_IA64_REG_AR_KR0, r); break; \
	    case 1: ia64_setreg(_IA64_REG_AR_KR1, r); break; \
	    case 2: ia64_setreg(_IA64_REG_AR_KR2, r); break; \
	    case 3: ia64_setreg(_IA64_REG_AR_KR3, r); break; \
	    case 4: ia64_setreg(_IA64_REG_AR_KR4, r); break; \
	    case 5: ia64_setreg(_IA64_REG_AR_KR5, r); break; \
	    case 6: ia64_setreg(_IA64_REG_AR_KR6, r); break; \
	    case 7: ia64_setreg(_IA64_REG_AR_KR7, r); break; \
	    default: ia64_setreg_unknown_kr(); break; \
	} \
})
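/*
 * Return TRUE if task T owns the fph partition (f32-f127) of the CPU we are
 * currently running on.  Must be called from code that has preemption
 * disabled.
 */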
#define ia64_is_local_fpu_owner(t) \
({ \
	struct task_struct *__ia64_islfo_task = (t); \
	(__ia64_islfo_task->thread.last_fph_cpu == smp_processor_id() \
	 && __ia64_islfo_task == (struct task_struct *) ia64_get_kr(IA64_KR_FPU_OWNER)); \
})
/* Mark task T as owning the fph partition of the CPU we are running on (preemption must be disabled). */
#define ia64_set_local_fpu_owner(t) do { \
	struct task_struct *__ia64_slfo_task = (t); \
	__ia64_slfo_task->thread.last_fph_cpu = smp_processor_id(); \
	ia64_set_kr(IA64_KR_FPU_OWNER, (unsigned long) __ia64_slfo_task); \
} while (0)

/* Mark the fph partition of task T as being invalid on all CPUs. */
#define ia64_drop_fpu(t)	((t)->thread.last_fph_cpu = -1)
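/*
 * PSR.dfh ("disabled FP high") gates access to f32-f127: ia64_fph_enable()
 * clears the bit (rsm) and ia64_fph_disable() sets it (ssm), each followed
 * by a data serialization.
 */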
#define ia64_fph_enable()	do { ia64_rsm(IA64_PSR_DFH); ia64_srlz_d(); } while (0)
#define ia64_fph_disable()	do { ia64_ssm(IA64_PSR_DFH); ia64_srlz_d(); } while (0)

/* load fp 0.0 into fph */
static inline void
ia64_init_fpu (void) {
	/* target_mask bit 0 selects the instruction TLB, bit 1 the data TLB */
	if (target_mask & 0x1)
	if (target_mask & 0x2)
	if (target_mask & 0x1)
	if (target_mask & 0x2)
	if (target_mask & 0x1)
	if (target_mask & 0x2)
/* Set the interrupt vector address.  The address must be suitably aligned (32KB). */
static inline void
ia64_set_iva (void *ivt_addr)

/* Set the page table address and control bits. */
static inline void
ia64_set_pta (__u64 pta)
#define cpu_relax()	ia64_hint(ia64_hint_pause)
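/* Test whether external interrupt VECTOR is pending by checking the corresponding bit of cr.irr0-cr.irr3. */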
static inline int
ia64_get_irr (unsigned int vector)
{
	unsigned int reg = vector / 64;
	unsigned int bit = vector % 64;
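/* Program the local redirection registers cr.lrr0/cr.lrr1, which control delivery for the two local interrupt pins. */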
static inline void
ia64_set_lrr0 (unsigned long val)

static inline void
ia64_set_lrr1 (unsigned long val)
/* Given the address to which a spill occurred, return the corresponding unat bit number. */
static inline __u64
ia64_unat_pos (void *spill_addr)
{
	return ((__u64) spill_addr >> 3) & 0x3f;
}
/* Set the NaT bit of an integer register which was spilled at address SPILL_ADDR; UNAT is the mask to be updated. */
static inline void
ia64_set_unat (__u64 *unat, void *spill_addr, unsigned long nat)
{
	__u64 bit = ia64_unat_pos(spill_addr);
	__u64 mask = 1UL << bit;

	*unat = (*unat & ~mask) | (nat << bit);
}
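/* Return the saved PC of a blocked thread, recovered with the kernel unwinder (unw_get_ip). */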
static inline unsigned long
	unw_get_ip(&info, &ip);
/* Get the current instruction/program counter value. */
#define current_text_addr() \
	({ void *_pc; _pc = (void *)ia64_getreg(_IA64_REG_IP); _pc; })
#ifdef CONFIG_ITANIUM

static inline __u64
ia64_get_dbr (__u64 regnum)

#ifdef CONFIG_ITANIUM
	return (w >> n) | (w << (64 - n));

/* Rotate left by n = rotate right by (64 - n). */
#define ia64_rotl(w,n)	ia64_rotr((w), (64) - (n))
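/*
 * Take a mapped kernel address and return the equivalent address in the
 * region 7 identity-mapped virtual area.
 */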
static inline void *
ia64_imva (void *addr)
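/*
 * IA-64 implements prefetch()/prefetchw() with the lfetch instruction, so
 * advertise prefetching to the generic code and stride by one L1 cache line.
 */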
#define ARCH_HAS_PREFETCH
#define ARCH_HAS_PREFETCHW
#define ARCH_HAS_SPINLOCK_PREFETCH
#define PREFETCH_STRIDE		L1_CACHE_BYTES

#define spin_lock_prefetch(x)	prefetchw(x)
#define ia64_platform_is(x)	(strcmp(x, ia64_platform_name) == 0)

#endif /* _ASM_IA64_PROCESSOR_H */