11 #include <linux/string.h>
12 #include <linux/types.h>
13 #include <linux/sched.h>
14 #include <linux/ptrace.h>
15 #include <linux/mman.h>
17 #include <linux/kernel.h>
18 #include <linux/signal.h>
21 #include <linux/perf_event.h>
26 #include <asm/pgtable.h>
27 #include <asm/openprom.h>
28 #include <asm/oplib.h>
30 #include <asm/traps.h>
31 #include <asm/uaccess.h>
/*
 * Fragment of unhandled_fault(): report an unrecoverable kernel fault.
 * NOTE(review): this extract is elided — the embedded original line
 * numbers (35, 42, 44, 46, 50, 52) jump, so interior lines (parameter
 * names, the printk for the NULL-pointer case, locals) are missing here.
 */
35 static void unhandled_fault(
unsigned long,
struct task_struct *,
/* A fault below PAGE_SIZE is reported as a NULL pointer dereference. */
42 if ((
unsigned long) address <
PAGE_SIZE) {
44 "Unable to handle kernel NULL pointer dereference\n");
46 printk(
KERN_ALERT "Unable to handle kernel paging request at virtual address %08lx\n",
/* Prefer tsk->mm; fall back to active_mm (presumably the kernel-thread
 * case, where mm is NULL — confirm against the elided surrounding code). */
50 (tsk->mm ? tsk->mm->context : tsk->active_mm->context));
52 (tsk->mm ? (
unsigned long) tsk->mm->pgd :
53 (
unsigned long) tsk->active_mm->pgd));
/* Elided fragment: fetch the 32-bit instruction word at pc. The two
 * identical loads (original lines 74 and 82) are presumably on different
 * control paths whose branches are missing from this extract. */
74 insn = *((
unsigned int *) pc);
82 insn = *((
unsigned int *) pc);
/* Decode test: bit 21 of insn, and a 6-bit field at bits 19..24 compared
 * with 15 — NOTE(review): looks like a SPARC opcode/rd (%o7) check, but
 * the enclosing logic is elided; confirm against the full source. */
83 if (!((insn >> 21) & 1) || ((insn>>19)&0x3f) == 15)
91 memset(&regs, 0,
sizeof(regs));
98 "nop\n" :
"=r" (regs.
psr));
99 unhandled_fault(address,
current, &regs);
/* Fragment of the per-task segfault diagnostic: rate-limited via
 * printk_ratelimit() (the early-return body is elided), then a printk
 * of comm/pid/fault address; the trailing arguments are elided. */
112 if (!printk_ratelimit())
115 printk(
"%s%s[%d]: segfault at %lx ip %p (rpc %p) sp %p error %x",
117 tsk->
comm, task_pid_nr(tsk), address,
/* Fragment of __do_fault_siginfo(): fill in a siginfo (only the si_addr
 * assignment is visible; si_signo/si_code setup and the force_sig call
 * are elided) and log the signal via show_signal_msg(). */
126 static void __do_fault_siginfo(
int code,
int sig,
struct pt_regs *regs,
/* Record the faulting address as a user-space pointer. */
134 info.si_addr = (
void __user *) addr;
138 show_signal_msg(regs, sig, info.
si_code,
/* Fragment of compute_si_addr(): derive the address to report in the
 * siginfo. The visible load fetches the instruction at regs->pc —
 * presumably to decode the effective address for a data fault; the
 * text_fault branch and the decode itself are elided. */
147 static unsigned long compute_si_addr(
struct pt_regs *regs,
int text_fault)
155 insn = *(
unsigned int *) regs->
pc;
/* do_fault_siginfo(): compute the user-visible fault address, then
 * delegate to __do_fault_siginfo() (a trailing parameter and any
 * remaining lines are elided from this extract). */
162 static noinline void do_fault_siginfo(
int code,
int sig,
struct pt_regs *regs,
165 unsigned long addr = compute_si_addr(regs, text_fault);
167 __do_fault_siginfo(code, sig, regs, addr);
/* Fragment of the main page-fault handler — the signature above
 * "unsigned long address)" is elided, as are the vma lookup,
 * handle_mm_fault() call, and the good/bad-area plumbing between the
 * visible lines (original numbers jump 181 -> 219 -> 234 -> 245 ...). */
171 unsigned long address)
/* Retryable, killable fault; add FAULT_FLAG_WRITE only for writes. */
180 unsigned int flags = (FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
181 (write ? FAULT_FLAG_WRITE : 0));
/* Address below the found vma: only valid if the vma can grow down
 * (stack expansion); otherwise this falls into the bad-area path. */
219 if (!(vma->
vm_flags & VM_GROWSDOWN))
/* Non-write fault: the vma must permit read or exec access. */
234 if (!(vma->
vm_flags & (VM_READ | VM_EXEC)))
/* A fatal signal that arrived while the fault wants a retry —
 * the body taken here is elided. */
245 if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(
current))
248 if (
unlikely(fault & VM_FAULT_ERROR)) {
249 if (fault & VM_FAULT_OOM)
251 else if (fault & VM_FAULT_SIGBUS)
/* First pass only (ALLOW_RETRY still set): account major/minor faults
 * (accounting lines elided), then, if asked to retry, drop ALLOW_RETRY
 * and mark the retry as TRIED before looping. */
256 if (flags & FAULT_FLAG_ALLOW_RETRY) {
257 if (fault & VM_FAULT_MAJOR) {
266 if (fault & VM_FAULT_RETRY) {
267 flags &= ~FAULT_FLAG_ALLOW_RETRY;
268 flags |= FAULT_FLAG_TRIED;
/* Bad-area path reached without mmap semaphore held: user faults get
 * SIGSEGV; the user/kernel-mode test between label and call is elided. */
289 bad_area_nosemaphore:
292 do_fault_siginfo(code,
SIGSEGV, regs, text_fault);
/* Kernel-mode fault path fragment. Section bounds for the assembly
 * memset and checksumming-copy routines, used below to recognize a
 * fault that hit inside one of them. The exception-table lookup that
 * produces 'fixup' and 'g2' is elided from this extract. */
303 extern const unsigned __memset_start[];
304 extern const unsigned __memset_end[];
305 extern const unsigned __csum_partial_copy_start[];
306 extern const unsigned __csum_partial_copy_end[];
308 #ifdef DEBUG_EXCEPTIONS
309 printk(
"Exception: PC<%08lx> faddr<%08lx>\n",
311 printk(
"EX_TABLE: insn<%08lx> fixup<%08x> g2<%08lx>\n",
312 regs->
pc, fixup, g2);
/* Fault inside __memset or __csum_partial_copy? The special handling
 * taken in that case is elided here. */
314 if ((regs->
pc >= (
unsigned long)__memset_start &&
315 regs->
pc < (
unsigned long)__memset_end) ||
316 (regs->
pc >= (
unsigned long)__csum_partial_copy_start &&
317 regs->
pc < (
unsigned long)__csum_partial_copy_end)) {
/* Resume via the fixup: npc trails pc by one instruction (SPARC
 * delay-slot convention); the pc = fixup assignment itself is elided. */
323 regs->
npc = regs->
pc + 4;
/* No exception-table entry: the fault is fatal. */
328 unhandled_fault(address, tsk, regs);
/* Elided checks (original lines ~360-373) funnel failures into the
 * bad-area path above. */
364 goto bad_area_nosemaphore;
373 goto bad_area_nosemaphore;
/* Fragment of force_user_fault(): synchronously resolve a user fault at
 * 'address' (called by the window-fault handlers below); on failure the
 * visible tail delivers SIGSEGV against the task's saved user registers.
 * The vma lookup and good/bad-area labels are elided. */
381 static void force_user_fault(
unsigned long address,
int write)
/* Stack-expansion check, mirroring the main handler. */
396 if (!(vma->
vm_flags & VM_GROWSDOWN))
/* Non-write access requires a readable or executable vma. */
406 if (!(vma->
vm_flags & (VM_READ | VM_EXEC)))
409 switch (
handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0)) {
410 case VM_FAULT_SIGBUS:
/* Failure path: SIGSEGV with the kernel-saved user register frame. */
418 __do_fault_siginfo(code,
SIGSEGV, tsk->
thread.kregs, address);
/* check_stack_aligned() (body elided — only the signature is visible)
 * plus fragments of three callers, presumably the register-window
 * overflow / underflow / return fault handlers (their signatures are
 * elided; original lines ~437-462). Each touches the region [sp,
 * sp+0x38]: if sp+0x38 lands on a different page than sp, fault in the
 * far end first, then sp itself. NOTE(review): 0x38 is presumably the
 * offset of the last word of the SPARC window-save area — confirm.
 * The first caller faults for write (write=1), the other two for read. */
426 static void check_stack_aligned(
unsigned long sp)
437 if (((sp + 0x38) &
PAGE_MASK) != (sp & PAGE_MASK))
438 force_user_fault(sp + 0x38, 1);
439 force_user_fault(sp, 1);
441 check_stack_aligned(sp);
446 if (((sp + 0x38) &
PAGE_MASK) != (sp & PAGE_MASK))
447 force_user_fault(sp + 0x38, 0);
448 force_user_fault(sp, 0);
450 check_stack_aligned(sp);
458 if (((sp + 0x38) &
PAGE_MASK) != (sp & PAGE_MASK))
459 force_user_fault(sp + 0x38, 0);
460 force_user_fault(sp, 0);
462 check_stack_aligned(sp);