18 #include <linux/signal.h>
19 #include <linux/sched.h>
20 #include <linux/kernel.h>
21 #include <linux/errno.h>
22 #include <linux/string.h>
23 #include <linux/types.h>
24 #include <linux/ptrace.h>
25 #include <linux/mman.h>
29 #include <linux/module.h>
32 #include <linux/perf_event.h>
33 #include <linux/magic.h>
38 #include <asm/pgtable.h>
40 #include <asm/mmu_context.h>
41 #include <asm/uaccess.h>
42 #include <asm/tlbflush.h>
43 #include <asm/siginfo.h>
44 #include <asm/debug.h>
/*
 * notify_page_fault() — two build-variant fragments of the same helper are
 * visible here (presumably the kprobes-enabled and kprobes-disabled #ifdef
 * arms — TODO confirm against the full file). Both bodies lie outside this
 * excerpt, so their behavior cannot be stated from here; the caller below
 * treats a non-zero return as "fault consumed by the hook".
 */
50 static inline int notify_page_fault(
struct pt_regs *
regs)
/* Second #ifdef variant of notify_page_fault() — body not in this excerpt. */
65 static inline int notify_page_fault(
struct pt_regs *regs)
/*
 * store_updates_sp() — fragments only; connecting lines are missing.
 * Visible logic: fetch the faulting instruction word from user memory at
 * regs->nip, then decode its bit fields to decide whether it is a
 * store-with-update through register 1 (r1 is the stack pointer per the
 * PowerPC ABI, which matches this function's name — confirm in full file).
 */
75 static int store_updates_sp(
struct pt_regs *regs)
/* Read the instruction that faulted; bail out if the user read fails. */
79 if (
get_user(inst, (
unsigned int __user *)regs->
nip))
/* RA field (bits 16-20 of the word, i.e. (inst >> 16) & 0x1f) must be 1. */
82 if (((inst >> 16) & 0x1f) != 1)
/* For this opcode family the low two bits == 1 selects the update form. */
93 return (inst & 3) == 1;
/* X-form: dispatch on the extended opcode in bits 1-10. */
96 switch ((inst >> 1) & 0x3ff) {
/*
 * Return-code convention for the mm_fault_error() helper below:
 *   MM_FAULT_RETURN   — fault fully handled, caller just returns;
 *   MM_FAULT_CONTINUE — caller should continue its normal fault path;
 *   MM_FAULT_ERR(sig) — deliver signal `sig` (the signal number itself).
 */
112 #define MM_FAULT_RETURN 0
113 #define MM_FAULT_CONTINUE -1
114 #define MM_FAULT_ERR(sig) (sig)
/*
 * out_of_memory() — OOM leg of the fault-error path (called from
 * mm_fault_error() below when VM_FAULT_OOM is set). Body mostly missing
 * from this excerpt; only the siginfo address assignment is visible.
 * NOTE(review): `address` and `info` are declared in lines not shown here.
 */
116 static int out_of_memory(
struct pt_regs *regs)
/* Record the faulting user address in the signal info being built. */
140 info.si_addr = (
void __user *)address;
/*
 * mm_fault_error() — translate a handle_mm_fault() result (`fault`) into one
 * of the MM_FAULT_* codes defined above. Fragments only; the return
 * statements between the visible conditions are missing from this excerpt.
 */
147 static int mm_fault_error(
struct pt_regs *regs,
unsigned long addr,
int fault)
/* A fatal signal is already pending for this task — don't bother finishing
 * the fault; the missing body presumably short-circuits here. */
153 if (fatal_signal_pending(
current)) {
/* Not a retry request ... (consequent not visible in this excerpt). */
159 if (!(fault & VM_FAULT_RETRY))
/* No error bit set at all — nothing for this helper to translate. */
168 if (!(fault & VM_FAULT_ERROR))
/* Out of memory: defer to the OOM helper above. */
172 if (fault & VM_FAULT_OOM)
173 return out_of_memory(regs);
/* Bus error (e.g. mapping gone or hardware poison): raise SIGBUS. */
178 if (fault & VM_FAULT_SIGBUS)
179 return do_sigbus(regs, addr);
/*
 * Interior fragments of the main page-fault handler: decode the access type
 * from the exception error code, run debugger/kprobes hooks, validate the
 * faulting VMA (including stack growth), invoke the generic mm fault path
 * with retry support, and finally report user "bad area" / kernel oops
 * errors. Most connecting lines are absent from this excerpt, so the notes
 * below are limited to what each visible statement itself shows.
 */
204 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
/* Trap vector 0x400 is used here to mean an instruction-fetch fault. */
208 int is_exec = trap == 0x400;
211 #if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
/* Keep only the error-code bits this handler cares about.
 * NOTE(review): the meaning of mask 0x48200000 (DSISR bits) is not
 * derivable from this excerpt — confirm against the DSISR definitions. */
219 error_code &= 0x48200000;
/* Classic-MMU parts flag stores via DSISR_ISSTORE ... */
221 is_write = error_code & DSISR_ISSTORE;
/* ... while 4xx/BookE parts use the ESR destination (store) bit. */
223 is_write = error_code & ESR_DST;
/* Tell the generic fault path this is a write access. */
227 flags |= FAULT_FLAG_WRITE;
229 #ifdef CONFIG_PPC_ICSWX
/* Give the kprobes hook first claim on the fault; non-zero == consumed. */
242 if (notify_page_fault(regs))
/* Likewise let an attached debugger swallow the fault. */
245 if (
unlikely(debugger_fault_handler(regs)))
252 #if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE) || \
253 defined(CONFIG_PPC_BOOK3S_64))
/* A data-breakpoint (DABR) match is not a real page fault — hand it to
 * do_dabr() instead of the mm fault path. */
254 if (error_code & DSISR_DABRMATCH) {
256 do_dabr(regs, address, error_code);
262 if (!arch_irq_disabled_regs(regs))
/* Diagnostic printed when faulting from an invalid context (the full
 * message and the surrounding check are outside this excerpt). */
271 "in_atomic() = %d mm = %p\n",
in_atomic(), mm);
296 goto bad_area_nosemaphore;
/* An address below the found VMA is only tolerable for a growing stack. */
314 if (!(vma->
vm_flags & VM_GROWSDOWN))
/* Heuristic limit on how far below the existing stack VMA a fault may be.
 * NOTE(review): 0x100000 (1 MiB) appears to bound stack expansion here —
 * confirm the surrounding (missing) logic before relying on this. */
326 if (address + 0x100000 < vma->vm_end) {
/* Accesses within 2048 bytes below the user stack pointer (gpr[1]) are
 * allowed unconditionally; anything lower is only legitimate for kernel
 * mode or a store-with-update to r1 (see store_updates_sp() above). */
344 if (address + 2048 < uregs->
gpr[1]
345 && (!
user_mode(regs) || !store_updates_sp(regs)))
353 #if defined(CONFIG_6xx)
/* NOTE(review): 0x95700000 is a 6xx-specific error-bit mask; its exact
 * meaning is not derivable from this excerpt. */
354 if (error_code & 0x95700000)
359 #if defined(CONFIG_8xx)
/* 8xx: this error bit triggers a per-virtual-address TLB invalidate. */
363 if (error_code & 0x40000000)
364 _tlbil_va(address, 0, 0, 0);
370 if (error_code & 0x10000000)
376 #ifdef CONFIG_PPC_STD_MMU
/* Protection fault on the standard MMU. */
385 if (error_code & DSISR_PROTFAULT)
/* Instruction-fetch check: reject when the VMA grants neither read nor
 * write (the enclosing condition is partially outside this excerpt). */
401 !(vma->
vm_flags & (VM_READ | VM_WRITE))))
404 }
else if (is_write) {
410 if (error_code & 0x08000000)
/* Read access: the VMA must grant at least one of read/exec/write. */
412 if (!(vma->
vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
/* Result of the generic fault path: retry requests and hard errors are
 * routed through mm_fault_error() (see its MM_FAULT_* convention above). */
422 if (
unlikely(fault & (VM_FAULT_RETRY|VM_FAULT_ERROR))) {
423 int rc = mm_fault_error(regs, address, fault);
/* First-attempt bookkeeping: major faults get extra accounting ... */
433 if (flags & FAULT_FLAG_ALLOW_RETRY) {
434 if (fault & VM_FAULT_MAJOR) {
438 #ifdef CONFIG_PPC_SMLPAR
/* ... including CMO (Cooperative Memory Overcommit) firmware accounting. */
439 if (firmware_has_feature(FW_FEATURE_CMO)) {
/* On a retry request: drop ALLOW_RETRY and set TRIED so the second
 * attempt through the fault path will block instead of retrying again. */
450 if (fault & VM_FAULT_RETRY) {
453 flags &= ~FAULT_FLAG_ALLOW_RETRY;
454 flags |= FAULT_FLAG_TRIED;
/* Error label reached when the mmap semaphore is not held. */
465 bad_area_nosemaphore:
/* Log a protection fault on an instruction fetch as a likely exploit
 * attempt (rest of the printk is on the missing neighboring lines). */
472 if (is_exec && (error_code & DSISR_PROTFAULT))
474 " page (%lx) - exploit attempt? (uid: %d)\n",
489 unsigned long *stackend;
/* Kernel-mode fault: decode the trap type to pick the oops message. */
499 switch (regs->
trap) {
/* Data access fault — report the faulting data address from DAR. */
503 "data at address 0x%08lx\n", regs->
dar);
508 "instruction fetch\n");
/* Unrecoverable kernel fault: oops and kill with `sig`. */
522 die(
"Kernel access of bad area", regs, sig);