#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/prefetch.h>

#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/uaccess.h>
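/* die() is defined in arch/ia64/kernel/traps.c; it reports the oops. */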
extern int die(char *, struct pt_regs *, long);
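/*
 * kprobes hook: lets a registered kprobe fault handler claim the fault
 * before the normal page-fault path runs (a stub returning 0 when
 * CONFIG_KPROBES is not set).
 */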
static inline int notify_page_fault(struct pt_regs *regs, int trap)
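/*
 * Return TRUE if ADDRESS points at a page in the kernel's mapped segment
 * (inside region 5, on ia64) and that page is present.
 */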
static int mapped_kernel_page_is_present (unsigned long address)
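/*
 * Bit positions of VM_READ/VM_WRITE/VM_EXEC inside vma->vm_flags; the
 * preprocessor check below keeps them in sync with <linux/mm.h>.
 */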
# define VM_READ_BIT  0
# define VM_WRITE_BIT 1
# define VM_EXEC_BIT  2
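        /*
         * In ia64_do_page_fault(): allow the fault to be retried and to be
         * interrupted by a fatal signal.
         */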
        unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
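        /* mask holds the write/execute permission bits decoded from the ISR. */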
        flags |= ((mask & VM_WRITE) ? FAULT_FLAG_WRITE : 0);
#ifdef CONFIG_VIRTUAL_MEM_MAP
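        /*
         * A kernel-mode fault in region 5 has no VMA to look up (and
         * mmap_sem may already be held), so it skips the VMA walk and goes
         * straight to the exception-handling code.
         */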
        if (!vma && !prev_vma)
                goto bad_area;
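        /*
         * find_vma_prev() may return a VMA that does not yet cover the
         * address: the area below could be the register backing store that
         * grows upward, or the VMA above could be a stack that grows down.
         */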
        if ((!vma && prev_vma) || (address < vma->vm_start))
                goto check_expansion;
# if (((1 << VM_READ_BIT) != VM_READ || (1 << VM_WRITE_BIT) != VM_WRITE) \
     || (1 << VM_EXEC_BIT) != VM_EXEC)
#  error File is out of sync with <linux/mm.h>. Please update.
# endif
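        /*
         * fault holds the VM_FAULT_* result of handle_mm_fault(), called
         * once the access has been checked against vma->vm_flags.
         */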
        if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
                return;
        if (unlikely(fault & VM_FAULT_ERROR)) {
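                /*
                 * Out of memory, or some other condition that prevented the
                 * fault from being handled gracefully.
                 */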
                if (fault & VM_FAULT_OOM) {
                        goto out_of_memory;
                } else if (fault & VM_FAULT_SIGBUS) {
                        signal = SIGBUS;
                        goto bad_area;
                }
        if (flags & FAULT_FLAG_ALLOW_RETRY) {
                if (fault & VM_FAULT_MAJOR)
                        current->maj_flt++;
                else
                        current->min_flt++;
                if (fault & VM_FAULT_RETRY) {
                        flags &= ~FAULT_FLAG_ALLOW_RETRY;
                        flags |= FAULT_FLAG_TRIED;
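                        /*
                         * mmap_sem was already dropped by
                         * __lock_page_or_retry() in mm/filemap.c, so the
                         * retry path can simply take it again.
                         */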
        if (!(prev_vma && (prev_vma->vm_flags & VM_GROWSUP) && (address == prev_vma->vm_end))) {
                if (!(vma->vm_flags & VM_GROWSDOWN))
                        goto bad_area;
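                /*
                 * In the other branch the faulting address sits right at
                 * prev_vma->vm_end: the register backing store, which grows
                 * upward by at most one page at a time.
                 */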
                if (expand_upwards(vma, address))
                        goto bad_area;
#ifdef CONFIG_VIRTUAL_MEM_MAP
  bad_area_no_up:
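        /* Entered from the region-5 fast path, which never took mmap_sem. */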
                si.si_addr = (void __user *) address;
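                /*
                 * The remaining siginfo fields (si_signo, si_code, si_isr)
                 * are also filled in here, and force_sig_info() delivers
                 * SIGSEGV or SIGBUS to the faulting user task.
                 */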
        if ((isr & IA64_ISR_SP)
            || ((isr & IA64_ISR_NA) && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH)) {
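                /*
                 * The fault came from a speculative load or lfetch.fault;
                 * setting the "ed" bit in the psr guarantees forward
                 * progress (the target register gets a NaT instead).
                 */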
        if (REGION_NUMBER(address) == 5 && mapped_kernel_page_is_present(address))
                return;
        if (ia64_done_with_exception(regs))
                return;
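        /*
         * No stale VHPT translation and no exception-table fixup: the kernel
         * really touched a bad address, so report an oops.
         */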
        if (address < PAGE_SIZE)
                printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference (address %016lx)\n", address);
        else
                printk(KERN_ALERT "Unable to handle kernel paging request at "
                       "virtual address %016lx\n", address);
        if (die("Oops", regs, isr))
                regs = NULL;