12 #include <linux/perf_event.h>
13 #include <linux/signal.h>
14 #include <linux/sched.h>
15 #include <linux/kernel.h>
16 #include <linux/errno.h>
17 #include <linux/string.h>
18 #include <linux/types.h>
19 #include <linux/ptrace.h>
20 #include <linux/mman.h>
27 #include <linux/module.h>
32 #include <asm/asm-offsets.h>
33 #include <asm/pgtable.h>
35 #include <asm/mmu_context.h>
37 #include "../kernel/entry.h"
/*
 * Fault-handling constants.
 * NOTE(review): this chunk is a fragmentary extraction. The three
 * __FAIL_ADDR_MASK/__SUBCODE_MASK/__PF_RES_FIELD macros are defined twice
 * below with 31-bit vs 64-bit values -- the original #ifdef CONFIG_64BIT /
 * #else / #endif guards appear to have been lost by the extraction.
 * TODO: restore the conditional from the full source.
 */
40 #define __FAIL_ADDR_MASK 0x7ffff000
41 #define __SUBCODE_MASK 0x0200
42 #define __PF_RES_FIELD 0ULL
/* 64-bit variants of the same constants (page mask, subcode, resolution bit). */
44 #define __FAIL_ADDR_MASK -4096L
45 #define __SUBCODE_MASK 0x0600
46 #define __PF_RES_FIELD 0x8000000000000000ULL
/*
 * Architecture-private fault codes, chosen above the generic VM_FAULT_*
 * bits; presumably translated to signals/oops in do_fault_error -- confirm
 * against the full source (only the OOM/SIGBUS branches are visible here).
 */
49 #define VM_FAULT_BADCONTEXT 0x010000
50 #define VM_FAULT_BADMAP 0x020000
51 #define VM_FAULT_BADACCESS 0x040000
52 #define VM_FAULT_SIGNAL 0x080000
/* Mask of translation-exception-code bits indicating a store access;
 * zero until the facility check below enables it. */
54 static unsigned long store_indication;
/*
 * NOTE(review): the enclosing function definition (an init routine, by the
 * look of it) is missing from this extraction -- the two lines below are
 * its body. test_facility(2) && test_facility(75): presumably checks for
 * the store-indication facilities before enabling the 0xc00 bits used by
 * do_exception's write detection -- confirm against the full source.
 */
58 if (test_facility(2) && test_facility(75))
59 store_indication = 0xc00;
/*
 * notify_page_fault() - give kprobes a chance to claim the fault.
 * Returns nonzero if the fault was consumed by a kprobe handler.
 * NOTE(review): body is truncated in this extraction; only the
 * "built-in kprobes and faulting in kernel mode" guard is visible.
 */
62 static inline int notify_page_fault(
struct pt_regs *
regs)
67 if (kprobes_built_in() && !
user_mode(regs)) {
/*
 * user_space_fault() - decide whether the translation-exception code
 * refers to a user-space address (nonzero = user space).
 * NOTE(review): several lines are missing from this extraction. The
 * visible logic keys off the low space-identification bits of
 * trans_exc_code: value 2 defers to the task's mm_segment.ar4 flag;
 * the two contradictory returns for value 3 (== vs !=) are presumably
 * alternative branches under a lost configuration conditional -- confirm.
 */
104 static inline int user_space_fault(
unsigned long trans_exc_code)
111 if (trans_exc_code == 2)
113 return current->thread.mm_segment.ar4;
116 return trans_exc_code == 3;
123 return trans_exc_code != 3;
/*
 * report_user_fault() - rate-limited console diagnostic for a fault being
 * delivered to user space as signal 'signr'. Bails out early when
 * printk_ratelimit() says to stay quiet.
 * NOTE(review): body truncated in this extraction.
 */
126 static inline void report_user_fault(
struct pt_regs *regs,
long signr)
132 if (!printk_ratelimit())
/*
 * NOTE(review): the call below belongs to a different, truncated function
 * (the SIGSEGV delivery path, presumably do_sigsegv) -- confirm.
 */
151 report_user_fault(regs,
SIGSEGV);
/*
 * NOTE(review): fragments of the kernel-mode fault reporting path; the
 * printk call heads and surrounding control flow are missing from this
 * extraction. The two format tails distinguish a fault on a kernel
 * address from a kernel-mode fault on a user address.
 */
177 " at virtual kernel address %p\n", (
void *)address);
180 " at virtual user address %p\n", (
void *)address);
/* Low-address protection hit in kernel mode is unrecoverable: oops. */
192 die (regs,
"Low-address protection");
/* SIGSEGV delivery with the si_code computed by the (missing) caller. */
227 do_sigsegv(regs, si_code);
/*
 * Fragment of the fault-error dispatch: VM_FAULT_OOM and VM_FAULT_SIGBUS
 * are handled in separate branches; the branch bodies are missing from
 * this extraction.
 */
238 if (fault & VM_FAULT_OOM) {
243 }
else if (fault & VM_FAULT_SIGBUS) {
/*
 * do_exception() - common page-fault work: look up the VMA, call the
 * generic fault handler, and encode failures as VM_FAULT_* codes for
 * do_fault_error(). 'access' is the VM_READ/VM_WRITE/VM_EXEC rights the
 * faulting access needs.
 * NOTE(review): large parts of the body are missing from this extraction;
 * comments below describe only the visible lines.
 */
266 static inline int do_exception(
struct pt_regs *regs,
int access)
271 unsigned long trans_exc_code;
/* Let kprobes claim the fault before doing any mm work. */
276 if (notify_page_fault(regs))
294 flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
/*
 * Treat as a write if the caller asked for write access, or if the
 * hardware store-indication bits (0x400 within store_indication's
 * 0xc00 mask) flag the faulting access as a store.
 */
295 if (access == VM_WRITE || (trans_exc_code & store_indication) == 0x400)
296 flags |= FAULT_FLAG_WRITE;
308 fault = VM_FAULT_OOM;
/* Address below vma->vm_start: only OK for a growing stack VMA. */
321 if (!(vma->
vm_flags & VM_GROWSDOWN))
335 if (is_vm_hugetlb_page(vma))
/* Fatal signal arrived while the fault was being retried: give up. */
344 if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(
current)) {
348 if (
unlikely(fault & VM_FAULT_ERROR))
/* Retry bookkeeping: count major/minor, then retry once without
 * ALLOW_RETRY but with TRIED set, per the generic mm contract. */
356 if (flags & FAULT_FLAG_ALLOW_RETRY) {
357 if (fault & VM_FAULT_MAJOR) {
366 if (fault & VM_FAULT_RETRY) {
369 flags &= ~FAULT_FLAG_ALLOW_RETRY;
370 flags |= FAULT_FLAG_TRIED;
/*
 * NOTE(review): fragments of two first-level fault handlers; their
 * signatures are missing from this extraction.
 */
389 unsigned long trans_exc_code;
/*
 * Protection-exception path: with bit 0x4 of the translation-exception
 * code clear this is low-address protection, handled separately; a
 * protection fault is otherwise always a write access.
 */
400 if (
unlikely(!(trans_exc_code & 4))) {
401 do_low_address(regs);
404 fault = do_exception(regs, VM_WRITE);
406 do_fault_error(regs, fault);
/* DAT-exception path: any access type may have faulted. */
413 access = VM_READ | VM_EXEC | VM_WRITE;
414 fault = do_exception(regs, access);
416 do_fault_error(regs, fault);
/*
 * NOTE(review): fragments of two truncated functions. The first looks up
 * the VMA covering the failing address (masked out of the translation-
 * exception code); the second builds a fake pt_regs to drive the fault
 * path from kernel code and returns -EFAULT on failure, 0 on success.
 */
424 unsigned long trans_exc_code;
431 vma =
find_vma(mm, trans_exc_code & __FAIL_ADDR_MASK);
/* Fake a return PSW so the fault appears to come from our caller. */
459 regs.
psw.addr = (
unsigned long) __builtin_return_address(0);
463 access = write ? VM_WRITE : VM_READ;
/* Fixed: '(®s' was '(&regs' mangled by an HTML-entity decode
 * ('&reg' -> U+00AE). */
464 fault = do_exception(&regs, access);
471 return fault ? -
EFAULT : 0;
/* Nonzero disables the pseudo-page-fault (pfault) facility. */
478 static int pfault_disable;
/* Parameter block for the DIAG 0x258 pseudo-page-fault interface. */
488 struct pfault_refbk {
/*
 * pfault init fragment: register this guest with the hypervisor's
 * pseudo-page-fault facility. refselmk/refcmpmk select bit 48 --
 * presumably the PSW bit compared to decide completion-interrupt
 * delivery; confirm against the z/VM DIAGNOSE X'258' documentation.
 * refgaddr points DIAG at the lowcore's current-PID field so completion
 * interrupts carry the faulting task's pid.
 */
501 struct pfault_refbk refbk = {
506 .refgaddr = __LC_CURRENT_PID,
507 .refselmk = 1ULL << 48,
508 .refcmpmk = 1ULL << 48,
/* Issue DIAGNOSE 0x258; the condition code lands in rc. */
515 " diag %1,%0,0x258\n"
520 :
"=d" (rc) :
"a" (&refbk),
"m" (refbk) :
"cc");
/* pfault teardown fragment: a second DIAG 0x258 deregisters the guest. */
526 struct pfault_refbk refbk = {
539 : :
"a" (&refbk),
"m" (refbk) :
"cc");
/*
 * pfault_interrupt() fragment - external-interrupt handler for pseudo-
 * page-fault initial and completion interrupts.
 * thread.pfault_wait state machine (as visible here):
 *    1 = task parked waiting for completion,
 *   -1 = completion arrived before the initial interrupt,
 *    0 = no pfault outstanding.
 * NOTE(review): signature head and several lines are missing from this
 * extraction; comments describe only the visible logic.
 */
546 unsigned int param32,
unsigned long param64)
/* The faulting task's pid arrives 32-bit or 64-bit depending on mode. */
563 pid =
sizeof(
void *) == 4 ? param32 : param64;
571 spin_lock(&pfault_lock);
/* Subcode bit 0x0080: this is a completion interrupt. */
572 if (subcode & 0x0080) {
574 if (tsk->
thread.pfault_wait == 1) {
/* Task was parked: clear the wait state and (presumably) wake it. */
580 tsk->
thread.pfault_wait = 0;
583 put_task_struct(tsk);
/* Completion beat the initial interrupt: remember it with -1. */
593 tsk->
thread.pfault_wait = -1;
/* Initial interrupt paths: */
599 if (tsk->
thread.pfault_wait == 1) {
/* Already waiting (presumably a leftover/ignored case): just
 * ask for a reschedule. */
602 set_tsk_need_resched(tsk);
603 }
else if (tsk->
thread.pfault_wait == -1) {
/* Completion already arrived: nothing to wait for. */
607 tsk->
thread.pfault_wait = 0;
/* Normal case: mark the task waiting and queue it on pfault_list. */
615 tsk->
thread.pfault_wait = 1;
616 list_add(&tsk->
thread.list, &pfault_list);
618 set_tsk_need_resched(tsk);
622 spin_unlock(&pfault_lock);
623 put_task_struct(tsk);
/*
 * NOTE(review): tail of a CPU-hotplug notifier -- presumably flushes
 * pfault_wait state for tasks on a dying CPU under pfault_lock (IRQ-safe
 * variant, since this runs in process context). Signature head missing.
 */
627 unsigned long action,
void *hcpu)
634 spin_lock_irq(&pfault_lock);
/* Drop the reference taken when the task was queued on pfault_list. */
640 put_task_struct(tsk);
642 spin_unlock_irq(&pfault_lock);
/*
 * pfault_irq_init() - boot-time registration of the pfault external
 * interrupt. NOTE(review): body missing from this extraction.
 */
650 static int __init pfault_irq_init(
void)