#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/tty.h>
#include <linux/module.h>

#include <asm/pgalloc.h>
#include <asm/sections.h>
#include <asm/traps.h>
#include <asm/syscalls.h>
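
/*
 * Deliver a fault signal (e.g. SIGSEGV or SIGBUS) to the current task,
 * recording the faulting address and trap number in the siginfo.
 */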
static noinline void force_sig_info_fault(const char *type, int si_signo,
					  int si_code, unsigned long address,
					  int fault_num,
					  struct task_struct *tsk,
					  struct pt_regs *regs)
{
	siginfo_t info;

	/* Signals delivered to init or the idle task are fatal. */
	if (unlikely(tsk->pid < 2))
		panic("Signal %d (code %d) at %#lx sent to %s!",
		      si_signo, si_code & 0xffff, address,
		      is_idle_task(tsk) ? "the idle task" : "init");
	/* ... */
	info.si_addr = (void __user *)address;
	info.si_trapno = fault_num;
	/* ... */
	force_sig_info_fault("atomic alignment fault", SIGBUS,
			     BUS_ADRALN, address,
			     INT_UNALIGN_DATA, current, regs);
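
/*
 * Copy the kernel page-table entry covering a vmalloc-range address
 * into the given pgd, returning the corresponding pmd.
 */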
static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
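
/*
 * Handle a fault in the vmalloc area by syncing this task's page
 * table with the reference kernel mapping; returns a negative value
 * if the fault could not be resolved this way.
 */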
static inline int vmalloc_fault(pgd_t *pgd, unsigned long address)
{
	/* ... */
	pmd_k = vmalloc_sync_one(pgd, address);
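
/*
 * Wait for a migrating PTE to settle, panicking if the page is still
 * marked as migrating after many retries.
 */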
static void wait_for_migration(pte_t *pte)
{
	/* ... */
	if (++retries > bound)
		panic("Hit migrating PTE (%#llx) and page PFN %#lx still migrating",
		      pte->val, pte_pfn(*pte));
static pgd_t *get_current_pgd(void)
{
	/* ... */
	BUG_ON(PageHighMem(pgd_page));
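
/*
 * If the faulting address hit a PTE that is currently being migrated,
 * wait for the migration to complete so the access can simply be
 * retried. Returns nonzero if the fault was handled this way.
 */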
static int handle_migrating_pte(pgd_t *pgd, int fault_num,
				unsigned long address, unsigned long pc,
				int is_kernel_mode, int write)
{
	/* ... */
	if (pgd_addr_invalid(address))
		return 0;
	/* ... */
	wait_for_migration(pte);
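
/*
 * The main fault-handling routine: look up the VMA for the faulting
 * address, ask the generic VM code to resolve the fault, and convert
 * any failure into a signal or, for unhandled kernel faults, a panic.
 */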
static int handle_page_fault(struct pt_regs *regs,
			     int fault_num,
			     int is_page_fault,
			     unsigned long address,
			     int write)
{
	/* ... */
	unsigned long stack_offset;
	/* ... */
	flags = (FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
		 (write ? FAULT_FLAG_WRITE : 0));
	/* ... */
		pr_alert("Potential stack overrun: sp %#lx\n", regs->sp);
		/* ... */
		pr_alert("Killing current process %d/%s\n",
			 tsk->pid, tsk->comm);
	/* ... */
	pgd = get_current_pgd();
	if (handle_migrating_pte(pgd, fault_num, address, regs->pc,
				 is_kernel_mode, write))
		return 1;
	/* ... */
	if (is_kernel_mode && is_page_fault &&
	    vmalloc_fault(pgd, address) >= 0)
		return 1;
		goto bad_area_nosemaphore;
	/* ... */
		goto bad_area_nosemaphore;
	/* ... */
	/* Kernel faults with no exception-table fixup are genuine bugs. */
	if (is_kernel_mode &&
	    !search_exception_tables(regs->pc))
		goto bad_area_nosemaphore;
	/* ... */
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	/* ... */
#ifdef TEST_VERIFY_AREA
	if (!is_page_fault && regs->cs == KERNEL_CS)
		pr_info("WP fault at "REGFMT"\n", regs->eip);
#endif
	/* ... */
	/* A read fault requires a readable VMA. */
	if (!is_page_fault || !(vma->vm_flags & VM_READ))
		goto bad_area;
	/* ... */
	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
		return 0;

	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}
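	/*
	 * If the fault needs a retry, allow exactly one: clear
	 * FAULT_FLAG_ALLOW_RETRY and set FAULT_FLAG_TRIED so the
	 * second attempt cannot ask to retry again.
	 */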
	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_MAJOR)
			tsk->maj_flt++;
		else
			tsk->min_flt++;
		if (fault & VM_FAULT_RETRY) {
			flags &= ~FAULT_FLAG_ALLOW_RETRY;
			flags |= FAULT_FLAG_TRIED;
			goto retry;
		}
	}
	/* ... */
#if CHIP_HAS_TILE_DMA() || CHIP_HAS_SN_PROC()
	/* If this was an asynchronous fault, restart the appropriate engine. */
	/* ... */
#if CHIP_HAS_TILE_DMA()
	/* ... */
#endif
#if CHIP_HAS_SN_PROC()
	/* ... */
#endif
#endif
bad_area_nosemaphore:
	/* User-mode accesses just cause a SIGSEGV. */
	if (!is_kernel_mode) {
		/* ... */
		force_sig_info_fault("segfault", SIGSEGV, si_code, address,
				     fault_num, tsk, regs);
		return 0;
	}
	/* ... */
#ifdef SUPPORT_LOOKUP_ADDRESS
	if (pte && pte_present(*pte) && !pte_exec_kernel(*pte))
		pr_crit("kernel tried to execute non-executable page - exploit attempt?\n");
#endif
	/* ... */
	if (address < PAGE_SIZE)
		pr_alert("Unable to handle kernel NULL pointer dereference\n");
	else
		pr_alert("Unable to handle kernel paging request\n");
	/* ... */
	panic("Kernel page fault running %s!",
	      is_idle_task(tsk) ? "the idle task" : "init");
	/* ... */
	if (is_global_init(tsk)) {
		/* ... */
	}
	/* ... */
	force_sig_info_fault("bus error", SIGBUS, BUS_ADRERR, address,
			     fault_num, tsk, regs);
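
/*
 * Panic helper for interrupt critical sections: ICS must be cleared
 * before panicking so the panic path itself can take page faults.
 */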
#define ics_panic(fmt, ...) do { \
	__insn_mtspr(SPR_INTERRUPT_CRITICAL_SECTION, 0); \
	panic(fmt, __VA_ARGS__); \
} while (0)
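
/*
 * Handle a page fault taken while in an interrupt critical section
 * (ICS): the caller packs the faulting PC and a write flag into a
 * single info word, with the write flag in the low bit.
 */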
struct intvec_state do_page_fault_ics(struct pt_regs *regs, int fault_num,
				      unsigned long address, unsigned long info)
{
	unsigned long pc = info & ~1;
	int write = info & 1;
	pgd_t *pgd = get_current_pgd();
	/* ... */
		unsigned long old_pc = regs->pc;
		regs->pc = pc;
		ics_panic("Bad ICS page fault args: old PC %#lx, fault %d/%d at %#lx\n",
			  old_pc, fault_num, write, address);
	/* ... */
	__atomic_fault_unlock(lock_ptr);
	/* ... */
	regs->sp = regs->regs[27];
	/* ... */
	__atomic_fault_unlock(lock_ptr);
	/* ... */
	if (fixup == NULL)
		ics_panic("ICS atomic fault not in table: PC %#lx, fault %d",
			  pc, fault_num);
	regs->pc = fixup->fixup;
	/* ... */
	if (handle_migrating_pte(pgd, fault_num, address, pc, 1, write))
		return state;
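
/*
 * Entry point for all page faults: decode the fault number, record
 * asynchronous (DMA or SN processor) faults in the thread struct so
 * they can be handled later, and pass synchronous faults straight to
 * handle_page_fault().
 */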
void do_page_fault(struct pt_regs *regs, int fault_num,
		   unsigned long address, unsigned long write)
{
	/* ... */
#if CHIP_HAS_TILE_DMA()
	/* ... */
#endif
	/* ... */
#if CHIP_HAS_TILE_DMA()
	/* ... */
#endif
#if CHIP_HAS_SN_PROC()
	/* ... */
#endif
#if CHIP_HAS_TILE_DMA()
	/* ... */
#endif
	/* ... */
		panic("Bad fault number %d in do_page_fault", fault_num);
	/* ... */
#if CHIP_HAS_TILE_DMA() || CHIP_HAS_SN_PROC()
	/*
	 * DMA and SN faults are delivered asynchronously; record them
	 * in the thread struct so they can be replayed once it is safe.
	 */
	/* ... */
#if CHIP_HAS_TILE_DMA()
	/* ... */
		async = &current->thread.dma_async_tlb;
#endif
#if CHIP_HAS_SN_PROC()
	/* ... */
		async = &current->thread.sn_async_tlb;
#endif
	/* ... */
		if (async->fault_num != 0)
			panic("Second async fault %d; old fault was %d (%#lx/%ld)",
			      fault_num, async->fault_num,
			      async->address, async->is_fault);
	/* ... */
	handle_page_fault(regs, fault_num, is_page_fault, address, write);
}
#if CHIP_HAS_TILE_DMA() || CHIP_HAS_SN_PROC()
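/*
 * Replay a fault that was recorded asynchronously from the DMA or SN
 * engine, now that we are back in a context where the generic fault
 * handler can safely run.
 */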
static void handle_async_page_fault(struct pt_regs *regs,
				    struct async_tlb *async)
{
	/* ... */
	handle_page_fault(regs, fault_num, async->is_fault,
			  async->address, async->write);
}
	/* ... */
#if CHIP_HAS_TILE_DMA()
	handle_async_page_fault(regs, &current->thread.dma_async_tlb);
#endif
#if CHIP_HAS_SN_PROC()
	handle_async_page_fault(regs, &current->thread.sn_async_tlb);
#endif
	/* ... */
	spin_unlock_irqrestore(&pgd_lock, flags);