Linux Kernel 3.7.1
fault_32.c
/*
 * fault.c: Page fault handlers for the Sparc.
 *
 * Copyright (C) 1995 David S. Miller ([email protected])
 * Copyright (C) 1996 Eddie C. Dost ([email protected])
 * Copyright (C) 1997 Jakub Jelinek ([email protected])
 */

#include <asm/head.h>

#include <linux/string.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/threads.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/perf_event.h>
#include <linux/interrupt.h>
#include <linux/kdebug.h>

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/openprom.h>
#include <asm/oplib.h>
#include <asm/smp.h>
#include <asm/traps.h>
#include <asm/uaccess.h>

int show_unhandled_signals = 1;

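/* Annotation (mine, not in the original file): when non-zero,
 * otherwise-unhandled user faults are logged (rate-limited) via
 * show_signal_msg() below before the signal is delivered.
 */
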
static void unhandled_fault(unsigned long, struct task_struct *,
			    struct pt_regs *) __attribute__ ((noreturn));

static void __noreturn unhandled_fault(unsigned long address,
				       struct task_struct *tsk,
				       struct pt_regs *regs)
{
	if ((unsigned long) address < PAGE_SIZE) {
		printk(KERN_ALERT
		       "Unable to handle kernel NULL pointer dereference\n");
	} else {
		printk(KERN_ALERT "Unable to handle kernel paging request at virtual address %08lx\n",
		       address);
	}
	printk(KERN_ALERT "tsk->{mm,active_mm}->context = %08lx\n",
	       (tsk->mm ? tsk->mm->context : tsk->active_mm->context));
	printk(KERN_ALERT "tsk->{mm,active_mm}->pgd = %08lx\n",
	       (tsk->mm ? (unsigned long) tsk->mm->pgd :
		(unsigned long) tsk->active_mm->pgd));
	die_if_kernel("Oops", regs);
}

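/* Annotation (mine): this is sparc32's "Oops" path. It logs the faulting
 * task's MMU context and page-directory pointers, then hands off to
 * die_if_kernel(), which prints a register dump and kills the task; it
 * never returns, hence the __attribute__((noreturn)) / __noreturn pair
 * of declarations above.
 */
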
asmlinkage int lookup_fault(unsigned long pc, unsigned long ret_pc,
			    unsigned long address)
{
	struct pt_regs regs;
	unsigned long g2;
	unsigned int insn;
	int i;

	i = search_extables_range(ret_pc, &g2);
	switch (i) {
	case 3:
		/* load & store will be handled by fixup */
		return 3;

	case 1:
		/* store will be handled by fixup, load will bump out */
		/* for _to_ macros */
		insn = *((unsigned int *) pc);
		if ((insn >> 21) & 1)
			return 1;
		break;

	case 2:
		/* load will be handled by fixup, store will bump out */
		/* for _from_ macros */
		insn = *((unsigned int *) pc);
		if (!((insn >> 21) & 1) || ((insn >> 19) & 0x3f) == 15)
			return 2;
		break;

	default:
		break;
	}

	memset(&regs, 0, sizeof(regs));
	regs.pc = pc;
	regs.npc = pc + 4;
	__asm__ __volatile__(
		"rd %%psr, %0\n\t"
		"nop\n\t"
		"nop\n\t"
		"nop\n" : "=r" (regs.psr));
	unhandled_fault(address, current, &regs);

	/* Not reached */
	return 0;
}

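/* Decoding note (my annotation, not from the original sources): on SPARC
 * V8 a load/store is a format-3 instruction whose op3 field occupies bits
 * 24:19, so bit 21 of the word is op3 bit 2 -- set for the store flavors
 * (st/stb/sth/std are op3 0x04-0x07, vs. ld/ldub/lduh/ldd at 0x00-0x03).
 * For example, "st %g1, [%o0]" encodes op3 = 0x04, so (insn >> 21) & 1
 * evaluates to 1. The extra ((insn >> 19) & 0x3f) == 15 test above matches
 * SWAP (op3 0x0f), which both reads and writes memory and therefore needs
 * the fixup on the load side as well.
 */
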
static inline void
show_signal_msg(struct pt_regs *regs, int sig, int code,
		unsigned long address, struct task_struct *tsk)
{
	if (!unhandled_signal(tsk, sig))
		return;

	if (!printk_ratelimit())
		return;

	printk("%s%s[%d]: segfault at %lx ip %p (rpc %p) sp %p error %x",
	       task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
	       tsk->comm, task_pid_nr(tsk), address,
	       (void *)regs->pc, (void *)regs->u_regs[UREG_I7],
	       (void *)regs->u_regs[UREG_FP], code);

	print_vma_addr(KERN_CONT " in ", regs->pc);

	printk(KERN_CONT "\n");
}

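/* Illustrative only (the numbers below are made up): for a NULL-pointer
 * read in a user program, the printk above would emit something like
 *
 *   crash[1234]: segfault at 0 ip 00010074 (rpc 000100f0) sp effffa40 error 1 in crash[10000+2000]
 *
 * where "error 1" is si_code (SEGV_MAPERR) and the trailing " in ..."
 * portion is appended by print_vma_addr().
 */
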
static void __do_fault_siginfo(int code, int sig, struct pt_regs *regs,
			       unsigned long addr)
{
	siginfo_t info;

	info.si_signo = sig;
	info.si_code = code;
	info.si_errno = 0;
	info.si_addr = (void __user *) addr;
	info.si_trapno = 0;

	if (unlikely(show_unhandled_signals))
		show_signal_msg(regs, sig, info.si_code,
				addr, current);

	force_sig_info(sig, &info, current);
}

extern unsigned long safe_compute_effective_address(struct pt_regs *,
						    unsigned int);

static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
{
	unsigned int insn;

	if (text_fault)
		return regs->pc;

	if (regs->psr & PSR_PS)
		insn = *(unsigned int *) regs->pc;
	else
		__get_user(insn, (unsigned int *) regs->pc);

	return safe_compute_effective_address(regs, insn);
}

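/* Annotation (mine): for a text fault the faulting address is the PC
 * itself, so it is reported directly. For a data fault the handler
 * re-fetches the trapping instruction and decodes its operands to recover
 * the effective address. PSR_PS set means the trap came from supervisor
 * mode, where the PC can be dereferenced directly; a user-mode PC must go
 * through __get_user(), which tolerates the text page being unmapped.
 */
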
static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
				      int text_fault)
{
	unsigned long addr = compute_si_addr(regs, text_fault);

	__do_fault_siginfo(code, sig, regs, addr);
}

asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
			       unsigned long address)
{
	struct vm_area_struct *vma;
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	unsigned int fixup;
	unsigned long g2;
	int from_user = !(regs->psr & PSR_PS);
	int fault, code;
	unsigned int flags = (FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
			      (write ? FAULT_FLAG_WRITE : 0));

	if (text_fault)
		address = regs->pc;

	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 */
	code = SEGV_MAPERR;
	if (address >= TASK_SIZE)
		goto vmalloc_fault;

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
	if (in_atomic() || !mm)
		goto no_context;

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);

retry:
	down_read(&mm->mmap_sem);

	if (!from_user && address >= PAGE_OFFSET)
		goto bad_area;

	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, address))
		goto bad_area;
	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */
good_area:
	code = SEGV_ACCERR;
	if (write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	} else {
		/* Allow reads even for write-only mappings */
		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto bad_area;
	}

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(mm, vma, address, flags);

	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
		return;

	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}

	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_MAJOR) {
			current->maj_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ,
				      1, regs, address);
		} else {
			current->min_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN,
				      1, regs, address);
		}
		if (fault & VM_FAULT_RETRY) {
			flags &= ~FAULT_FLAG_ALLOW_RETRY;
			flags |= FAULT_FLAG_TRIED;

			/* No need to up_read(&mm->mmap_sem) as we would
			 * have already released it in __lock_page_or_retry
			 * in mm/filemap.c.
			 */

			goto retry;
		}
	}

	up_read(&mm->mmap_sem);
	return;

	/*
	 * Something tried to access memory that isn't in our memory map..
	 * Fix it, but check if it's kernel or user first..
	 */
bad_area:
	up_read(&mm->mmap_sem);

bad_area_nosemaphore:
	/* User mode accesses just cause a SIGSEGV */
	if (from_user) {
		do_fault_siginfo(code, SIGSEGV, regs, text_fault);
		return;
	}

	/* Is this in ex_table? */
no_context:
	g2 = regs->u_regs[UREG_G2];
	if (!from_user) {
		fixup = search_extables_range(regs->pc, &g2);
		/* Values below 10 are reserved for other things */
		if (fixup > 10) {
			extern const unsigned __memset_start[];
			extern const unsigned __memset_end[];
			extern const unsigned __csum_partial_copy_start[];
			extern const unsigned __csum_partial_copy_end[];

#ifdef DEBUG_EXCEPTIONS
			printk("Exception: PC<%08lx> faddr<%08lx>\n",
			       regs->pc, address);
			printk("EX_TABLE: insn<%08lx> fixup<%08x> g2<%08lx>\n",
			       regs->pc, fixup, g2);
#endif
			if ((regs->pc >= (unsigned long)__memset_start &&
			     regs->pc < (unsigned long)__memset_end) ||
			    (regs->pc >= (unsigned long)__csum_partial_copy_start &&
			     regs->pc < (unsigned long)__csum_partial_copy_end)) {
				regs->u_regs[UREG_I4] = address;
				regs->u_regs[UREG_I5] = regs->pc;
			}
			regs->u_regs[UREG_G2] = g2;
			regs->pc = fixup;
			regs->npc = regs->pc + 4;
			return;
		}
	}

	unhandled_fault(address, tsk, regs);
	do_exit(SIGKILL);

/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
out_of_memory:
	up_read(&mm->mmap_sem);
	if (from_user) {
		pagefault_out_of_memory();
		return;
	}
	goto no_context;

do_sigbus:
	up_read(&mm->mmap_sem);
	do_fault_siginfo(BUS_ADRERR, SIGBUS, regs, text_fault);
	if (!from_user)
		goto no_context;

vmalloc_fault:
	{
		/*
		 * Synchronize this task's top level page-table
		 * with the 'reference' page table.
		 */
		int offset = pgd_index(address);
		pgd_t *pgd, *pgd_k;
		pmd_t *pmd, *pmd_k;

		pgd = tsk->active_mm->pgd + offset;
		pgd_k = init_mm.pgd + offset;

		if (!pgd_present(*pgd)) {
			if (!pgd_present(*pgd_k))
				goto bad_area_nosemaphore;
			pgd_val(*pgd) = pgd_val(*pgd_k);
			return;
		}

		pmd = pmd_offset(pgd, address);
		pmd_k = pmd_offset(pgd_k, address);

		if (pmd_present(*pmd) || !pmd_present(*pmd_k))
			goto bad_area_nosemaphore;

		*pmd = *pmd_k;
		return;
	}
}

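/* Annotation (mine, not from the original file): the vmalloc_fault path
 * handles kernel-space addresses (>= TASK_SIZE) without taking mmap_sem,
 * which is why the function jumps there before any locking. Kernel
 * mappings are created only in init_mm.pgd, the "reference" page table;
 * each process's page directory lazily picks up a missing top-level entry
 * the first time that process faults on the corresponding kernel address.
 * Copying the single pgd/pmd entry suffices because the lower-level tables
 * themselves are shared with init_mm.
 */
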
/* This always deals with user addresses. */
static void force_user_fault(unsigned long address, int write)
{
	struct vm_area_struct *vma;
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	int code;

	code = SEGV_MAPERR;

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, address))
		goto bad_area;
good_area:
	code = SEGV_ACCERR;
	if (write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	} else {
		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto bad_area;
	}
	switch (handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0)) {
	case VM_FAULT_SIGBUS:
	case VM_FAULT_OOM:
		goto do_sigbus;
	}
	up_read(&mm->mmap_sem);
	return;
bad_area:
	up_read(&mm->mmap_sem);
	__do_fault_siginfo(code, SIGSEGV, tsk->thread.kregs, address);
	return;

do_sigbus:
	up_read(&mm->mmap_sem);
	__do_fault_siginfo(BUS_ADRERR, SIGBUS, tsk->thread.kregs, address);
}

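/* Annotation (mine): the window-fault handlers below use force_user_fault()
 * to pre-fault the user stack before a register window is spilled to or
 * filled from it. A SPARC register window save area holds 16 words
 * (%l0-%l7 and %i0-%i7, 64 bytes), so sp + 0x38 is the start of its last
 * doubleword; if that lands on a different page than sp itself, the save
 * area straddles a page boundary and both pages must be made present.
 */
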
static void check_stack_aligned(unsigned long sp)
{
	if (sp & 0x7UL)
		force_sig(SIGILL, current);
}

void window_overflow_fault(void)
{
	unsigned long sp;

	sp = current_thread_info()->rwbuf_stkptrs[0];
	if (((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
		force_user_fault(sp + 0x38, 1);
	force_user_fault(sp, 1);

	check_stack_aligned(sp);
}

void window_underflow_fault(unsigned long sp)
{
	if (((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
		force_user_fault(sp + 0x38, 0);
	force_user_fault(sp, 0);

	check_stack_aligned(sp);
}

void window_ret_fault(struct pt_regs *regs)
{
	unsigned long sp;

	sp = regs->u_regs[UREG_FP];
	if (((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
		force_user_fault(sp + 0x38, 0);
	force_user_fault(sp, 0);

	check_stack_aligned(sp);
}
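
As a user-visible illustration of the SIGSEGV path above: __do_fault_siginfo()
is what fills in the siginfo_t delivered to the process. The following is a
minimal, hypothetical userspace sketch (not part of this kernel file) that
provokes a NULL-page dereference and prints the fields the kernel filled in;
si_code should come back as SEGV_MAPERR and si_addr as the faulting address.

	#include <signal.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	static void handler(int sig, siginfo_t *info, void *ctx)
	{
		(void)ctx;
		/* printf is not async-signal-safe; acceptable for a demo. */
		printf("sig=%d si_code=%d si_addr=%p\n",
		       sig, info->si_code, info->si_addr);
		_exit(0);
	}

	int main(void)
	{
		struct sigaction sa;

		memset(&sa, 0, sizeof(sa));
		sa.sa_sigaction = handler;
		sa.sa_flags = SA_SIGINFO;
		sigaction(SIGSEGV, &sa, NULL);

		*(volatile int *)8 = 1;	/* address below PAGE_SIZE, unmapped */
		return 0;
	}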