/*
 * Linux kernel 3.7.1 -- arch/powerpc/kernel/ptrace.c
 * (extracted from a Doxygen HTML page; navigation text removed)
 */
1 /*
2  * PowerPC version
3  * Copyright (C) 1995-1996 Gary Thomas ([email protected])
4  *
5  * Derived from "arch/m68k/kernel/ptrace.c"
6  * Copyright (C) 1994 by Hamish Macdonald
7  * Taken from linux/kernel/ptrace.c and modified for M680x0.
8  * linux/kernel/ptrace.c is by Ross Biro 1/23/92, edited by Linus Torvalds
9  *
10  * Modified by Cort Dougan ([email protected])
11  * and Paul Mackerras ([email protected]).
12  *
13  * This file is subject to the terms and conditions of the GNU General
14  * Public License. See the file README.legal in the main directory of
15  * this archive for more details.
16  */
17 
18 #include <linux/kernel.h>
19 #include <linux/sched.h>
20 #include <linux/mm.h>
21 #include <linux/smp.h>
22 #include <linux/errno.h>
23 #include <linux/ptrace.h>
24 #include <linux/regset.h>
25 #include <linux/tracehook.h>
26 #include <linux/elf.h>
27 #include <linux/user.h>
28 #include <linux/security.h>
29 #include <linux/signal.h>
30 #include <linux/seccomp.h>
31 #include <linux/audit.h>
32 #include <trace/syscall.h>
33 #include <linux/hw_breakpoint.h>
34 #include <linux/perf_event.h>
35 
36 #include <asm/uaccess.h>
37 #include <asm/page.h>
38 #include <asm/pgtable.h>
39 #include <asm/switch_to.h>
40 
41 #define CREATE_TRACE_POINTS
42 #include <trace/events/syscalls.h>
43 
/*
 * The parameter save area on the stack is used to store arguments being passed
 * to callee function and is located at fixed offset from stack pointer.
 * The offset differs by ABI: 24 bytes on 32-bit, 48 bytes on 64-bit.
 */
#ifdef CONFIG_PPC32
#define PARAMETER_SAVE_AREA_OFFSET	24  /* bytes */
#else /* CONFIG_PPC32 */
#define PARAMETER_SAVE_AREA_OFFSET	48  /* bytes */
#endif
53 
/*
 * One entry of the name -> pt_regs byte offset table used by
 * regs_query_register_offset()/regs_query_register_name().
 */
struct pt_regs_offset {
	const char *name;	/* register name; NULL terminates the table */
	int offset;		/* offsetof(struct pt_regs, <field>) */
};
58 
#define STR(s)	#s			/* convert to string */
/* Entry for a named pt_regs field, e.g. REG_OFFSET_NAME(nip). */
#define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)}
/* Entry for one of the 32 general-purpose registers. */
#define GPR_OFFSET_NAME(num) \
	{.name = STR(gpr##num), .offset = offsetof(struct pt_regs, gpr[num])}
/* Sentinel: NULL name marks the end of regoffset_table. */
#define REG_OFFSET_END {.name = NULL, .offset = 0}
64 
65 static const struct pt_regs_offset regoffset_table[] = {
66  GPR_OFFSET_NAME(0),
67  GPR_OFFSET_NAME(1),
68  GPR_OFFSET_NAME(2),
69  GPR_OFFSET_NAME(3),
70  GPR_OFFSET_NAME(4),
71  GPR_OFFSET_NAME(5),
72  GPR_OFFSET_NAME(6),
73  GPR_OFFSET_NAME(7),
74  GPR_OFFSET_NAME(8),
75  GPR_OFFSET_NAME(9),
76  GPR_OFFSET_NAME(10),
77  GPR_OFFSET_NAME(11),
78  GPR_OFFSET_NAME(12),
79  GPR_OFFSET_NAME(13),
80  GPR_OFFSET_NAME(14),
81  GPR_OFFSET_NAME(15),
82  GPR_OFFSET_NAME(16),
83  GPR_OFFSET_NAME(17),
84  GPR_OFFSET_NAME(18),
85  GPR_OFFSET_NAME(19),
86  GPR_OFFSET_NAME(20),
87  GPR_OFFSET_NAME(21),
88  GPR_OFFSET_NAME(22),
89  GPR_OFFSET_NAME(23),
90  GPR_OFFSET_NAME(24),
91  GPR_OFFSET_NAME(25),
92  GPR_OFFSET_NAME(26),
93  GPR_OFFSET_NAME(27),
94  GPR_OFFSET_NAME(28),
95  GPR_OFFSET_NAME(29),
96  GPR_OFFSET_NAME(30),
97  GPR_OFFSET_NAME(31),
98  REG_OFFSET_NAME(nip),
99  REG_OFFSET_NAME(msr),
100  REG_OFFSET_NAME(ctr),
102  REG_OFFSET_NAME(xer),
104 #ifdef CONFIG_PPC64
105  REG_OFFSET_NAME(softe),
106 #else
107  REG_OFFSET_NAME(mq),
108 #endif
111  REG_OFFSET_NAME(dsisr),
113 };
114 
/*
 * NOTE(review): the declaration line for this function is missing from
 * this copy of the file.  From the body, this is the name -> offset
 * lookup over regoffset_table -- presumably
 * "int regs_query_register_offset(const char *name)"; confirm against
 * the original source.  Returns the pt_regs byte offset for @name, or
 * -EINVAL if no table entry matches.
 */
{
	const struct pt_regs_offset *roff;
	for (roff = regoffset_table; roff->name != NULL; roff++)
		if (!strcmp(roff->name, name))
			return roff->offset;
	return -EINVAL;
}
130 
138 const char *regs_query_register_name(unsigned int offset)
139 {
140  const struct pt_regs_offset *roff;
141  for (roff = regoffset_table; roff->name != NULL; roff++)
142  if (roff->offset == offset)
143  return roff->name;
144  return NULL;
145 }
146 
/*
 * We do not yet catch signals sent when the child dies;
 * that is handled in exit.c or in signal.c.
 */
151 
152 /*
153  * Set of msr bits that gdb can change on behalf of a process.
154  */
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
#define MSR_DEBUGCHANGE	0	/* BookE: stepping is driven by DBCR0, not MSR */
#else
#define MSR_DEBUGCHANGE	(MSR_SE | MSR_BE)	/* single-step | branch-trace */
#endif

/*
 * Max register writeable via put_reg
 */
#ifdef CONFIG_PPC32
#define PT_MAX_PUT_REG	PT_MQ
#else
#define PT_MAX_PUT_REG	PT_CCR
#endif
169 
170 static unsigned long get_user_msr(struct task_struct *task)
171 {
172  return task->thread.regs->msr | task->thread.fpexc_mode;
173 }
174 
175 static int set_user_msr(struct task_struct *task, unsigned long msr)
176 {
177  task->thread.regs->msr &= ~MSR_DEBUGCHANGE;
178  task->thread.regs->msr |= msr & MSR_DEBUGCHANGE;
179  return 0;
180 }
181 
182 /*
183  * We prevent mucking around with the reserved area of trap
184  * which are used internally by the kernel.
185  */
186 static int set_user_trap(struct task_struct *task, unsigned long trap)
187 {
188  task->thread.regs->trap = trap & 0xfff0;
189  return 0;
190 }
191 
192 /*
193  * Get contents of register REGNO in task TASK.
194  */
195 unsigned long ptrace_get_reg(struct task_struct *task, int regno)
196 {
197  if (task->thread.regs == NULL)
198  return -EIO;
199 
200  if (regno == PT_MSR)
201  return get_user_msr(task);
202 
203  if (regno < (sizeof(struct pt_regs) / sizeof(unsigned long)))
204  return ((unsigned long *)task->thread.regs)[regno];
205 
206  return -EIO;
207 }
208 
209 /*
210  * Write contents of register REGNO in task TASK.
211  */
212 int ptrace_put_reg(struct task_struct *task, int regno, unsigned long data)
213 {
214  if (task->thread.regs == NULL)
215  return -EIO;
216 
217  if (regno == PT_MSR)
218  return set_user_msr(task, data);
219  if (regno == PT_TRAP)
220  return set_user_trap(task, data);
221 
222  if (regno <= PT_MAX_PUT_REG) {
223  ((unsigned long *)task->thread.regs)[regno] = data;
224  return 0;
225  }
226  return -EIO;
227 }
228 
/*
 * regset get() for the general-purpose registers (NT_PRSTATUS view):
 * copies out gpr0-31, then the tracer-visible MSR (via get_user_msr(),
 * which folds in fpexc_mode), then the remainder of pt_regs starting
 * at orig_gpr3, zero-filling anything requested beyond pt_regs.
 */
static int gpr_get(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	int i, ret;

	if (target->thread.regs == NULL)
		return -EIO;

	if (!FULL_REGS(target->thread.regs)) {
		/* We have a partial register set.  Fill 14-31 with bogus values */
		for (i = 14; i < 32; i++)
			target->thread.regs->gpr[i] = NV_REG_POISON;
	}

	/* Everything up to (but not including) the MSR slot. */
	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				  target->thread.regs,
				  0, offsetof(struct pt_regs, msr));
	if (!ret) {
		/* MSR is synthesized, not taken verbatim from pt_regs. */
		unsigned long msr = get_user_msr(target);
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &msr,
					  offsetof(struct pt_regs, msr),
					  offsetof(struct pt_regs, msr) +
					  sizeof(msr));
	}

	/* The copyout below assumes orig_gpr3 directly follows msr. */
	BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
		     offsetof(struct pt_regs, msr) + sizeof(long));

	if (!ret)
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
					  &target->thread.regs->orig_gpr3,
					  offsetof(struct pt_regs, orig_gpr3),
					  sizeof(struct pt_regs));
	if (!ret)
		/* Zero-fill any requested bytes past the end of pt_regs. */
		ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
					       sizeof(struct pt_regs), -1);

	return ret;
}
269 
/*
 * regset set() counterpart to gpr_get(): writes gpr0-31 directly,
 * filters the MSR through set_user_msr(), writes orig_gpr3 up to
 * PT_MAX_PUT_REG, skips the read-only slots between PT_MAX_PUT_REG
 * and PT_TRAP, filters the trap word through set_user_trap(), and
 * ignores anything after it.
 */
static int gpr_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	unsigned long reg;
	int ret;

	if (target->thread.regs == NULL)
		return -EIO;

	CHECK_FULL_REGS(target->thread.regs);

	/* gpr0 .. the slot before MSR go straight into pt_regs. */
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 target->thread.regs,
				 0, PT_MSR * sizeof(reg));

	if (!ret && count > 0) {
		/* MSR must be filtered, never written directly. */
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &reg,
					 PT_MSR * sizeof(reg),
					 (PT_MSR + 1) * sizeof(reg));
		if (!ret)
			ret = set_user_msr(target, reg);
	}

	/* The copyin below assumes orig_gpr3 directly follows msr. */
	BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
		     offsetof(struct pt_regs, msr) + sizeof(long));

	if (!ret)
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					 &target->thread.regs->orig_gpr3,
					 PT_ORIG_R3 * sizeof(reg),
					 (PT_MAX_PUT_REG + 1) * sizeof(reg));

	/* Slots between PT_MAX_PUT_REG and PT_TRAP are not writable. */
	if (PT_MAX_PUT_REG + 1 < PT_TRAP && !ret)
		ret = user_regset_copyin_ignore(
			&pos, &count, &kbuf, &ubuf,
			(PT_MAX_PUT_REG + 1) * sizeof(reg),
			PT_TRAP * sizeof(reg));

	if (!ret && count > 0) {
		/* Trap word is masked by set_user_trap(). */
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &reg,
					 PT_TRAP * sizeof(reg),
					 (PT_TRAP + 1) * sizeof(reg));
		if (!ret)
			ret = set_user_trap(target, reg);
	}

	if (!ret)
		/* Everything past the trap word is silently ignored. */
		ret = user_regset_copyin_ignore(
			&pos, &count, &kbuf, &ubuf,
			(PT_TRAP + 1) * sizeof(reg), -1);

	return ret;
}
324 
/*
 * regset get() for the FP registers (NT_PRFPREG view): fpr0-31
 * followed by fpscr.  With VSX the FP values are interleaved in the
 * combined FP/VSX register file, so they are first gathered into a
 * contiguous local buffer.
 */
static int fpr_get(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
#ifdef CONFIG_VSX
	double buf[33];	/* fpr0-31 plus fpscr */
	int i;
#endif
	/* Make the thread_struct copy of the FP state current. */
	flush_fp_to_thread(target);

#ifdef CONFIG_VSX
	/* copy to local buffer then write that out */
	for (i = 0; i < 32 ; i++)
		buf[i] = target->thread.TS_FPR(i);
	memcpy(&buf[32], &target->thread.fpscr, sizeof(double));
	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, buf, 0, -1);

#else
	/* Non-VSX: the single copyout relies on fpscr following fpr31. */
	BUILD_BUG_ON(offsetof(struct thread_struct, fpscr) !=
		     offsetof(struct thread_struct, TS_FPR(32)));

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   &target->thread.fpr, 0, -1);
#endif
}
350 
/*
 * regset set() counterpart to fpr_get(): writes fpr0-31 and fpscr.
 * With VSX the incoming data is staged in a local buffer and then
 * scattered into the interleaved FP/VSX register file.
 */
static int fpr_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
#ifdef CONFIG_VSX
	double buf[33];	/* fpr0-31 plus fpscr */
	int i;
#endif
	/* Make the thread_struct copy of the FP state current. */
	flush_fp_to_thread(target);

#ifdef CONFIG_VSX
	/* copy to local buffer then write that out */
	i = user_regset_copyin(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
	if (i)
		return i;
	for (i = 0; i < 32 ; i++)
		target->thread.TS_FPR(i) = buf[i];
	memcpy(&target->thread.fpscr, &buf[32], sizeof(double));
	return 0;
#else
	/* Non-VSX: the single copyin relies on fpscr following fpr31. */
	BUILD_BUG_ON(offsetof(struct thread_struct, fpscr) !=
		     offsetof(struct thread_struct, TS_FPR(32)));

	return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				  &target->thread.fpr, 0, -1);
#endif
}
378 
379 #ifdef CONFIG_ALTIVEC
380 /*
381  * Get/set all the altivec registers vr0..vr31, vscr, vrsave, in one go.
382  * The transfer totals 34 quadword. Quadwords 0-31 contain the
383  * corresponding vector registers. Quadword 32 contains the vscr as the
384  * last word (offset 12) within that quadword. Quadword 33 contains the
385  * vrsave as the first word (offset 0) within the quadword.
386  *
387  * This definition of the VMX state is compatible with the current PPC32
388  * ptrace interface. This allows signal handling and ptrace to use the
389  * same structures. This also simplifies the implementation of a bi-arch
390  * (combined (32- and 64-bit) gdb.
391  */
392 
393 static int vr_active(struct task_struct *target,
394  const struct user_regset *regset)
395 {
396  flush_altivec_to_thread(target);
397  return target->thread.used_vr ? regset->n : 0;
398 }
399 
400 static int vr_get(struct task_struct *target, const struct user_regset *regset,
401  unsigned int pos, unsigned int count,
402  void *kbuf, void __user *ubuf)
403 {
404  int ret;
405 
406  flush_altivec_to_thread(target);
407 
408  BUILD_BUG_ON(offsetof(struct thread_struct, vscr) !=
409  offsetof(struct thread_struct, vr[32]));
410 
411  ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
412  &target->thread.vr, 0,
413  33 * sizeof(vector128));
414  if (!ret) {
415  /*
416  * Copy out only the low-order word of vrsave.
417  */
418  union {
420  u32 word;
421  } vrsave;
422  memset(&vrsave, 0, sizeof(vrsave));
423  vrsave.word = target->thread.vrsave;
424  ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &vrsave,
425  33 * sizeof(vector128), -1);
426  }
427 
428  return ret;
429 }
430 
431 static int vr_set(struct task_struct *target, const struct user_regset *regset,
432  unsigned int pos, unsigned int count,
433  const void *kbuf, const void __user *ubuf)
434 {
435  int ret;
436 
437  flush_altivec_to_thread(target);
438 
439  BUILD_BUG_ON(offsetof(struct thread_struct, vscr) !=
440  offsetof(struct thread_struct, vr[32]));
441 
442  ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
443  &target->thread.vr, 0, 33 * sizeof(vector128));
444  if (!ret && count > 0) {
445  /*
446  * We use only the first word of vrsave.
447  */
448  union {
450  u32 word;
451  } vrsave;
452  memset(&vrsave, 0, sizeof(vrsave));
453  vrsave.word = target->thread.vrsave;
454  ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &vrsave,
455  33 * sizeof(vector128), -1);
456  if (!ret)
457  target->thread.vrsave = vrsave.word;
458  }
459 
460  return ret;
461 }
462 #endif /* CONFIG_ALTIVEC */
463 
464 #ifdef CONFIG_VSX
465 /*
466  * Currently to set and and get all the vsx state, you need to call
467  * the fp and VMX calls as well. This only get/sets the lower 32
468  * 128bit VSX registers.
469  */
470 
471 static int vsr_active(struct task_struct *target,
472  const struct user_regset *regset)
473 {
474  flush_vsx_to_thread(target);
475  return target->thread.used_vsr ? regset->n : 0;
476 }
477 
478 static int vsr_get(struct task_struct *target, const struct user_regset *regset,
479  unsigned int pos, unsigned int count,
480  void *kbuf, void __user *ubuf)
481 {
482  double buf[32];
483  int ret, i;
484 
485  flush_vsx_to_thread(target);
486 
487  for (i = 0; i < 32 ; i++)
488  buf[i] = target->thread.fpr[i][TS_VSRLOWOFFSET];
489  ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
490  buf, 0, 32 * sizeof(double));
491 
492  return ret;
493 }
494 
495 static int vsr_set(struct task_struct *target, const struct user_regset *regset,
496  unsigned int pos, unsigned int count,
497  const void *kbuf, const void __user *ubuf)
498 {
499  double buf[32];
500  int ret,i;
501 
502  flush_vsx_to_thread(target);
503 
504  ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
505  buf, 0, 32 * sizeof(double));
506  for (i = 0; i < 32 ; i++)
507  target->thread.fpr[i][TS_VSRLOWOFFSET] = buf[i];
508 
509 
510  return ret;
511 }
512 #endif /* CONFIG_VSX */
513 
514 #ifdef CONFIG_SPE
515 
516 /*
517  * For get_evrregs/set_evrregs functions 'data' has the following layout:
518  *
519  * struct {
520  * u32 evr[32];
521  * u64 acc;
522  * u32 spefscr;
523  * }
524  */
525 
526 static int evr_active(struct task_struct *target,
527  const struct user_regset *regset)
528 {
529  flush_spe_to_thread(target);
530  return target->thread.used_spe ? regset->n : 0;
531 }
532 
/*
 * regset get() for the SPE state: evr[0-31], then acc and spefscr in
 * one combined copyout (see the layout comment above).
 */
static int evr_get(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	int ret;

	flush_spe_to_thread(target);

	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				  &target->thread.evr,
				  0, sizeof(target->thread.evr));

	/* The combined copyout relies on spefscr directly following acc. */
	BUILD_BUG_ON(offsetof(struct thread_struct, acc) + sizeof(u64) !=
		     offsetof(struct thread_struct, spefscr));

	if (!ret)
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
					  &target->thread.acc,
					  sizeof(target->thread.evr), -1);

	return ret;
}
555 
/*
 * regset set() counterpart to evr_get(): writes evr[0-31], then acc
 * and spefscr in one combined copyin.
 */
static int evr_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;

	flush_spe_to_thread(target);

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &target->thread.evr,
				 0, sizeof(target->thread.evr));

	/* The combined copyin relies on spefscr directly following acc. */
	BUILD_BUG_ON(offsetof(struct thread_struct, acc) + sizeof(u64) !=
		     offsetof(struct thread_struct, spefscr));

	if (!ret)
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					 &target->thread.acc,
					 sizeof(target->thread.evr), -1);

	return ret;
}
578 #endif /* CONFIG_SPE */
579 
580 
581 /*
582  * These are our native regset flavors.
583  */
587 #ifdef CONFIG_ALTIVEC
588  REGSET_VMX,
589 #endif
590 #ifdef CONFIG_VSX
591  REGSET_VSX,
592 #endif
593 #ifdef CONFIG_SPE
594  REGSET_SPE,
595 #endif
596 };
597 
598 static const struct user_regset native_regsets[] = {
599  [REGSET_GPR] = {
600  .core_note_type = NT_PRSTATUS, .n = ELF_NGREG,
601  .size = sizeof(long), .align = sizeof(long),
602  .get = gpr_get, .set = gpr_set
603  },
604  [REGSET_FPR] = {
605  .core_note_type = NT_PRFPREG, .n = ELF_NFPREG,
606  .size = sizeof(double), .align = sizeof(double),
607  .get = fpr_get, .set = fpr_set
608  },
609 #ifdef CONFIG_ALTIVEC
610  [REGSET_VMX] = {
611  .core_note_type = NT_PPC_VMX, .n = 34,
612  .size = sizeof(vector128), .align = sizeof(vector128),
613  .active = vr_active, .get = vr_get, .set = vr_set
614  },
615 #endif
616 #ifdef CONFIG_VSX
617  [REGSET_VSX] = {
618  .core_note_type = NT_PPC_VSX, .n = 32,
619  .size = sizeof(double), .align = sizeof(double),
620  .active = vsr_active, .get = vsr_get, .set = vsr_set
621  },
622 #endif
623 #ifdef CONFIG_SPE
624  [REGSET_SPE] = {
625  .n = 35,
626  .size = sizeof(u32), .align = sizeof(u32),
627  .active = evr_active, .get = evr_get, .set = evr_set
628  },
629 #endif
630 };
631 
/* Native (same-bitness) regset view handed back by task_user_regset_view(). */
static const struct user_regset_view user_ppc_native_view = {
	.name = UTS_MACHINE, .e_machine = ELF_ARCH, .ei_osabi = ELF_OSABI,
	.regsets = native_regsets, .n = ARRAY_SIZE(native_regsets)
};
636 
637 #ifdef CONFIG_PPC64
638 #include <linux/compat.h>
639 
640 static int gpr32_get(struct task_struct *target,
641  const struct user_regset *regset,
642  unsigned int pos, unsigned int count,
643  void *kbuf, void __user *ubuf)
644 {
645  const unsigned long *regs = &target->thread.regs->gpr[0];
646  compat_ulong_t *k = kbuf;
647  compat_ulong_t __user *u = ubuf;
649  int i;
650 
651  if (target->thread.regs == NULL)
652  return -EIO;
653 
654  if (!FULL_REGS(target->thread.regs)) {
655  /* We have a partial register set. Fill 14-31 with bogus values */
656  for (i = 14; i < 32; i++)
657  target->thread.regs->gpr[i] = NV_REG_POISON;
658  }
659 
660  pos /= sizeof(reg);
661  count /= sizeof(reg);
662 
663  if (kbuf)
664  for (; count > 0 && pos < PT_MSR; --count)
665  *k++ = regs[pos++];
666  else
667  for (; count > 0 && pos < PT_MSR; --count)
668  if (__put_user((compat_ulong_t) regs[pos++], u++))
669  return -EFAULT;
670 
671  if (count > 0 && pos == PT_MSR) {
672  reg = get_user_msr(target);
673  if (kbuf)
674  *k++ = reg;
675  else if (__put_user(reg, u++))
676  return -EFAULT;
677  ++pos;
678  --count;
679  }
680 
681  if (kbuf)
682  for (; count > 0 && pos < PT_REGS_COUNT; --count)
683  *k++ = regs[pos++];
684  else
685  for (; count > 0 && pos < PT_REGS_COUNT; --count)
686  if (__put_user((compat_ulong_t) regs[pos++], u++))
687  return -EFAULT;
688 
689  kbuf = k;
690  ubuf = u;
691  pos *= sizeof(reg);
692  count *= sizeof(reg);
693  return user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
694  PT_REGS_COUNT * sizeof(reg), -1);
695 }
696 
697 static int gpr32_set(struct task_struct *target,
698  const struct user_regset *regset,
699  unsigned int pos, unsigned int count,
700  const void *kbuf, const void __user *ubuf)
701 {
702  unsigned long *regs = &target->thread.regs->gpr[0];
703  const compat_ulong_t *k = kbuf;
704  const compat_ulong_t __user *u = ubuf;
706 
707  if (target->thread.regs == NULL)
708  return -EIO;
709 
710  CHECK_FULL_REGS(target->thread.regs);
711 
712  pos /= sizeof(reg);
713  count /= sizeof(reg);
714 
715  if (kbuf)
716  for (; count > 0 && pos < PT_MSR; --count)
717  regs[pos++] = *k++;
718  else
719  for (; count > 0 && pos < PT_MSR; --count) {
720  if (__get_user(reg, u++))
721  return -EFAULT;
722  regs[pos++] = reg;
723  }
724 
725 
726  if (count > 0 && pos == PT_MSR) {
727  if (kbuf)
728  reg = *k++;
729  else if (__get_user(reg, u++))
730  return -EFAULT;
731  set_user_msr(target, reg);
732  ++pos;
733  --count;
734  }
735 
736  if (kbuf) {
737  for (; count > 0 && pos <= PT_MAX_PUT_REG; --count)
738  regs[pos++] = *k++;
739  for (; count > 0 && pos < PT_TRAP; --count, ++pos)
740  ++k;
741  } else {
742  for (; count > 0 && pos <= PT_MAX_PUT_REG; --count) {
743  if (__get_user(reg, u++))
744  return -EFAULT;
745  regs[pos++] = reg;
746  }
747  for (; count > 0 && pos < PT_TRAP; --count, ++pos)
748  if (__get_user(reg, u++))
749  return -EFAULT;
750  }
751 
752  if (count > 0 && pos == PT_TRAP) {
753  if (kbuf)
754  reg = *k++;
755  else if (__get_user(reg, u++))
756  return -EFAULT;
757  set_user_trap(target, reg);
758  ++pos;
759  --count;
760  }
761 
762  kbuf = k;
763  ubuf = u;
764  pos *= sizeof(reg);
765  count *= sizeof(reg);
766  return user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
767  (PT_TRAP + 1) * sizeof(reg), -1);
768 }
769 
770 /*
771  * These are the regset flavors matching the CONFIG_PPC32 native set.
772  */
773 static const struct user_regset compat_regsets[] = {
774  [REGSET_GPR] = {
776  .size = sizeof(compat_long_t), .align = sizeof(compat_long_t),
777  .get = gpr32_get, .set = gpr32_set
778  },
779  [REGSET_FPR] = {
780  .core_note_type = NT_PRFPREG, .n = ELF_NFPREG,
781  .size = sizeof(double), .align = sizeof(double),
782  .get = fpr_get, .set = fpr_set
783  },
784 #ifdef CONFIG_ALTIVEC
785  [REGSET_VMX] = {
786  .core_note_type = NT_PPC_VMX, .n = 34,
787  .size = sizeof(vector128), .align = sizeof(vector128),
788  .active = vr_active, .get = vr_get, .set = vr_set
789  },
790 #endif
791 #ifdef CONFIG_SPE
792  [REGSET_SPE] = {
793  .core_note_type = NT_PPC_SPE, .n = 35,
794  .size = sizeof(u32), .align = sizeof(u32),
795  .active = evr_active, .get = evr_get, .set = evr_set
796  },
797 #endif
798 };
799 
/* 32-bit-compat regset view for 32-bit tracees on a 64-bit kernel. */
static const struct user_regset_view user_ppc_compat_view = {
	.name = "ppc", .e_machine = EM_PPC, .ei_osabi = ELF_OSABI,
	.regsets = compat_regsets, .n = ARRAY_SIZE(compat_regsets)
};
804 #endif /* CONFIG_PPC64 */
805 
/*
 * NOTE(review): the function signature line is missing from this copy
 * of the file.  From the body, this selects between the compat and
 * native regset views for a task -- presumably
 * "const struct user_regset_view *task_user_regset_view(struct
 * task_struct *task)"; confirm against the original source.
 */
{
#ifdef CONFIG_PPC64
	/* 32-bit tracees on a 64-bit kernel get the compat view. */
	if (test_tsk_thread_flag(task, TIF_32BIT))
		return &user_ppc_compat_view;
#endif
	return &user_ppc_native_view;
}
814 
815 
/*
 * NOTE(review): the function signature line is missing from this copy
 * of the file.  From the body, this enables hardware single-stepping
 * for a traced task -- presumably
 * "void user_enable_single_step(struct task_struct *task)"; confirm
 * against the original source.
 */
{
	struct pt_regs *regs = task->thread.regs;

	if (regs != NULL) {
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
		/* BookE: step via DBCR0 instruction-complete, not MSR. */
		task->thread.dbcr0 &= ~DBCR0_BT;
		task->thread.dbcr0 |= DBCR0_IDM | DBCR0_IC;
		regs->msr |= MSR_DE;
#else
		/* Classic: single-step bit on, branch-trace bit off. */
		regs->msr &= ~MSR_BE;
		regs->msr |= MSR_SE;
#endif
	}
	set_tsk_thread_flag(task, TIF_SINGLESTEP);
}
832 
/*
 * NOTE(review): the function signature line is missing from this copy
 * of the file.  From the body, this enables branch-trace ("block
 * step") mode for a traced task -- presumably
 * "void user_enable_block_step(struct task_struct *task)"; confirm
 * against the original source.
 */
{
	struct pt_regs *regs = task->thread.regs;

	if (regs != NULL) {
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
		/* BookE: branch-taken event instead of instruction-complete. */
		task->thread.dbcr0 &= ~DBCR0_IC;
		task->thread.dbcr0 = DBCR0_IDM | DBCR0_BT;
		regs->msr |= MSR_DE;
#else
		/* Classic: branch-trace bit on, single-step bit off. */
		regs->msr &= ~MSR_SE;
		regs->msr |= MSR_BE;
#endif
	}
	set_tsk_thread_flag(task, TIF_SINGLESTEP);
}
849 
/*
 * NOTE(review): the function signature line is missing from this copy
 * of the file.  From the body, this disables single-step/branch-trace
 * for a traced task -- presumably
 * "void user_disable_single_step(struct task_struct *task)"; confirm
 * against the original source.
 */
{
	struct pt_regs *regs = task->thread.regs;

	if (regs != NULL) {
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
		/*
		 * The logic to disable single stepping should be as
		 * simple as turning off the Instruction Complete flag.
		 * And, after doing so, if all debug flags are off, turn
		 * off DBCR0(IDM) and MSR(DE) .... Torez
		 */
		task->thread.dbcr0 &= ~DBCR0_IC;
		/*
		 * Test to see if any of the DBCR_ACTIVE_EVENTS bits are set.
		 */
		if (!DBCR_ACTIVE_EVENTS(task->thread.dbcr0,
					task->thread.dbcr1)) {
			/*
			 * All debug events were off.....
			 */
			task->thread.dbcr0 &= ~DBCR0_IDM;
			regs->msr &= ~MSR_DE;
		}
#else
		/* Classic: clear both stepping bits in the MSR. */
		regs->msr &= ~(MSR_SE | MSR_BE);
#endif
	}
	clear_tsk_thread_flag(task, TIF_SINGLESTEP);
}
880 
881 #ifdef CONFIG_HAVE_HW_BREAKPOINT
/*
 * perf callback invoked when a ptrace-installed hardware breakpoint
 * fires: disables the breakpoint to give one-shot semantics.
 */
void ptrace_triggered(struct perf_event *bp,
		      struct perf_sample_data *data, struct pt_regs *regs)
{
	struct perf_event_attr attr;

	/*
	 * Disable the breakpoint request here since ptrace has defined a
	 * one-shot behaviour for breakpoint exceptions in PPC64.
	 * The SIGTRAP signal is generated automatically for us in do_dabr().
	 * We don't have to do anything about that here
	 */
	attr = bp->attr;
	attr.disabled = true;
	modify_user_hw_breakpoint(bp, &attr);
}
897 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
898 
899 int ptrace_set_debugreg(struct task_struct *task, unsigned long addr,
900  unsigned long data)
901 {
902 #ifdef CONFIG_HAVE_HW_BREAKPOINT
903  int ret;
904  struct thread_struct *thread = &(task->thread);
905  struct perf_event *bp;
906  struct perf_event_attr attr;
907 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
908 
909  /* For ppc64 we support one DABR and no IABR's at the moment (ppc64).
910  * For embedded processors we support one DAC and no IAC's at the
911  * moment.
912  */
913  if (addr > 0)
914  return -EINVAL;
915 
916  /* The bottom 3 bits in dabr are flags */
917  if ((data & ~0x7UL) >= TASK_SIZE)
918  return -EIO;
919 
920 #ifndef CONFIG_PPC_ADV_DEBUG_REGS
921  /* For processors using DABR (i.e. 970), the bottom 3 bits are flags.
922  * It was assumed, on previous implementations, that 3 bits were
923  * passed together with the data address, fitting the design of the
924  * DABR register, as follows:
925  *
926  * bit 0: Read flag
927  * bit 1: Write flag
928  * bit 2: Breakpoint translation
929  *
930  * Thus, we use them here as so.
931  */
932 
933  /* Ensure breakpoint translation bit is set */
934  if (data && !(data & DABR_TRANSLATION))
935  return -EIO;
936 #ifdef CONFIG_HAVE_HW_BREAKPOINT
937  if (ptrace_get_breakpoints(task) < 0)
938  return -ESRCH;
939 
940  bp = thread->ptrace_bps[0];
941  if ((!data) || !(data & (DABR_DATA_WRITE | DABR_DATA_READ))) {
942  if (bp) {
944  thread->ptrace_bps[0] = NULL;
945  }
946  ptrace_put_breakpoints(task);
947  return 0;
948  }
949  if (bp) {
950  attr = bp->attr;
951  attr.bp_addr = data & ~HW_BREAKPOINT_ALIGN;
953  (DABR_DATA_WRITE | DABR_DATA_READ),
954  &attr.bp_type);
955  ret = modify_user_hw_breakpoint(bp, &attr);
956  if (ret) {
957  ptrace_put_breakpoints(task);
958  return ret;
959  }
960  thread->ptrace_bps[0] = bp;
961  ptrace_put_breakpoints(task);
962  thread->dabr = data;
963  thread->dabrx = DABRX_ALL;
964  return 0;
965  }
966 
967  /* Create a new breakpoint request if one doesn't exist already */
968  hw_breakpoint_init(&attr);
969  attr.bp_addr = data & ~HW_BREAKPOINT_ALIGN;
970  arch_bp_generic_fields(data & (DABR_DATA_WRITE | DABR_DATA_READ),
971  &attr.bp_type);
972 
973  thread->ptrace_bps[0] = bp = register_user_hw_breakpoint(&attr,
974  ptrace_triggered, NULL, task);
975  if (IS_ERR(bp)) {
976  thread->ptrace_bps[0] = NULL;
977  ptrace_put_breakpoints(task);
978  return PTR_ERR(bp);
979  }
980 
981  ptrace_put_breakpoints(task);
982 
983 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
984 
985  /* Move contents to the DABR register */
986  task->thread.dabr = data;
987  task->thread.dabrx = DABRX_ALL;
988 #else /* CONFIG_PPC_ADV_DEBUG_REGS */
989  /* As described above, it was assumed 3 bits were passed with the data
990  * address, but we will assume only the mode bits will be passed
991  * as to not cause alignment restrictions for DAC-based processors.
992  */
993 
994  /* DAC's hold the whole address without any mode flags */
995  task->thread.dac1 = data & ~0x3UL;
996 
997  if (task->thread.dac1 == 0) {
998  dbcr_dac(task) &= ~(DBCR_DAC1R | DBCR_DAC1W);
999  if (!DBCR_ACTIVE_EVENTS(task->thread.dbcr0,
1000  task->thread.dbcr1)) {
1001  task->thread.regs->msr &= ~MSR_DE;
1002  task->thread.dbcr0 &= ~DBCR0_IDM;
1003  }
1004  return 0;
1005  }
1006 
1007  /* Read or Write bits must be set */
1008 
1009  if (!(data & 0x3UL))
1010  return -EINVAL;
1011 
1012  /* Set the Internal Debugging flag (IDM bit 1) for the DBCR0
1013  register */
1014  task->thread.dbcr0 |= DBCR0_IDM;
1015 
1016  /* Check for write and read flags and set DBCR0
1017  accordingly */
1018  dbcr_dac(task) &= ~(DBCR_DAC1R|DBCR_DAC1W);
1019  if (data & 0x1UL)
1020  dbcr_dac(task) |= DBCR_DAC1R;
1021  if (data & 0x2UL)
1022  dbcr_dac(task) |= DBCR_DAC1W;
1023  task->thread.regs->msr |= MSR_DE;
1024 #endif /* CONFIG_PPC_ADV_DEBUG_REGS */
1025  return 0;
1026 }
1027 
1028 /*
1029  * Called by kernel/ptrace.c when detaching..
1030  *
1031  * Make sure single step bits etc are not set.
1032  */
1034 {
1035  /* make sure the single step bit is not set. */
1036  user_disable_single_step(child);
1037 }
1038 
1039 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
1040 static long set_intruction_bp(struct task_struct *child,
1041  struct ppc_hw_breakpoint *bp_info)
1042 {
1043  int slot;
1044  int slot1_in_use = ((child->thread.dbcr0 & DBCR0_IAC1) != 0);
1045  int slot2_in_use = ((child->thread.dbcr0 & DBCR0_IAC2) != 0);
1046  int slot3_in_use = ((child->thread.dbcr0 & DBCR0_IAC3) != 0);
1047  int slot4_in_use = ((child->thread.dbcr0 & DBCR0_IAC4) != 0);
1048 
1049  if (dbcr_iac_range(child) & DBCR_IAC12MODE)
1050  slot2_in_use = 1;
1051  if (dbcr_iac_range(child) & DBCR_IAC34MODE)
1052  slot4_in_use = 1;
1053 
1054  if (bp_info->addr >= TASK_SIZE)
1055  return -EIO;
1056 
1057  if (bp_info->addr_mode != PPC_BREAKPOINT_MODE_EXACT) {
1058 
1059  /* Make sure range is valid. */
1060  if (bp_info->addr2 >= TASK_SIZE)
1061  return -EIO;
1062 
1063  /* We need a pair of IAC regsisters */
1064  if ((!slot1_in_use) && (!slot2_in_use)) {
1065  slot = 1;
1066  child->thread.iac1 = bp_info->addr;
1067  child->thread.iac2 = bp_info->addr2;
1068  child->thread.dbcr0 |= DBCR0_IAC1;
1069  if (bp_info->addr_mode ==
1071  dbcr_iac_range(child) |= DBCR_IAC12X;
1072  else
1073  dbcr_iac_range(child) |= DBCR_IAC12I;
1074 #if CONFIG_PPC_ADV_DEBUG_IACS > 2
1075  } else if ((!slot3_in_use) && (!slot4_in_use)) {
1076  slot = 3;
1077  child->thread.iac3 = bp_info->addr;
1078  child->thread.iac4 = bp_info->addr2;
1079  child->thread.dbcr0 |= DBCR0_IAC3;
1080  if (bp_info->addr_mode ==
1082  dbcr_iac_range(child) |= DBCR_IAC34X;
1083  else
1084  dbcr_iac_range(child) |= DBCR_IAC34I;
1085 #endif
1086  } else
1087  return -ENOSPC;
1088  } else {
1089  /* We only need one. If possible leave a pair free in
1090  * case a range is needed later
1091  */
1092  if (!slot1_in_use) {
1093  /*
1094  * Don't use iac1 if iac1-iac2 are free and either
1095  * iac3 or iac4 (but not both) are free
1096  */
1097  if (slot2_in_use || (slot3_in_use == slot4_in_use)) {
1098  slot = 1;
1099  child->thread.iac1 = bp_info->addr;
1100  child->thread.dbcr0 |= DBCR0_IAC1;
1101  goto out;
1102  }
1103  }
1104  if (!slot2_in_use) {
1105  slot = 2;
1106  child->thread.iac2 = bp_info->addr;
1107  child->thread.dbcr0 |= DBCR0_IAC2;
1108 #if CONFIG_PPC_ADV_DEBUG_IACS > 2
1109  } else if (!slot3_in_use) {
1110  slot = 3;
1111  child->thread.iac3 = bp_info->addr;
1112  child->thread.dbcr0 |= DBCR0_IAC3;
1113  } else if (!slot4_in_use) {
1114  slot = 4;
1115  child->thread.iac4 = bp_info->addr;
1116  child->thread.dbcr0 |= DBCR0_IAC4;
1117 #endif
1118  } else
1119  return -ENOSPC;
1120  }
1121 out:
1122  child->thread.dbcr0 |= DBCR0_IDM;
1123  child->thread.regs->msr |= MSR_DE;
1124 
1125  return slot;
1126 }
1127 
/*
 * Release the IAC slot @slot previously handed out by
 * set_intruction_bp().  Clearing the first slot of a range pair tears
 * down the whole range; clearing the second slot of an active range is
 * rejected with -EINVAL.  Returns 0, -ENOENT if the slot is not in
 * use, or -EINVAL for an invalid slot number.
 */
static int del_instruction_bp(struct task_struct *child, int slot)
{
	switch (slot) {
	case 1:
		if ((child->thread.dbcr0 & DBCR0_IAC1) == 0)
			return -ENOENT;

		if (dbcr_iac_range(child) & DBCR_IAC12MODE) {
			/* address range - clear slots 1 & 2 */
			child->thread.iac2 = 0;
			dbcr_iac_range(child) &= ~DBCR_IAC12MODE;
		}
		child->thread.iac1 = 0;
		child->thread.dbcr0 &= ~DBCR0_IAC1;
		break;
	case 2:
		if ((child->thread.dbcr0 & DBCR0_IAC2) == 0)
			return -ENOENT;

		if (dbcr_iac_range(child) & DBCR_IAC12MODE)
			/* used in a range */
			return -EINVAL;
		child->thread.iac2 = 0;
		child->thread.dbcr0 &= ~DBCR0_IAC2;
		break;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
	case 3:
		if ((child->thread.dbcr0 & DBCR0_IAC3) == 0)
			return -ENOENT;

		if (dbcr_iac_range(child) & DBCR_IAC34MODE) {
			/* address range - clear slots 3 & 4 */
			child->thread.iac4 = 0;
			dbcr_iac_range(child) &= ~DBCR_IAC34MODE;
		}
		child->thread.iac3 = 0;
		child->thread.dbcr0 &= ~DBCR0_IAC3;
		break;
	case 4:
		if ((child->thread.dbcr0 & DBCR0_IAC4) == 0)
			return -ENOENT;

		if (dbcr_iac_range(child) & DBCR_IAC34MODE)
			/* Used in a range */
			return -EINVAL;
		child->thread.iac4 = 0;
		child->thread.dbcr0 &= ~DBCR0_IAC4;
		break;
#endif
	default:
		return -EINVAL;
	}
	return 0;
}
1182 
1183 static int set_dac(struct task_struct *child, struct ppc_hw_breakpoint *bp_info)
1184 {
1185  int byte_enable =
1187  & 0xf;
1188  int condition_mode =
1190  int slot;
1191 
1192  if (byte_enable && (condition_mode == 0))
1193  return -EINVAL;
1194 
1195  if (bp_info->addr >= TASK_SIZE)
1196  return -EIO;
1197 
1198  if ((dbcr_dac(child) & (DBCR_DAC1R | DBCR_DAC1W)) == 0) {
1199  slot = 1;
1201  dbcr_dac(child) |= DBCR_DAC1R;
1203  dbcr_dac(child) |= DBCR_DAC1W;
1204  child->thread.dac1 = (unsigned long)bp_info->addr;
1205 #if CONFIG_PPC_ADV_DEBUG_DVCS > 0
1206  if (byte_enable) {
1207  child->thread.dvc1 =
1208  (unsigned long)bp_info->condition_value;
1209  child->thread.dbcr2 |=
1210  ((byte_enable << DBCR2_DVC1BE_SHIFT) |
1211  (condition_mode << DBCR2_DVC1M_SHIFT));
1212  }
1213 #endif
1214 #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
1215  } else if (child->thread.dbcr2 & DBCR2_DAC12MODE) {
1216  /* Both dac1 and dac2 are part of a range */
1217  return -ENOSPC;
1218 #endif
1219  } else if ((dbcr_dac(child) & (DBCR_DAC2R | DBCR_DAC2W)) == 0) {
1220  slot = 2;
1222  dbcr_dac(child) |= DBCR_DAC2R;
1224  dbcr_dac(child) |= DBCR_DAC2W;
1225  child->thread.dac2 = (unsigned long)bp_info->addr;
1226 #if CONFIG_PPC_ADV_DEBUG_DVCS > 0
1227  if (byte_enable) {
1228  child->thread.dvc2 =
1229  (unsigned long)bp_info->condition_value;
1230  child->thread.dbcr2 |=
1231  ((byte_enable << DBCR2_DVC2BE_SHIFT) |
1232  (condition_mode << DBCR2_DVC2M_SHIFT));
1233  }
1234 #endif
1235  } else
1236  return -ENOSPC;
1237  child->thread.dbcr0 |= DBCR0_IDM;
1238  child->thread.regs->msr |= MSR_DE;
1239 
1240  return slot + 4;
1241 }
1242 
1243 static int del_dac(struct task_struct *child, int slot)
1244 {
1245  if (slot == 1) {
1246  if ((dbcr_dac(child) & (DBCR_DAC1R | DBCR_DAC1W)) == 0)
1247  return -ENOENT;
1248 
1249  child->thread.dac1 = 0;
1250  dbcr_dac(child) &= ~(DBCR_DAC1R | DBCR_DAC1W);
1251 #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
1252  if (child->thread.dbcr2 & DBCR2_DAC12MODE) {
1253  child->thread.dac2 = 0;
1254  child->thread.dbcr2 &= ~DBCR2_DAC12MODE;
1255  }
1256  child->thread.dbcr2 &= ~(DBCR2_DVC1M | DBCR2_DVC1BE);
1257 #endif
1258 #if CONFIG_PPC_ADV_DEBUG_DVCS > 0
1259  child->thread.dvc1 = 0;
1260 #endif
1261  } else if (slot == 2) {
1262  if ((dbcr_dac(child) & (DBCR_DAC2R | DBCR_DAC2W)) == 0)
1263  return -ENOENT;
1264 
1265 #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
1266  if (child->thread.dbcr2 & DBCR2_DAC12MODE)
1267  /* Part of a range */
1268  return -EINVAL;
1269  child->thread.dbcr2 &= ~(DBCR2_DVC2M | DBCR2_DVC2BE);
1270 #endif
1271 #if CONFIG_PPC_ADV_DEBUG_DVCS > 0
1272  child->thread.dvc2 = 0;
1273 #endif
1274  child->thread.dac2 = 0;
1275  dbcr_dac(child) &= ~(DBCR_DAC2R | DBCR_DAC2W);
1276  } else
1277  return -EINVAL;
1278 
1279  return 0;
1280 }
1281 #endif /* CONFIG_PPC_ADV_DEBUG_REGS */
1282 
1283 #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
1284 static int set_dac_range(struct task_struct *child,
1285  struct ppc_hw_breakpoint *bp_info)
1286 {
1287  int mode = bp_info->addr_mode & PPC_BREAKPOINT_MODE_MASK;
1288 
1289  /* We don't allow range watchpoints to be used with DVC */
1290  if (bp_info->condition_mode)
1291  return -EINVAL;
1292 
1293  /*
1294  * Best effort to verify the address range. The user/supervisor bits
1295  * prevent trapping in kernel space, but let's fail on an obvious bad
1296  * range. The simple test on the mask is not fool-proof, and any
1297  * exclusive range will spill over into kernel space.
1298  */
1299  if (bp_info->addr >= TASK_SIZE)
1300  return -EIO;
1301  if (mode == PPC_BREAKPOINT_MODE_MASK) {
1302  /*
1303  * dac2 is a bitmask. Don't allow a mask that makes a
1304  * kernel space address from a valid dac1 value
1305  */
1306  if (~((unsigned long)bp_info->addr2) >= TASK_SIZE)
1307  return -EIO;
1308  } else {
1309  /*
1310  * For range breakpoints, addr2 must also be a valid address
1311  */
1312  if (bp_info->addr2 >= TASK_SIZE)
1313  return -EIO;
1314  }
1315 
1316  if (child->thread.dbcr0 &
1317  (DBCR0_DAC1R | DBCR0_DAC1W | DBCR0_DAC2R | DBCR0_DAC2W))
1318  return -ENOSPC;
1319 
1321  child->thread.dbcr0 |= (DBCR0_DAC1R | DBCR0_IDM);
1323  child->thread.dbcr0 |= (DBCR0_DAC1W | DBCR0_IDM);
1324  child->thread.dac1 = bp_info->addr;
1325  child->thread.dac2 = bp_info->addr2;
1327  child->thread.dbcr2 |= DBCR2_DAC12M;
1328  else if (mode == PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE)
1329  child->thread.dbcr2 |= DBCR2_DAC12MX;
1330  else /* PPC_BREAKPOINT_MODE_MASK */
1331  child->thread.dbcr2 |= DBCR2_DAC12MM;
1332  child->thread.regs->msr |= MSR_DE;
1333 
1334  return 5;
1335 }
1336 #endif /* CONFIG_PPC_ADV_DEBUG_DAC_RANGE */
1337 
1338 static long ppc_set_hwdebug(struct task_struct *child,
1339  struct ppc_hw_breakpoint *bp_info)
1340 {
1341 #ifndef CONFIG_PPC_ADV_DEBUG_REGS
1342  unsigned long dabr;
1343 #endif
1344 
1345  if (bp_info->version != 1)
1346  return -ENOTSUPP;
1347 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
1348  /*
1349  * Check for invalid flags and combinations
1350  */
1351  if ((bp_info->trigger_type == 0) ||
1354  (bp_info->addr_mode & ~PPC_BREAKPOINT_MODE_MASK) ||
1355  (bp_info->condition_mode &
1358  return -EINVAL;
1359 #if CONFIG_PPC_ADV_DEBUG_DVCS == 0
1361  return -EINVAL;
1362 #endif
1363 
1365  if ((bp_info->trigger_type != PPC_BREAKPOINT_TRIGGER_EXECUTE) ||
1367  return -EINVAL;
1368  return set_intruction_bp(child, bp_info);
1369  }
1370  if (bp_info->addr_mode == PPC_BREAKPOINT_MODE_EXACT)
1371  return set_dac(child, bp_info);
1372 
1373 #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
1374  return set_dac_range(child, bp_info);
1375 #else
1376  return -EINVAL;
1377 #endif
1378 #else /* !CONFIG_PPC_ADV_DEBUG_DVCS */
1379  /*
1380  * We only support one data breakpoint
1381  */
1382  if ((bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_RW) == 0 ||
1383  (bp_info->trigger_type & ~PPC_BREAKPOINT_TRIGGER_RW) != 0 ||
1384  bp_info->addr_mode != PPC_BREAKPOINT_MODE_EXACT ||
1386  return -EINVAL;
1387 
1388  if (child->thread.dabr)
1389  return -ENOSPC;
1390 
1391  if ((unsigned long)bp_info->addr >= TASK_SIZE)
1392  return -EIO;
1393 
1394  dabr = (unsigned long)bp_info->addr & ~7UL;
1395  dabr |= DABR_TRANSLATION;
1397  dabr |= DABR_DATA_READ;
1399  dabr |= DABR_DATA_WRITE;
1400 
1401  child->thread.dabr = dabr;
1402  child->thread.dabrx = DABRX_ALL;
1403 
1404  return 1;
1405 #endif /* !CONFIG_PPC_ADV_DEBUG_DVCS */
1406 }
1407 
1408 static long ppc_del_hwdebug(struct task_struct *child, long addr, long data)
1409 {
1410 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
1411  int rc;
1412 
1413  if (data <= 4)
1414  rc = del_instruction_bp(child, (int)data);
1415  else
1416  rc = del_dac(child, (int)data - 4);
1417 
1418  if (!rc) {
1419  if (!DBCR_ACTIVE_EVENTS(child->thread.dbcr0,
1420  child->thread.dbcr1)) {
1421  child->thread.dbcr0 &= ~DBCR0_IDM;
1422  child->thread.regs->msr &= ~MSR_DE;
1423  }
1424  }
1425  return rc;
1426 #else
1427  if (data != 1)
1428  return -EINVAL;
1429  if (child->thread.dabr == 0)
1430  return -ENOENT;
1431 
1432  child->thread.dabr = 0;
1433 
1434  return 0;
1435 #endif
1436 }
1437 
1438 long arch_ptrace(struct task_struct *child, long request,
1439  unsigned long addr, unsigned long data)
1440 {
1441  int ret = -EPERM;
1442  void __user *datavp = (void __user *) data;
1443  unsigned long __user *datalp = datavp;
1444 
1445  switch (request) {
1446  /* read the word at location addr in the USER area. */
1447  case PTRACE_PEEKUSR: {
1448  unsigned long index, tmp;
1449 
1450  ret = -EIO;
1451  /* convert to index and check */
1452 #ifdef CONFIG_PPC32
1453  index = addr >> 2;
1454  if ((addr & 3) || (index > PT_FPSCR)
1455  || (child->thread.regs == NULL))
1456 #else
1457  index = addr >> 3;
1458  if ((addr & 7) || (index > PT_FPSCR))
1459 #endif
1460  break;
1461 
1462  CHECK_FULL_REGS(child->thread.regs);
1463  if (index < PT_FPR0) {
1464  tmp = ptrace_get_reg(child, (int) index);
1465  } else {
1466  unsigned int fpidx = index - PT_FPR0;
1467 
1468  flush_fp_to_thread(child);
1469  if (fpidx < (PT_FPSCR - PT_FPR0))
1470  tmp = ((unsigned long *)child->thread.fpr)
1471  [fpidx * TS_FPRWIDTH];
1472  else
1473  tmp = child->thread.fpscr.val;
1474  }
1475  ret = put_user(tmp, datalp);
1476  break;
1477  }
1478 
1479  /* write the word at location addr in the USER area */
1480  case PTRACE_POKEUSR: {
1481  unsigned long index;
1482 
1483  ret = -EIO;
1484  /* convert to index and check */
1485 #ifdef CONFIG_PPC32
1486  index = addr >> 2;
1487  if ((addr & 3) || (index > PT_FPSCR)
1488  || (child->thread.regs == NULL))
1489 #else
1490  index = addr >> 3;
1491  if ((addr & 7) || (index > PT_FPSCR))
1492 #endif
1493  break;
1494 
1495  CHECK_FULL_REGS(child->thread.regs);
1496  if (index < PT_FPR0) {
1497  ret = ptrace_put_reg(child, index, data);
1498  } else {
1499  unsigned int fpidx = index - PT_FPR0;
1500 
1501  flush_fp_to_thread(child);
1502  if (fpidx < (PT_FPSCR - PT_FPR0))
1503  ((unsigned long *)child->thread.fpr)
1504  [fpidx * TS_FPRWIDTH] = data;
1505  else
1506  child->thread.fpscr.val = data;
1507  ret = 0;
1508  }
1509  break;
1510  }
1511 
1512  case PPC_PTRACE_GETHWDBGINFO: {
1513  struct ppc_debug_info dbginfo;
1514 
1515  dbginfo.version = 1;
1516 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
1517  dbginfo.num_instruction_bps = CONFIG_PPC_ADV_DEBUG_IACS;
1518  dbginfo.num_data_bps = CONFIG_PPC_ADV_DEBUG_DACS;
1519  dbginfo.num_condition_regs = CONFIG_PPC_ADV_DEBUG_DVCS;
1520  dbginfo.data_bp_alignment = 4;
1521  dbginfo.sizeof_condition = 4;
1524 #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
1525  dbginfo.features |=
1528 #endif
1529 #else /* !CONFIG_PPC_ADV_DEBUG_REGS */
1530  dbginfo.num_instruction_bps = 0;
1531  dbginfo.num_data_bps = 1;
1532  dbginfo.num_condition_regs = 0;
1533 #ifdef CONFIG_PPC64
1534  dbginfo.data_bp_alignment = 8;
1535 #else
1536  dbginfo.data_bp_alignment = 4;
1537 #endif
1538  dbginfo.sizeof_condition = 0;
1539  dbginfo.features = 0;
1540 #endif /* CONFIG_PPC_ADV_DEBUG_REGS */
1541 
1542  if (!access_ok(VERIFY_WRITE, datavp,
1543  sizeof(struct ppc_debug_info)))
1544  return -EFAULT;
1545  ret = __copy_to_user(datavp, &dbginfo,
1546  sizeof(struct ppc_debug_info)) ?
1547  -EFAULT : 0;
1548  break;
1549  }
1550 
1551  case PPC_PTRACE_SETHWDEBUG: {
1552  struct ppc_hw_breakpoint bp_info;
1553 
1554  if (!access_ok(VERIFY_READ, datavp,
1555  sizeof(struct ppc_hw_breakpoint)))
1556  return -EFAULT;
1557  ret = __copy_from_user(&bp_info, datavp,
1558  sizeof(struct ppc_hw_breakpoint)) ?
1559  -EFAULT : 0;
1560  if (!ret)
1561  ret = ppc_set_hwdebug(child, &bp_info);
1562  break;
1563  }
1564 
1565  case PPC_PTRACE_DELHWDEBUG: {
1566  ret = ppc_del_hwdebug(child, addr, data);
1567  break;
1568  }
1569 
1570  case PTRACE_GET_DEBUGREG: {
1571  ret = -EINVAL;
1572  /* We only support one DABR and no IABRS at the moment */
1573  if (addr > 0)
1574  break;
1575 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
1576  ret = put_user(child->thread.dac1, datalp);
1577 #else
1578  ret = put_user(child->thread.dabr, datalp);
1579 #endif
1580  break;
1581  }
1582 
1583  case PTRACE_SET_DEBUGREG:
1584  ret = ptrace_set_debugreg(child, addr, data);
1585  break;
1586 
1587 #ifdef CONFIG_PPC64
1588  case PTRACE_GETREGS64:
1589 #endif
1590  case PTRACE_GETREGS: /* Get all pt_regs from the child. */
1591  return copy_regset_to_user(child, &user_ppc_native_view,
1592  REGSET_GPR,
1593  0, sizeof(struct pt_regs),
1594  datavp);
1595 
1596 #ifdef CONFIG_PPC64
1597  case PTRACE_SETREGS64:
1598 #endif
1599  case PTRACE_SETREGS: /* Set all gp regs in the child. */
1600  return copy_regset_from_user(child, &user_ppc_native_view,
1601  REGSET_GPR,
1602  0, sizeof(struct pt_regs),
1603  datavp);
1604 
1605  case PTRACE_GETFPREGS: /* Get the child FPU state (FPR0...31 + FPSCR) */
1606  return copy_regset_to_user(child, &user_ppc_native_view,
1607  REGSET_FPR,
1608  0, sizeof(elf_fpregset_t),
1609  datavp);
1610 
1611  case PTRACE_SETFPREGS: /* Set the child FPU state (FPR0...31 + FPSCR) */
1612  return copy_regset_from_user(child, &user_ppc_native_view,
1613  REGSET_FPR,
1614  0, sizeof(elf_fpregset_t),
1615  datavp);
1616 
1617 #ifdef CONFIG_ALTIVEC
1618  case PTRACE_GETVRREGS:
1619  return copy_regset_to_user(child, &user_ppc_native_view,
1620  REGSET_VMX,
1621  0, (33 * sizeof(vector128) +
1622  sizeof(u32)),
1623  datavp);
1624 
1625  case PTRACE_SETVRREGS:
1626  return copy_regset_from_user(child, &user_ppc_native_view,
1627  REGSET_VMX,
1628  0, (33 * sizeof(vector128) +
1629  sizeof(u32)),
1630  datavp);
1631 #endif
1632 #ifdef CONFIG_VSX
1633  case PTRACE_GETVSRREGS:
1634  return copy_regset_to_user(child, &user_ppc_native_view,
1635  REGSET_VSX,
1636  0, 32 * sizeof(double),
1637  datavp);
1638 
1639  case PTRACE_SETVSRREGS:
1640  return copy_regset_from_user(child, &user_ppc_native_view,
1641  REGSET_VSX,
1642  0, 32 * sizeof(double),
1643  datavp);
1644 #endif
1645 #ifdef CONFIG_SPE
1646  case PTRACE_GETEVRREGS:
1647  /* Get the child spe register state. */
1648  return copy_regset_to_user(child, &user_ppc_native_view,
1649  REGSET_SPE, 0, 35 * sizeof(u32),
1650  datavp);
1651 
1652  case PTRACE_SETEVRREGS:
1653  /* Set the child spe register state. */
1654  return copy_regset_from_user(child, &user_ppc_native_view,
1655  REGSET_SPE, 0, 35 * sizeof(u32),
1656  datavp);
1657 #endif
1658 
1659  default:
1660  ret = ptrace_request(child, request, addr, data);
1661  break;
1662  }
1663  return ret;
1664 }
1665 
1666 /*
1667  * We must return the syscall number to actually look up in the table.
1668  * This can be -1L to skip running any syscall at all.
1669  */
1671 {
1672  long ret = 0;
1673 
1674  secure_computing_strict(regs->gpr[0]);
1675 
1676  if (test_thread_flag(TIF_SYSCALL_TRACE) &&
1677  tracehook_report_syscall_entry(regs))
1678  /*
1679  * Tracing decided this syscall should not happen.
1680  * We'll return a bogus call number to get an ENOSYS
1681  * error, but leave the original number in regs->gpr[0].
1682  */
1683  ret = -1L;
1684 
1685  if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
1686  trace_sys_enter(regs, regs->gpr[0]);
1687 
1688 #ifdef CONFIG_PPC64
1689  if (!is_32bit_task())
1690  audit_syscall_entry(AUDIT_ARCH_PPC64,
1691  regs->gpr[0],
1692  regs->gpr[3], regs->gpr[4],
1693  regs->gpr[5], regs->gpr[6]);
1694  else
1695 #endif
1696  audit_syscall_entry(AUDIT_ARCH_PPC,
1697  regs->gpr[0],
1698  regs->gpr[3] & 0xffffffff,
1699  regs->gpr[4] & 0xffffffff,
1700  regs->gpr[5] & 0xffffffff,
1701  regs->gpr[6] & 0xffffffff);
1702 
1703  return ret ?: regs->gpr[0];
1704 }
1705 
1707 {
1708  int step;
1709 
1710  audit_syscall_exit(regs);
1711 
1712  if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
1713  trace_sys_exit(regs, regs->result);
1714 
1715  step = test_thread_flag(TIF_SINGLESTEP);
1716  if (step || test_thread_flag(TIF_SYSCALL_TRACE))
1717  tracehook_report_syscall_exit(regs, step);
1718 }