Linux Kernel  3.7.1
ptrace.c
1 /*
2  * Kernel support for the ptrace() and syscall tracing interfaces.
3  *
4  * Copyright (C) 1999-2005 Hewlett-Packard Co
5  * David Mosberger-Tang <[email protected]>
6  * Copyright (C) 2006 Intel Co
7  * 2006-08-12 - IA64 Native Utrace implementation support added by
8  * Anil S Keshavamurthy <[email protected]>
9  *
10  * Derived from the x86 and Alpha versions.
11  */
12 #include <linux/kernel.h>
13 #include <linux/sched.h>
14 #include <linux/mm.h>
15 #include <linux/errno.h>
16 #include <linux/ptrace.h>
17 #include <linux/user.h>
18 #include <linux/security.h>
19 #include <linux/audit.h>
20 #include <linux/signal.h>
21 #include <linux/regset.h>
22 #include <linux/elf.h>
23 #include <linux/tracehook.h>
24 
25 #include <asm/pgtable.h>
26 #include <asm/processor.h>
27 #include <asm/ptrace_offsets.h>
28 #include <asm/rse.h>
29 #include <asm/uaccess.h>
30 #include <asm/unwind.h>
31 #ifdef CONFIG_PERFMON
32 #include <asm/perfmon.h>
33 #endif
34 
35 #include "entry.h"
36 
37 /*
38  * Bits in the PSR that we allow ptrace() to change:
39  * be, up, ac, mfl, mfh (the user mask; five bits total)
40  * db (debug breakpoint fault; one bit)
41  * id (instruction debug fault disable; one bit)
42  * dd (data debug fault disable; one bit)
43  * ri (restart instruction; two bits)
44  * is (instruction set; one bit)
45  */
46 #define IPSR_MASK (IA64_PSR_UM | IA64_PSR_DB | IA64_PSR_IS \
47  | IA64_PSR_ID | IA64_PSR_DD | IA64_PSR_RI)
48 
49 #define MASK(nbits) ((1UL << (nbits)) - 1) /* mask with NBITS bits set */
50 #define PFM_MASK MASK(38)
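As a quick check of the MASK() arithmetic above: MASK(nbits) sets the nbits low-order bits, so PFM_MASK covers the 38 CFM bits that the code compares later on. A minimal user-space sketch (hypothetical, not part of this file) demonstrating the macro:

    #include <assert.h>
    #include <stdio.h>

    #define MASK(nbits) ((1UL << (nbits)) - 1)   /* nbits low-order bits set */

    int main(void)
    {
            assert(MASK(3)  == 0x7UL);            /* 0b111 */
            assert(MASK(38) == 0x3fffffffffUL);   /* the value of PFM_MASK */
            printf("MASK(38) = %#lx\n", MASK(38));
            return 0;
    }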
51 
52 #define PTRACE_DEBUG 0
53 
54 #if PTRACE_DEBUG
55 # define dprintk(format...) printk(format)
56 # define inline
57 #else
58 # define dprintk(format...)
59 #endif
60 
61 /* Return TRUE if PT was created due to kernel-entry via a system-call. */
62 
63 static inline int
64 in_syscall (struct pt_regs *pt)
65 {
66  return (long) pt->cr_ifs >= 0;
67 }
68 
69 /*
70  * Collect the NaT bits for r1-r31 from scratch_unat and return a NaT
71  * bitset where bit i is set iff the NaT bit of register i is set.
72  */
73 unsigned long
74 ia64_get_scratch_nat_bits (struct pt_regs *pt, unsigned long scratch_unat)
75 {
76 # define GET_BITS(first, last, unat) \
77  ({ \
78  unsigned long bit = ia64_unat_pos(&pt->r##first); \
79  unsigned long nbits = (last - first + 1); \
80  unsigned long mask = MASK(nbits) << first; \
81  unsigned long dist; \
82  if (bit < first) \
83  dist = 64 + bit - first; \
84  else \
85  dist = bit - first; \
86  ia64_rotr(unat, dist) & mask; \
87  })
88  unsigned long val;
89 
90  /*
91  * Registers that are stored consecutively in struct pt_regs
92  * can be handled in parallel. If the register order in
93  * struct_pt_regs changes, this code MUST be updated.
94  */
95  val = GET_BITS( 1, 1, scratch_unat);
96  val |= GET_BITS( 2, 3, scratch_unat);
97  val |= GET_BITS(12, 13, scratch_unat);
98  val |= GET_BITS(14, 14, scratch_unat);
99  val |= GET_BITS(15, 15, scratch_unat);
100  val |= GET_BITS( 8, 11, scratch_unat);
101  val |= GET_BITS(16, 31, scratch_unat);
102  return val;
103 
104 # undef GET_BITS
105 }
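The GET_BITS() macro above is dense: scratch_unat holds each scratch register's NaT bit at the bit position given by that register's slot in struct pt_regs (via ia64_unat_pos), and the rotate-right realigns a consecutive run of those bits so that register i's NaT ends up at bit i of the result. A hedged stand-alone sketch of the same rotate-and-mask step, using a generic rotate helper instead of the ia64_rotr() intrinsic:

    #include <stdint.h>

    static uint64_t rotr64(uint64_t x, unsigned n)        /* rotate right by n bits */
    {
            return n ? (x >> n) | (x << (64 - n)) : x;
    }

    /* NaT bits for registers first..last start at bit position 'bit' of unat;
     * return them repositioned so that register 'first' maps to bit 'first'. */
    static uint64_t realign_nat_bits(uint64_t unat, unsigned bit,
                                     unsigned first, unsigned nbits)
    {
            uint64_t mask = ((1ULL << nbits) - 1) << first;   /* nbits < 64 here */
            unsigned dist = (bit < first) ? 64 + bit - first : bit - first;

            return rotr64(unat, dist) & mask;
    }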
106 
107 /*
108  * Set the NaT bits for the scratch registers according to NAT and
109  * return the resulting unat (assuming the scratch registers are
110  * stored in PT).
111  */
112 unsigned long
113 ia64_put_scratch_nat_bits (struct pt_regs *pt, unsigned long nat)
114 {
115 # define PUT_BITS(first, last, nat) \
116  ({ \
117  unsigned long bit = ia64_unat_pos(&pt->r##first); \
118  unsigned long nbits = (last - first + 1); \
119  unsigned long mask = MASK(nbits) << first; \
120  long dist; \
121  if (bit < first) \
122  dist = 64 + bit - first; \
123  else \
124  dist = bit - first; \
125  ia64_rotl(nat & mask, dist); \
126  })
127  unsigned long scratch_unat;
128 
129  /*
130  * Registers that are stored consecutively in struct pt_regs
131  * can be handled in parallel. If the register order in
132  * struct_pt_regs changes, this code MUST be updated.
133  */
134  scratch_unat = PUT_BITS( 1, 1, nat);
135  scratch_unat |= PUT_BITS( 2, 3, nat);
136  scratch_unat |= PUT_BITS(12, 13, nat);
137  scratch_unat |= PUT_BITS(14, 14, nat);
138  scratch_unat |= PUT_BITS(15, 15, nat);
139  scratch_unat |= PUT_BITS( 8, 11, nat);
140  scratch_unat |= PUT_BITS(16, 31, nat);
141 
142  return scratch_unat;
143 
144 # undef PUT_BITS
145 }
146 
147 #define IA64_MLX_TEMPLATE 0x2
148 #define IA64_MOVL_OPCODE 6
149 
150 void
151 ia64_increment_ip (struct pt_regs *regs)
152 {
153  unsigned long w0, ri = ia64_psr(regs)->ri + 1;
154 
155  if (ri > 2) {
156  ri = 0;
157  regs->cr_iip += 16;
158  } else if (ri == 2) {
159  get_user(w0, (char __user *) regs->cr_iip + 0);
160  if (((w0 >> 1) & 0xf) == IA64_MLX_TEMPLATE) {
161  /*
162  * rfi'ing to slot 2 of an MLX bundle causes
163  * an illegal operation fault. We don't want
164  * that to happen...
165  */
166  ri = 0;
167  regs->cr_iip += 16;
168  }
169  }
170  ia64_psr(regs)->ri = ri;
171 }
172 
173 void
174 ia64_decrement_ip (struct pt_regs *regs)
175 {
176  unsigned long w0, ri = ia64_psr(regs)->ri - 1;
177 
178  if (ia64_psr(regs)->ri == 0) {
179  regs->cr_iip -= 16;
180  ri = 2;
181  get_user(w0, (char __user *) regs->cr_iip + 0);
182  if (((w0 >> 1) & 0xf) == IA64_MLX_TEMPLATE) {
183  /*
184  * rfi'ing to slot 2 of an MLX bundle causes
185  * an illegal operation fault. We don't want
186  * that to happen...
187  */
188  ri = 1;
189  }
190  }
191  ia64_psr(regs)->ri = ri;
192 }
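Both helpers above step cr.iip in units of IA-64 bundles: a bundle is 16 bytes and holds three instruction slots, with psr.ri selecting the current slot (0-2). Slot 2 of an MLX bundle is skipped because it holds the second half of a movl, and resuming there would fault. A simplified sketch of the slot arithmetic (hypothetical types, MLX handling omitted):

    /* Hypothetical sketch of bundle/slot stepping; the real code above also
     * skips slot 2 of MLX bundles, which this simplified version omits. */
    struct ip_state { unsigned long iip; unsigned ri; };   /* bundle address + slot */

    static void step_forward(struct ip_state *s)
    {
            if (++s->ri > 2) {          /* past slot 2: advance to the next bundle */
                    s->ri = 0;
                    s->iip += 16;
            }
    }

    static void step_back(struct ip_state *s)
    {
            if (s->ri == 0) {           /* before slot 0: back up one bundle */
                    s->ri = 2;
                    s->iip -= 16;
            } else {
                    s->ri--;
            }
    }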
193 
194 /*
195  * This routine is used to read the rnat bits that are stored on the
196  * kernel backing store. Since, in general, the alignment of the user
197  * and kernel are different, this is not completely trivial. In
198  * essence, we need to construct the user RNAT based on up to two
199  * kernel RNAT values and/or the RNAT value saved in the child's
200  * pt_regs.
201  *
202  * user rbs
203  *
204  *  +--------+ <-- lowest address
205  *  | slot62 |
206  *  +--------+
207  *  |  rnat  | 0x....1f8
208  *  +--------+
209  *  | slot00 | \
210  *  +--------+ |
211  *  | slot01 | > child_regs->ar_rnat
212  *  +--------+ |
213  *  | slot02 | /                           kernel rbs
214  *  +--------+                             +--------+
215  *             <-- child_regs->ar_bspstore | slot61 | <-- krbs
216  *  +- - - - +                             +--------+
217  *                                         | slot62 |
218  *  +- - - - +                             +--------+
219  *                                         |  rnat  |
220  *  +- - - - +                             +--------+
221  *    vrnat                                | slot00 |
222  *  +- - - - +                             +--------+
223  *                                         =        =
224  *                                         +--------+
225  *                                         | slot00 | \
226  *                                         +--------+ |
227  *                                         | slot01 | > child_stack->ar_rnat
228  *                                         +--------+ |
229  *                                         | slot02 | /
230  *                                         +--------+
231  *                                           <--- child_stack->ar_bspstore
232  *
233  * The way to think of this code is as follows: bit 0 in the user rnat
234  * corresponds to some bit N (0 <= N <= 62) in one of the kernel rnat
235  * value. The kernel rnat value holding this bit is stored in
236  * variable rnat0. rnat1 is loaded with the kernel rnat value that
237  * forms the upper bits of the user rnat value.
238  *
239  * Boundary cases:
240  *
241  * o when reading the rnat "below" the first rnat slot on the kernel
242  * backing store, rnat0/rnat1 are set to 0 and the low order bits are
243  * merged in from pt->ar_rnat.
244  *
245  * o when reading the rnat "above" the last rnat slot on the kernel
246  * backing store, rnat0/rnat1 gets its value from sw->ar_rnat.
247  */
248 static unsigned long
249 get_rnat (struct task_struct *task, struct switch_stack *sw,
250  unsigned long *krbs, unsigned long *urnat_addr,
251  unsigned long *urbs_end)
252 {
253  unsigned long rnat0 = 0, rnat1 = 0, urnat = 0, *slot0_kaddr;
254  unsigned long umask = 0, mask, m;
255  unsigned long *kbsp, *ubspstore, *rnat0_kaddr, *rnat1_kaddr, shift;
256  long num_regs, nbits;
257  struct pt_regs *pt;
258 
259  pt = task_pt_regs(task);
260  kbsp = (unsigned long *) sw->ar_bspstore;
261  ubspstore = (unsigned long *) pt->ar_bspstore;
262 
263  if (urbs_end < urnat_addr)
264  nbits = ia64_rse_num_regs(urnat_addr - 63, urbs_end);
265  else
266  nbits = 63;
267  mask = MASK(nbits);
268  /*
269  * First, figure out which bit number slot 0 in user-land maps
270  * to in the kernel rnat. Do this by figuring out how many
271  * register slots we're beyond the user's backingstore and
272  * then computing the equivalent address in kernel space.
273  */
274  num_regs = ia64_rse_num_regs(ubspstore, urnat_addr + 1);
275  slot0_kaddr = ia64_rse_skip_regs(krbs, num_regs);
276  shift = ia64_rse_slot_num(slot0_kaddr);
277  rnat1_kaddr = ia64_rse_rnat_addr(slot0_kaddr);
278  rnat0_kaddr = rnat1_kaddr - 64;
279 
280  if (ubspstore + 63 > urnat_addr) {
281  /* some bits need to be merged in from pt->ar_rnat */
282  umask = MASK(ia64_rse_slot_num(ubspstore)) & mask;
283  urnat = (pt->ar_rnat & umask);
284  mask &= ~umask;
285  if (!mask)
286  return urnat;
287  }
288 
289  m = mask << shift;
290  if (rnat0_kaddr >= kbsp)
291  rnat0 = sw->ar_rnat;
292  else if (rnat0_kaddr > krbs)
293  rnat0 = *rnat0_kaddr;
294  urnat |= (rnat0 & m) >> shift;
295 
296  m = mask >> (63 - shift);
297  if (rnat1_kaddr >= kbsp)
298  rnat1 = sw->ar_rnat;
299  else if (rnat1_kaddr > krbs)
300  rnat1 = *rnat1_kaddr;
301  urnat |= (rnat1 & m) << (63 - shift);
302  return urnat;
303 }
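get_rnat() relies on the backing-store addressing helpers from <asm/rse.h>: the RSE stores an RNaT collection word after every 63 register slots, at the address within each 0x200-byte block whose bits 3-8 are all ones (offset 0x1f8, as in the diagram above). A small illustrative restatement of that addressing rule (the real helpers are ia64_rse_is_rnat_slot(), ia64_rse_rnat_addr() and ia64_rse_slot_num()):

    #include <stdint.h>

    static int is_rnat_slot(uint64_t addr)            /* slot 63 of its block? */
    {
            return ((addr >> 3) & 0x3f) == 0x3f;
    }

    static uint64_t rnat_addr_for(uint64_t slot_addr) /* RNaT word covering slot_addr */
    {
            return slot_addr | 0x1f8;
    }

    static unsigned slot_num(uint64_t addr)           /* which of 64 slots in a block */
    {
            return (addr >> 3) & 0x3f;
    }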
304 
305 /*
306  * The reverse of get_rnat.
307  */
308 static void
309 put_rnat (struct task_struct *task, struct switch_stack *sw,
310  unsigned long *krbs, unsigned long *urnat_addr, unsigned long urnat,
311  unsigned long *urbs_end)
312 {
313  unsigned long rnat0 = 0, rnat1 = 0, *slot0_kaddr, umask = 0, mask, m;
314  unsigned long *kbsp, *ubspstore, *rnat0_kaddr, *rnat1_kaddr, shift;
315  long num_regs, nbits;
316  struct pt_regs *pt;
317  unsigned long cfm, *urbs_kargs;
318 
319  pt = task_pt_regs(task);
320  kbsp = (unsigned long *) sw->ar_bspstore;
321  ubspstore = (unsigned long *) pt->ar_bspstore;
322 
323  urbs_kargs = urbs_end;
324  if (in_syscall(pt)) {
325  /*
326  * If entered via syscall, don't allow user to set rnat bits
327  * for syscall args.
328  */
329  cfm = pt->cr_ifs;
330  urbs_kargs = ia64_rse_skip_regs(urbs_end, -(cfm & 0x7f));
331  }
332 
333  if (urbs_kargs >= urnat_addr)
334  nbits = 63;
335  else {
336  if ((urnat_addr - 63) >= urbs_kargs)
337  return;
338  nbits = ia64_rse_num_regs(urnat_addr - 63, urbs_kargs);
339  }
340  mask = MASK(nbits);
341 
342  /*
343  * First, figure out which bit number slot 0 in user-land maps
344  * to in the kernel rnat. Do this by figuring out how many
345  * register slots we're beyond the user's backingstore and
346  * then computing the equivalent address in kernel space.
347  */
348  num_regs = ia64_rse_num_regs(ubspstore, urnat_addr + 1);
349  slot0_kaddr = ia64_rse_skip_regs(krbs, num_regs);
350  shift = ia64_rse_slot_num(slot0_kaddr);
351  rnat1_kaddr = ia64_rse_rnat_addr(slot0_kaddr);
352  rnat0_kaddr = rnat1_kaddr - 64;
353 
354  if (ubspstore + 63 > urnat_addr) {
355  /* some bits need to be placed in pt->ar_rnat: */
356  umask = MASK(ia64_rse_slot_num(ubspstore)) & mask;
357  pt->ar_rnat = (pt->ar_rnat & ~umask) | (urnat & umask);
358  mask &= ~umask;
359  if (!mask)
360  return;
361  }
362  /*
363  * Note: Section 11.1 of the EAS guarantees that bit 63 of an
364  * rnat slot is ignored, so we don't have to clear it here.
365  */
366  rnat0 = (urnat << shift);
367  m = mask << shift;
368  if (rnat0_kaddr >= kbsp)
369  sw->ar_rnat = (sw->ar_rnat & ~m) | (rnat0 & m);
370  else if (rnat0_kaddr > krbs)
371  *rnat0_kaddr = ((*rnat0_kaddr & ~m) | (rnat0 & m));
372 
373  rnat1 = (urnat >> (63 - shift));
374  m = mask >> (63 - shift);
375  if (rnat1_kaddr >= kbsp)
376  sw->ar_rnat = (sw->ar_rnat & ~m) | (rnat1 & m);
377  else if (rnat1_kaddr > krbs)
378  *rnat1_kaddr = ((*rnat1_kaddr & ~m) | (rnat1 & m));
379 }
380 
381 static inline int
382 on_kernel_rbs (unsigned long addr, unsigned long bspstore,
383  unsigned long urbs_end)
384 {
385  unsigned long *rnat_addr = ia64_rse_rnat_addr((unsigned long *)
386  urbs_end);
387  return (addr >= bspstore && addr <= (unsigned long) rnat_addr);
388 }
389 
390 /*
391  * Read a word from the user-level backing store of task CHILD. ADDR
392  * is the user-level address to read the word from, VAL a pointer to
393  * the return value, and USER_BSP gives the end of the user-level
394  * backing store (i.e., it's the address that would be in ar.bsp after
395  * the user executed a "cover" instruction).
396  *
397  * This routine takes care of accessing the kernel register backing
398  * store for those registers that got spilled there. It also takes
399  * care of calculating the appropriate RNaT collection words.
400  */
401 long
402 ia64_peek (struct task_struct *child, struct switch_stack *child_stack,
403  unsigned long user_rbs_end, unsigned long addr, long *val)
404 {
405  unsigned long *bspstore, *krbs, regnum, *laddr, *urbs_end, *rnat_addr;
406  struct pt_regs *child_regs;
407  size_t copied;
408  long ret;
409 
410  urbs_end = (long *) user_rbs_end;
411  laddr = (unsigned long *) addr;
412  child_regs = task_pt_regs(child);
413  bspstore = (unsigned long *) child_regs->ar_bspstore;
414  krbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
415  if (on_kernel_rbs(addr, (unsigned long) bspstore,
416  (unsigned long) urbs_end))
417  {
418  /*
419  * Attempt to read the RBS in an area that's actually
420  * on the kernel RBS => read the corresponding bits in
421  * the kernel RBS.
422  */
423  rnat_addr = ia64_rse_rnat_addr(laddr);
424  ret = get_rnat(child, child_stack, krbs, rnat_addr, urbs_end);
425 
426  if (laddr == rnat_addr) {
427  /* return NaT collection word itself */
428  *val = ret;
429  return 0;
430  }
431 
432  if (((1UL << ia64_rse_slot_num(laddr)) & ret) != 0) {
433  /*
434  * It is implementation dependent whether the
435  * data portion of a NaT value gets saved on a
436  * st8.spill or RSE spill (e.g., see EAS 2.6,
437  * 4.4.4.6 Register Spill and Fill). To get
438  * consistent behavior across all possible
439  * IA-64 implementations, we return zero in
440  * this case.
441  */
442  *val = 0;
443  return 0;
444  }
445 
446  if (laddr < urbs_end) {
447  /*
448  * The desired word is on the kernel RBS and
449  * is not a NaT.
450  */
451  regnum = ia64_rse_num_regs(bspstore, laddr);
452  *val = *ia64_rse_skip_regs(krbs, regnum);
453  return 0;
454  }
455  }
456  copied = access_process_vm(child, addr, &ret, sizeof(ret), 0);
457  if (copied != sizeof(ret))
458  return -EIO;
459  *val = ret;
460  return 0;
461 }
462 
463 long
464 ia64_poke (struct task_struct *child, struct switch_stack *child_stack,
465  unsigned long user_rbs_end, unsigned long addr, long val)
466 {
467  unsigned long *bspstore, *krbs, regnum, *laddr;
468  unsigned long *urbs_end = (long *) user_rbs_end;
469  struct pt_regs *child_regs;
470 
471  laddr = (unsigned long *) addr;
472  child_regs = task_pt_regs(child);
473  bspstore = (unsigned long *) child_regs->ar_bspstore;
474  krbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
475  if (on_kernel_rbs(addr, (unsigned long) bspstore,
476  (unsigned long) urbs_end))
477  {
478  /*
479  * Attempt to write the RBS in an area that's actually
480  * on the kernel RBS => write the corresponding bits
481  * in the kernel RBS.
482  */
483  if (ia64_rse_is_rnat_slot(laddr))
484  put_rnat(child, child_stack, krbs, laddr, val,
485  urbs_end);
486  else {
487  if (laddr < urbs_end) {
488  regnum = ia64_rse_num_regs(bspstore, laddr);
489  *ia64_rse_skip_regs(krbs, regnum) = val;
490  }
491  }
492  } else if (access_process_vm(child, addr, &val, sizeof(val), 1)
493  != sizeof(val))
494  return -EIO;
495  return 0;
496 }
497 
498 /*
499  * Calculate the address of the end of the user-level register backing
500  * store. This is the address that would have been stored in ar.bsp
501  * if the user had executed a "cover" instruction right before
502  * entering the kernel. If CFMP is not NULL, it is used to return the
503  * "current frame mask" that was active at the time the kernel was
504  * entered.
505  */
506 unsigned long
507 ia64_get_user_rbs_end (struct task_struct *child, struct pt_regs *pt,
508  unsigned long *cfmp)
509 {
510  unsigned long *krbs, *bspstore, cfm = pt->cr_ifs;
511  long ndirty;
512 
513  krbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
514  bspstore = (unsigned long *) pt->ar_bspstore;
515  ndirty = ia64_rse_num_regs(krbs, krbs + (pt->loadrs >> 19));
516 
517  if (in_syscall(pt))
518  ndirty += (cfm & 0x7f);
519  else
520  cfm &= ~(1UL << 63); /* clear valid bit */
521 
522  if (cfmp)
523  *cfmp = cfm;
524  return (unsigned long) ia64_rse_skip_regs(bspstore, ndirty);
525 }
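The ndirty computation above works because pt->loadrs keeps the size of the dirty partition in the ar.rsc.loadrs format, i.e. a byte count shifted left by 16, so pt->loadrs >> 19 yields the number of 8-byte backing-store slots, which ia64_rse_num_regs() then converts into a register count (discounting the interleaved RNaT words). A hedged worked example of the shift:

    #include <assert.h>

    int main(void)
    {
            /* Hypothetical dirty partition of 200 bytes, stored loadrs-style. */
            unsigned long loadrs = 200UL << 16;   /* as kept in pt->loadrs */

            assert((loadrs >> 16) == 200);        /* bytes in the dirty partition */
            assert((loadrs >> 19) == 25);         /* 8-byte backing-store slots */
            return 0;
    }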
526 
527 /*
528  * Synchronize (i.e, write) the RSE backing store living in kernel
529  * space to the VM of the CHILD task. SW and PT are the pointers to
530  * the switch_stack and pt_regs structures, respectively.
531  * USER_RBS_END is the user-level address at which the backing store
532  * ends.
533  */
534 long
535 ia64_sync_user_rbs (struct task_struct *child, struct switch_stack *sw,
536  unsigned long user_rbs_start, unsigned long user_rbs_end)
537 {
538  unsigned long addr, val;
539  long ret;
540 
541  /* now copy word for word from kernel rbs to user rbs: */
542  for (addr = user_rbs_start; addr < user_rbs_end; addr += 8) {
543  ret = ia64_peek(child, sw, user_rbs_end, addr, &val);
544  if (ret < 0)
545  return ret;
546  if (access_process_vm(child, addr, &val, sizeof(val), 1)
547  != sizeof(val))
548  return -EIO;
549  }
550  return 0;
551 }
552 
553 static long
554 ia64_sync_kernel_rbs (struct task_struct *child, struct switch_stack *sw,
555  unsigned long user_rbs_start, unsigned long user_rbs_end)
556 {
557  unsigned long addr, val;
558  long ret;
559 
560  /* now copy word for word from user rbs to kernel rbs: */
561  for (addr = user_rbs_start; addr < user_rbs_end; addr += 8) {
562  if (access_process_vm(child, addr, &val, sizeof(val), 0)
563  != sizeof(val))
564  return -EIO;
565 
566  ret = ia64_poke(child, sw, user_rbs_end, addr, val);
567  if (ret < 0)
568  return ret;
569  }
570  return 0;
571 }
572 
573 typedef long (*syncfunc_t)(struct task_struct *, struct switch_stack *,
574  unsigned long, unsigned long);
575 
576 static void do_sync_rbs(struct unw_frame_info *info, void *arg)
577 {
578  struct pt_regs *pt;
579  unsigned long urbs_end;
580  syncfunc_t fn = arg;
581 
582  if (unw_unwind_to_user(info) < 0)
583  return;
584  pt = task_pt_regs(info->task);
585  urbs_end = ia64_get_user_rbs_end(info->task, pt, NULL);
586 
587  fn(info->task, info->sw, pt->ar_bspstore, urbs_end);
588 }
589 
590 /*
591  * When a thread is stopped (ptraced), the debugger might change the thread's
592  * user stack by writing its memory directly, and we must keep the RSE state
593  * stored in the kernel from overriding the user stack (user space's RSE is
594  * newer than the kernel's in that case). To work around this, we copy the
595  * kernel RSE to the user RBS before the task is stopped, so the user RBS
596  * holds up-to-date data. After the task is resumed from the traced stop, we
597  * copy the user RSE back to the kernel, which then uses the newer state to
598  * return to user mode. TIF_RESTORE_RSE flags that this resync is needed.
599  */
600 void ia64_ptrace_stop(void)
601 {
602  if (test_and_set_tsk_thread_flag(current, TIF_RESTORE_RSE))
603  return;
604  set_notify_resume(current);
605  unw_init_running(do_sync_rbs, ia64_sync_user_rbs);
606 }
607 
608 /*
609  * This is called to read back the register backing store.
610  */
611 void ia64_sync_krbs(void)
612 {
613  clear_tsk_thread_flag(current, TIF_RESTORE_RSE);
614 
615  unw_init_running(do_sync_rbs, ia64_sync_kernel_rbs);
616 }
617 
618 /*
619  * After PTRACE_ATTACH, a thread's register backing store area in user
620  * space is assumed to contain correct data whenever the thread is
621  * stopped. arch_ptrace_stop takes care of this on tracing stops.
622  * But if the child was already stopped for job control when we attach
623  * to it, then it might not ever get into ptrace_stop by the time we
624  * want to examine the user memory containing the RBS.
625  */
626 void
627 ptrace_attach_sync_user_rbs (struct task_struct *child)
628 {
629  int stopped = 0;
630  struct unw_frame_info info;
631 
632  /*
633  * If the child is in TASK_STOPPED, we need to change that to
634  * TASK_TRACED momentarily while we operate on it. This ensures
635  * that the child won't be woken up and return to user mode while
636  * we are doing the sync. (It can only be woken up for SIGKILL.)
637  */
638 
639  read_lock(&tasklist_lock);
640  if (child->sighand) {
641  spin_lock_irq(&child->sighand->siglock);
642  if (child->state == TASK_STOPPED &&
643  !test_and_set_tsk_thread_flag(child, TIF_RESTORE_RSE)) {
644  set_notify_resume(child);
645 
646  child->state = TASK_TRACED;
647  stopped = 1;
648  }
649  spin_unlock_irq(&child->sighand->siglock);
650  }
651  read_unlock(&tasklist_lock);
652 
653  if (!stopped)
654  return;
655 
656  unw_init_from_blocked_task(&info, child);
657  do_sync_rbs(&info, ia64_sync_user_rbs);
658 
659  /*
660  * Now move the child back into TASK_STOPPED if it should be in a
661  * job control stop, so that SIGCONT can be used to wake it up.
662  */
663  read_lock(&tasklist_lock);
664  if (child->sighand) {
665  spin_lock_irq(&child->sighand->siglock);
666  if (child->state == TASK_TRACED &&
667  (child->signal->flags & SIGNAL_STOP_STOPPED)) {
668  child->state = TASK_STOPPED;
669  }
670  spin_unlock_irq(&child->sighand->siglock);
671  }
672  read_unlock(&tasklist_lock);
673 }
674 
675 static inline int
676 thread_matches (struct task_struct *thread, unsigned long addr)
677 {
678  unsigned long thread_rbs_end;
679  struct pt_regs *thread_regs;
680 
681  if (ptrace_check_attach(thread, 0) < 0)
682  /*
683  * If the thread is not in an attachable state, we'll
684  * ignore it. The net effect is that if ADDR happens
685  * to overlap with the portion of the thread's
686  * register backing store that is currently residing
687  * on the thread's kernel stack, then ptrace() may end
688  * up accessing a stale value. But if the thread
689  * isn't stopped, that's a problem anyhow, so we're
690  * doing as well as we can...
691  */
692  return 0;
693 
694  thread_regs = task_pt_regs(thread);
695  thread_rbs_end = ia64_get_user_rbs_end(thread, thread_regs, NULL);
696  if (!on_kernel_rbs(addr, thread_regs->ar_bspstore, thread_rbs_end))
697  return 0;
698 
699  return 1; /* looks like we've got a winner */
700 }
701 
702 /*
703  * Write f32-f127 back to task->thread.fph if it has been modified.
704  */
705 inline void
706 ia64_flush_fph (struct task_struct *task)
707 {
708  struct ia64_psr *psr = ia64_psr(task_pt_regs(task));
709 
710  /*
711  * Prevent migrating this task while
712  * we're fiddling with the FPU state
713  */
714  preempt_disable();
715  if (ia64_is_local_fpu_owner(task) && psr->mfh) {
716  psr->mfh = 0;
717  task->thread.flags |= IA64_THREAD_FPH_VALID;
718  ia64_save_fpu(&task->thread.fph[0]);
719  }
720  preempt_enable();
721 }
722 
723 /*
724  * Sync the fph state of the task so that it can be manipulated
725  * through thread.fph. If necessary, f32-f127 are written back to
726  * thread.fph or, if the fph state hasn't been used before, thread.fph
727  * is cleared to zeroes. Also, access to f32-f127 is disabled to
728  * ensure that the task picks up the state from thread.fph when it
729  * executes again.
730  */
731 void
732 ia64_sync_fph (struct task_struct *task)
733 {
734  struct ia64_psr *psr = ia64_psr(task_pt_regs(task));
735 
736  ia64_flush_fph(task);
737  if (!(task->thread.flags & IA64_THREAD_FPH_VALID)) {
738  task->thread.flags |= IA64_THREAD_FPH_VALID;
739  memset(&task->thread.fph, 0, sizeof(task->thread.fph));
740  }
741  ia64_drop_fpu(task);
742  psr->dfh = 1;
743 }
744 
745 /*
746  * Change the machine-state of CHILD such that it will return via the normal
747  * kernel exit-path, rather than the syscall-exit path.
748  */
749 static void
750 convert_to_non_syscall (struct task_struct *child, struct pt_regs *pt,
751  unsigned long cfm)
752 {
753  struct unw_frame_info info, prev_info;
754  unsigned long ip, sp, pr;
755 
756  unw_init_from_blocked_task(&info, child);
757  while (1) {
758  prev_info = info;
759  if (unw_unwind(&info) < 0)
760  return;
761 
762  unw_get_sp(&info, &sp);
763  if ((long)((unsigned long)child + IA64_STK_OFFSET - sp)
764  < IA64_PT_REGS_SIZE) {
765  dprintk("ptrace.%s: ran off the top of the kernel "
766  "stack\n", __func__);
767  return;
768  }
769  if (unw_get_pr (&prev_info, &pr) < 0) {
770  unw_get_rp(&prev_info, &ip);
771  dprintk("ptrace.%s: failed to read "
772  "predicate register (ip=0x%lx)\n",
773  __func__, ip);
774  return;
775  }
776  if (unw_is_intr_frame(&info)
777  && (pr & (1UL << PRED_USER_STACK)))
778  break;
779  }
780 
781  /*
782  * Note: at the time of this call, the target task is blocked
783  * in notify_resume_user() and by clearing PRED_LEAVE_SYSCALL
784  * (aka, "pLvSys") we redirect execution from
785  * .work_pending_syscall_end to .work_processed_kernel.
786  */
787  unw_get_pr(&prev_info, &pr);
788  pr &= ~((1UL << PRED_SYSCALL) | (1UL << PRED_LEAVE_SYSCALL));
789  pr |= (1UL << PRED_NON_SYSCALL);
790  unw_set_pr(&prev_info, pr);
791 
792  pt->cr_ifs = (1UL << 63) | cfm;
793  /*
794  * Clear the memory that is NOT written on syscall-entry to
795  * ensure we do not leak kernel-state to user when execution
796  * resumes.
797  */
798  pt->r2 = 0;
799  pt->r3 = 0;
800  pt->r14 = 0;
801  memset(&pt->r16, 0, 16*8); /* clear r16-r31 */
802  memset(&pt->f6, 0, 6*16); /* clear f6-f11 */
803  pt->b7 = 0;
804  pt->ar_ccv = 0;
805  pt->ar_csd = 0;
806  pt->ar_ssd = 0;
807 }
808 
809 static int
810 access_nat_bits (struct task_struct *child, struct pt_regs *pt,
811  struct unw_frame_info *info,
812  unsigned long *data, int write_access)
813 {
814  unsigned long regnum, nat_bits, scratch_unat, dummy = 0;
815  char nat = 0;
816 
817  if (write_access) {
818  nat_bits = *data;
819  scratch_unat = ia64_put_scratch_nat_bits(pt, nat_bits);
820  if (unw_set_ar(info, UNW_AR_UNAT, scratch_unat) < 0) {
821  dprintk("ptrace: failed to set ar.unat\n");
822  return -1;
823  }
824  for (regnum = 4; regnum <= 7; ++regnum) {
825  unw_get_gr(info, regnum, &dummy, &nat);
826  unw_set_gr(info, regnum, dummy,
827  (nat_bits >> regnum) & 1);
828  }
829  } else {
830  if (unw_get_ar(info, UNW_AR_UNAT, &scratch_unat) < 0) {
831  dprintk("ptrace: failed to read ar.unat\n");
832  return -1;
833  }
834  nat_bits = ia64_get_scratch_nat_bits(pt, scratch_unat);
835  for (regnum = 4; regnum <= 7; ++regnum) {
836  unw_get_gr(info, regnum, &dummy, &nat);
837  nat_bits |= (nat != 0) << regnum;
838  }
839  *data = nat_bits;
840  }
841  return 0;
842 }
843 
844 static int
845 access_uarea (struct task_struct *child, unsigned long addr,
846  unsigned long *data, int write_access);
847 
848 static long
849 ptrace_getregs (struct task_struct *child, struct pt_all_user_regs __user *ppr)
850 {
851  unsigned long psr, ec, lc, rnat, bsp, cfm, nat_bits, val;
852  struct unw_frame_info info;
853  struct ia64_fpreg fpval;
854  struct switch_stack *sw;
855  struct pt_regs *pt;
856  long ret, retval = 0;
857  char nat = 0;
858  int i;
859 
860  if (!access_ok(VERIFY_WRITE, ppr, sizeof(struct pt_all_user_regs)))
861  return -EIO;
862 
863  pt = task_pt_regs(child);
864  sw = (struct switch_stack *) (child->thread.ksp + 16);
865  unw_init_from_blocked_task(&info, child);
866  if (unw_unwind_to_user(&info) < 0) {
867  return -EIO;
868  }
869 
870  if (((unsigned long) ppr & 0x7) != 0) {
871  dprintk("ptrace:unaligned register address %p\n", ppr);
872  return -EIO;
873  }
874 
875  if (access_uarea(child, PT_CR_IPSR, &psr, 0) < 0
876  || access_uarea(child, PT_AR_EC, &ec, 0) < 0
877  || access_uarea(child, PT_AR_LC, &lc, 0) < 0
878  || access_uarea(child, PT_AR_RNAT, &rnat, 0) < 0
879  || access_uarea(child, PT_AR_BSP, &bsp, 0) < 0
880  || access_uarea(child, PT_CFM, &cfm, 0)
881  || access_uarea(child, PT_NAT_BITS, &nat_bits, 0))
882  return -EIO;
883 
884  /* control regs */
885 
886  retval |= __put_user(pt->cr_iip, &ppr->cr_iip);
887  retval |= __put_user(psr, &ppr->cr_ipsr);
888 
889  /* app regs */
890 
891  retval |= __put_user(pt->ar_pfs, &ppr->ar[PT_AUR_PFS]);
892  retval |= __put_user(pt->ar_rsc, &ppr->ar[PT_AUR_RSC]);
893  retval |= __put_user(pt->ar_bspstore, &ppr->ar[PT_AUR_BSPSTORE]);
894  retval |= __put_user(pt->ar_unat, &ppr->ar[PT_AUR_UNAT]);
895  retval |= __put_user(pt->ar_ccv, &ppr->ar[PT_AUR_CCV]);
896  retval |= __put_user(pt->ar_fpsr, &ppr->ar[PT_AUR_FPSR]);
897 
898  retval |= __put_user(ec, &ppr->ar[PT_AUR_EC]);
899  retval |= __put_user(lc, &ppr->ar[PT_AUR_LC]);
900  retval |= __put_user(rnat, &ppr->ar[PT_AUR_RNAT]);
901  retval |= __put_user(bsp, &ppr->ar[PT_AUR_BSP]);
902  retval |= __put_user(cfm, &ppr->cfm);
903 
904  /* gr1-gr3 */
905 
906  retval |= __copy_to_user(&ppr->gr[1], &pt->r1, sizeof(long));
907  retval |= __copy_to_user(&ppr->gr[2], &pt->r2, sizeof(long) *2);
908 
909  /* gr4-gr7 */
910 
911  for (i = 4; i < 8; i++) {
912  if (unw_access_gr(&info, i, &val, &nat, 0) < 0)
913  return -EIO;
914  retval |= __put_user(val, &ppr->gr[i]);
915  }
916 
917  /* gr8-gr11 */
918 
919  retval |= __copy_to_user(&ppr->gr[8], &pt->r8, sizeof(long) * 4);
920 
921  /* gr12-gr15 */
922 
923  retval |= __copy_to_user(&ppr->gr[12], &pt->r12, sizeof(long) * 2);
924  retval |= __copy_to_user(&ppr->gr[14], &pt->r14, sizeof(long));
925  retval |= __copy_to_user(&ppr->gr[15], &pt->r15, sizeof(long));
926 
927  /* gr16-gr31 */
928 
929  retval |= __copy_to_user(&ppr->gr[16], &pt->r16, sizeof(long) * 16);
930 
931  /* b0 */
932 
933  retval |= __put_user(pt->b0, &ppr->br[0]);
934 
935  /* b1-b5 */
936 
937  for (i = 1; i < 6; i++) {
938  if (unw_access_br(&info, i, &val, 0) < 0)
939  return -EIO;
940  __put_user(val, &ppr->br[i]);
941  }
942 
943  /* b6-b7 */
944 
945  retval |= __put_user(pt->b6, &ppr->br[6]);
946  retval |= __put_user(pt->b7, &ppr->br[7]);
947 
948  /* fr2-fr5 */
949 
950  for (i = 2; i < 6; i++) {
951  if (unw_get_fr(&info, i, &fpval) < 0)
952  return -EIO;
953  retval |= __copy_to_user(&ppr->fr[i], &fpval, sizeof (fpval));
954  }
955 
956  /* fr6-fr11 */
957 
958  retval |= __copy_to_user(&ppr->fr[6], &pt->f6,
959  sizeof(struct ia64_fpreg) * 6);
960 
961  /* fp scratch regs(12-15) */
962 
963  retval |= __copy_to_user(&ppr->fr[12], &sw->f12,
964  sizeof(struct ia64_fpreg) * 4);
965 
966  /* fr16-fr31 */
967 
968  for (i = 16; i < 32; i++) {
969  if (unw_get_fr(&info, i, &fpval) < 0)
970  return -EIO;
971  retval |= __copy_to_user(&ppr->fr[i], &fpval, sizeof (fpval));
972  }
973 
974  /* fph */
975 
976  ia64_flush_fph(child);
977  retval |= __copy_to_user(&ppr->fr[32], &child->thread.fph,
978  sizeof(ppr->fr[32]) * 96);
979 
980  /* preds */
981 
982  retval |= __put_user(pt->pr, &ppr->pr);
983 
984  /* nat bits */
985 
986  retval |= __put_user(nat_bits, &ppr->nat);
987 
988  ret = retval ? -EIO : 0;
989  return ret;
990 }
991 
992 static long
993 ptrace_setregs (struct task_struct *child, struct pt_all_user_regs __user *ppr)
994 {
995  unsigned long psr, rsc, ec, lc, rnat, bsp, cfm, nat_bits, val = 0;
996  struct unw_frame_info info;
997  struct switch_stack *sw;
998  struct ia64_fpreg fpval;
999  struct pt_regs *pt;
1000  long ret, retval = 0;
1001  int i;
1002 
1003  memset(&fpval, 0, sizeof(fpval));
1004 
1005  if (!access_ok(VERIFY_READ, ppr, sizeof(struct pt_all_user_regs)))
1006  return -EIO;
1007 
1008  pt = task_pt_regs(child);
1009  sw = (struct switch_stack *) (child->thread.ksp + 16);
1010  unw_init_from_blocked_task(&info, child);
1011  if (unw_unwind_to_user(&info) < 0) {
1012  return -EIO;
1013  }
1014 
1015  if (((unsigned long) ppr & 0x7) != 0) {
1016  dprintk("ptrace:unaligned register address %p\n", ppr);
1017  return -EIO;
1018  }
1019 
1020  /* control regs */
1021 
1022  retval |= __get_user(pt->cr_iip, &ppr->cr_iip);
1023  retval |= __get_user(psr, &ppr->cr_ipsr);
1024 
1025  /* app regs */
1026 
1027  retval |= __get_user(pt->ar_pfs, &ppr->ar[PT_AUR_PFS]);
1028  retval |= __get_user(rsc, &ppr->ar[PT_AUR_RSC]);
1029  retval |= __get_user(pt->ar_bspstore, &ppr->ar[PT_AUR_BSPSTORE]);
1030  retval |= __get_user(pt->ar_unat, &ppr->ar[PT_AUR_UNAT]);
1031  retval |= __get_user(pt->ar_ccv, &ppr->ar[PT_AUR_CCV]);
1032  retval |= __get_user(pt->ar_fpsr, &ppr->ar[PT_AUR_FPSR]);
1033 
1034  retval |= __get_user(ec, &ppr->ar[PT_AUR_EC]);
1035  retval |= __get_user(lc, &ppr->ar[PT_AUR_LC]);
1036  retval |= __get_user(rnat, &ppr->ar[PT_AUR_RNAT]);
1037  retval |= __get_user(bsp, &ppr->ar[PT_AUR_BSP]);
1038  retval |= __get_user(cfm, &ppr->cfm);
1039 
1040  /* gr1-gr3 */
1041 
1042  retval |= __copy_from_user(&pt->r1, &ppr->gr[1], sizeof(long));
1043  retval |= __copy_from_user(&pt->r2, &ppr->gr[2], sizeof(long) * 2);
1044 
1045  /* gr4-gr7 */
1046 
1047  for (i = 4; i < 8; i++) {
1048  retval |= __get_user(val, &ppr->gr[i]);
1049  /* NaT bit will be set via PT_NAT_BITS: */
1050  if (unw_set_gr(&info, i, val, 0) < 0)
1051  return -EIO;
1052  }
1053 
1054  /* gr8-gr11 */
1055 
1056  retval |= __copy_from_user(&pt->r8, &ppr->gr[8], sizeof(long) * 4);
1057 
1058  /* gr12-gr15 */
1059 
1060  retval |= __copy_from_user(&pt->r12, &ppr->gr[12], sizeof(long) * 2);
1061  retval |= __copy_from_user(&pt->r14, &ppr->gr[14], sizeof(long));
1062  retval |= __copy_from_user(&pt->r15, &ppr->gr[15], sizeof(long));
1063 
1064  /* gr16-gr31 */
1065 
1066  retval |= __copy_from_user(&pt->r16, &ppr->gr[16], sizeof(long) * 16);
1067 
1068  /* b0 */
1069 
1070  retval |= __get_user(pt->b0, &ppr->br[0]);
1071 
1072  /* b1-b5 */
1073 
1074  for (i = 1; i < 6; i++) {
1075  retval |= __get_user(val, &ppr->br[i]);
1076  unw_set_br(&info, i, val);
1077  }
1078 
1079  /* b6-b7 */
1080 
1081  retval |= __get_user(pt->b6, &ppr->br[6]);
1082  retval |= __get_user(pt->b7, &ppr->br[7]);
1083 
1084  /* fr2-fr5 */
1085 
1086  for (i = 2; i < 6; i++) {
1087  retval |= __copy_from_user(&fpval, &ppr->fr[i], sizeof(fpval));
1088  if (unw_set_fr(&info, i, fpval) < 0)
1089  return -EIO;
1090  }
1091 
1092  /* fr6-fr11 */
1093 
1094  retval |= __copy_from_user(&pt->f6, &ppr->fr[6],
1095  sizeof(ppr->fr[6]) * 6);
1096 
1097  /* fp scratch regs(12-15) */
1098 
1099  retval |= __copy_from_user(&sw->f12, &ppr->fr[12],
1100  sizeof(ppr->fr[12]) * 4);
1101 
1102  /* fr16-fr31 */
1103 
1104  for (i = 16; i < 32; i++) {
1105  retval |= __copy_from_user(&fpval, &ppr->fr[i],
1106  sizeof(fpval));
1107  if (unw_set_fr(&info, i, fpval) < 0)
1108  return -EIO;
1109  }
1110 
1111  /* fph */
1112 
1113  ia64_sync_fph(child);
1114  retval |= __copy_from_user(&child->thread.fph, &ppr->fr[32],
1115  sizeof(ppr->fr[32]) * 96);
1116 
1117  /* preds */
1118 
1119  retval |= __get_user(pt->pr, &ppr->pr);
1120 
1121  /* nat bits */
1122 
1123  retval |= __get_user(nat_bits, &ppr->nat);
1124 
1125  retval |= access_uarea(child, PT_CR_IPSR, &psr, 1);
1126  retval |= access_uarea(child, PT_AR_RSC, &rsc, 1);
1127  retval |= access_uarea(child, PT_AR_EC, &ec, 1);
1128  retval |= access_uarea(child, PT_AR_LC, &lc, 1);
1129  retval |= access_uarea(child, PT_AR_RNAT, &rnat, 1);
1130  retval |= access_uarea(child, PT_AR_BSP, &bsp, 1);
1131  retval |= access_uarea(child, PT_CFM, &cfm, 1);
1132  retval |= access_uarea(child, PT_NAT_BITS, &nat_bits, 1);
1133 
1134  ret = retval ? -EIO : 0;
1135  return ret;
1136 }
1137 
1138 void
1139 user_enable_single_step (struct task_struct *child)
1140 {
1141  struct ia64_psr *child_psr = ia64_psr(task_pt_regs(child));
1142 
1143  set_tsk_thread_flag(child, TIF_SINGLESTEP);
1144  child_psr->ss = 1;
1145 }
1146 
1147 void
1148 user_enable_block_step (struct task_struct *child)
1149 {
1150  struct ia64_psr *child_psr = ia64_psr(task_pt_regs(child));
1151 
1152  set_tsk_thread_flag(child, TIF_SINGLESTEP);
1153  child_psr->tb = 1;
1154 }
1155 
1156 void
1157 user_disable_single_step (struct task_struct *child)
1158 {
1159  struct ia64_psr *child_psr = ia64_psr(task_pt_regs(child));
1160 
1161  /* make sure the single step/taken-branch trap bits are not set: */
1162  clear_tsk_thread_flag(child, TIF_SINGLESTEP);
1163  child_psr->ss = 0;
1164  child_psr->tb = 0;
1165 }
1166 
1167 /*
1168  * Called by kernel/ptrace.c when detaching..
1169  *
1170  * Make sure the single step bit is not set.
1171  */
1172 void
1173 ptrace_disable (struct task_struct *child)
1174 {
1175  user_disable_single_step(child);
1176 }
1177 
1178 long
1179 arch_ptrace (struct task_struct *child, long request,
1180  unsigned long addr, unsigned long data)
1181 {
1182  switch (request) {
1183  case PTRACE_PEEKTEXT:
1184  case PTRACE_PEEKDATA:
1185  /* read word at location addr */
1186  if (access_process_vm(child, addr, &data, sizeof(data), 0)
1187  != sizeof(data))
1188  return -EIO;
1189  /* ensure return value is not mistaken for error code */
1190  force_successful_syscall_return();
1191  return data;
1192 
1193  /* PTRACE_POKETEXT and PTRACE_POKEDATA is handled
1194  * by the generic ptrace_request().
1195  */
1196 
1197  case PTRACE_PEEKUSR:
1198  /* read the word at addr in the USER area */
1199  if (access_uarea(child, addr, &data, 0) < 0)
1200  return -EIO;
1201  /* ensure return value is not mistaken for error code */
1202  force_successful_syscall_return();
1203  return data;
1204 
1205  case PTRACE_POKEUSR:
1206  /* write the word at addr in the USER area */
1207  if (access_uarea(child, addr, &data, 1) < 0)
1208  return -EIO;
1209  return 0;
1210 
1211  case PTRACE_OLD_GETSIGINFO:
1212  /* for backwards-compatibility */
1213  return ptrace_request(child, PTRACE_GETSIGINFO, addr, data);
1214 
1215  case PTRACE_OLD_SETSIGINFO:
1216  /* for backwards-compatibility */
1217  return ptrace_request(child, PTRACE_SETSIGINFO, addr, data);
1218 
1219  case PTRACE_GETREGS:
1220  return ptrace_getregs(child,
1221  (struct pt_all_user_regs __user *) data);
1222 
1223  case PTRACE_SETREGS:
1224  return ptrace_setregs(child,
1225  (struct pt_all_user_regs __user *) data);
1226 
1227  default:
1228  return ptrace_request(child, request, addr, data);
1229  }
1230 }
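For reference, a tracer reaches these arch_ptrace() cases through ordinary ptrace(2) requests. The following hedged user-space sketch (error handling trimmed; PT_CR_IIP is the USER-area offset of cr.iip, assumed from <asm/ptrace_offsets.h>) attaches to a child and peeks one word through the access_uarea() path shown above:

    #include <sys/types.h>
    #include <sys/ptrace.h>
    #include <sys/wait.h>
    #include <asm/ptrace_offsets.h>
    #include <stdio.h>

    static void dump_child_iip(pid_t pid)
    {
            long iip;

            ptrace(PTRACE_ATTACH, pid, NULL, NULL);
            waitpid(pid, NULL, 0);                    /* wait for the ptrace stop */

            /* PTRACE_PEEKUSER is routed to access_uarea() above. */
            iip = ptrace(PTRACE_PEEKUSER, pid, (void *) PT_CR_IIP, NULL);
            printf("child cr.iip = %#lx\n", (unsigned long) iip);

            ptrace(PTRACE_DETACH, pid, NULL, NULL);
    }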
1231 
1232 
1233 /* "asmlinkage" so the input arguments are preserved... */
1234 
1235 asmlinkage long
1236 syscall_trace_enter (long arg0, long arg1, long arg2, long arg3,
1237  long arg4, long arg5, long arg6, long arg7,
1238  struct pt_regs regs)
1239 {
1240  if (test_thread_flag(TIF_SYSCALL_TRACE))
1241  if (tracehook_report_syscall_entry(&regs))
1242  return -ENOSYS;
1243 
1244  /* copy user rbs to kernel rbs */
1245  if (test_thread_flag(TIF_RESTORE_RSE))
1246  ia64_sync_krbs();
1247 
1248 
1249  audit_syscall_entry(AUDIT_ARCH_IA64, regs.r15, arg0, arg1, arg2, arg3);
1250 
1251  return 0;
1252 }
1253 
1254 /* "asmlinkage" so the input arguments are preserved... */
1255 
1256 asmlinkage void
1257 syscall_trace_leave (long arg0, long arg1, long arg2, long arg3,
1258  long arg4, long arg5, long arg6, long arg7,
1259  struct pt_regs regs)
1260 {
1261  int step;
1262 
1263  audit_syscall_exit(&regs);
1264 
1265  step = test_thread_flag(TIF_SINGLESTEP);
1266  if (step || test_thread_flag(TIF_SYSCALL_TRACE))
1267  tracehook_report_syscall_exit(&regs, step);
1268 
1269  /* copy user rbs to kernel rbs */
1270  if (test_thread_flag(TIF_RESTORE_RSE))
1271  ia64_sync_krbs();
1272 }
1273 
1274 /* Utrace implementation starts here */
1275 struct regset_get {
1276  void *kbuf;
1277  void __user *ubuf;
1278 };
1279 
1280 struct regset_set {
1281  const void *kbuf;
1282  const void __user *ubuf;
1283 };
1284 
1285 struct regset_getset {
1286  struct task_struct *target;
1287  const struct user_regset *regset;
1288  union {
1289  struct regset_get get;
1290  struct regset_set set;
1291  } u;
1292  unsigned int pos;
1293  unsigned int count;
1294  int ret;
1295 };
1296 
1297 static int
1298 access_elf_gpreg(struct task_struct *target, struct unw_frame_info *info,
1299  unsigned long addr, unsigned long *data, int write_access)
1300 {
1301  struct pt_regs *pt;
1302  unsigned long *ptr = NULL;
1303  int ret;
1304  char nat = 0;
1305 
1306  pt = task_pt_regs(target);
1307  switch (addr) {
1308  case ELF_GR_OFFSET(1):
1309  ptr = &pt->r1;
1310  break;
1311  case ELF_GR_OFFSET(2):
1312  case ELF_GR_OFFSET(3):
1313  ptr = (void *)&pt->r2 + (addr - ELF_GR_OFFSET(2));
1314  break;
1315  case ELF_GR_OFFSET(4) ... ELF_GR_OFFSET(7):
1316  if (write_access) {
1317  /* read NaT bit first: */
1318  unsigned long dummy;
1319 
1320  ret = unw_get_gr(info, addr/8, &dummy, &nat);
1321  if (ret < 0)
1322  return ret;
1323  }
1324  return unw_access_gr(info, addr/8, data, &nat, write_access);
1325  case ELF_GR_OFFSET(8) ... ELF_GR_OFFSET(11):
1326  ptr = (void *)&pt->r8 + addr - ELF_GR_OFFSET(8);
1327  break;
1328  case ELF_GR_OFFSET(12):
1329  case ELF_GR_OFFSET(13):
1330  ptr = (void *)&pt->r12 + addr - ELF_GR_OFFSET(12);
1331  break;
1332  case ELF_GR_OFFSET(14):
1333  ptr = &pt->r14;
1334  break;
1335  case ELF_GR_OFFSET(15):
1336  ptr = &pt->r15;
1337  }
1338  if (write_access)
1339  *ptr = *data;
1340  else
1341  *data = *ptr;
1342  return 0;
1343 }
1344 
1345 static int
1346 access_elf_breg(struct task_struct *target, struct unw_frame_info *info,
1347  unsigned long addr, unsigned long *data, int write_access)
1348 {
1349  struct pt_regs *pt;
1350  unsigned long *ptr = NULL;
1351 
1352  pt = task_pt_regs(target);
1353  switch (addr) {
1354  case ELF_BR_OFFSET(0):
1355  ptr = &pt->b0;
1356  break;
1357  case ELF_BR_OFFSET(1) ... ELF_BR_OFFSET(5):
1358  return unw_access_br(info, (addr - ELF_BR_OFFSET(0))/8,
1359  data, write_access);
1360  case ELF_BR_OFFSET(6):
1361  ptr = &pt->b6;
1362  break;
1363  case ELF_BR_OFFSET(7):
1364  ptr = &pt->b7;
1365  }
1366  if (write_access)
1367  *ptr = *data;
1368  else
1369  *data = *ptr;
1370  return 0;
1371 }
1372 
1373 static int
1374 access_elf_areg(struct task_struct *target, struct unw_frame_info *info,
1375  unsigned long addr, unsigned long *data, int write_access)
1376 {
1377  struct pt_regs *pt;
1378  unsigned long cfm, urbs_end;
1379  unsigned long *ptr = NULL;
1380 
1381  pt = task_pt_regs(target);
1382  if (addr >= ELF_AR_RSC_OFFSET && addr <= ELF_AR_SSD_OFFSET) {
1383  switch (addr) {
1384  case ELF_AR_RSC_OFFSET:
1385  /* force PL3 */
1386  if (write_access)
1387  pt->ar_rsc = *data | (3 << 2);
1388  else
1389  *data = pt->ar_rsc;
1390  return 0;
1391  case ELF_AR_BSP_OFFSET:
1392  /*
1393  * By convention, we use PT_AR_BSP to refer to
1394  * the end of the user-level backing store.
1395  * Use ia64_rse_skip_regs(PT_AR_BSP, -CFM.sof)
1396  * to get the real value of ar.bsp at the time
1397  * the kernel was entered.
1398  *
1399  * Furthermore, when changing the contents of
1400  * PT_AR_BSP (or PT_CFM) while the task is
1401  * blocked in a system call, convert the state
1402  * so that the non-system-call exit
1403  * path is used. This ensures that the proper
1404  * state will be picked up when resuming
1405  * execution. However, it *also* means that
1406  * once we write PT_AR_BSP/PT_CFM, it won't be
1407  * possible to modify the syscall arguments of
1408  * the pending system call any longer. This
1409  * shouldn't be an issue because modifying
1410  * PT_AR_BSP/PT_CFM generally implies that
1411  * we're either abandoning the pending system
1412  * call or that we defer its re-execution
1413  * (e.g., due to GDB doing an inferior
1414  * function call).
1415  */
1416  urbs_end = ia64_get_user_rbs_end(target, pt, &cfm);
1417  if (write_access) {
1418  if (*data != urbs_end) {
1419  if (in_syscall(pt))
1420  convert_to_non_syscall(target,
1421  pt,
1422  cfm);
1423  /*
1424  * Simulate user-level write
1425  * of ar.bsp:
1426  */
1427  pt->loadrs = 0;
1428  pt->ar_bspstore = *data;
1429  }
1430  } else
1431  *data = urbs_end;
1432  return 0;
1433  case ELF_AR_BSPSTORE_OFFSET:
1434  ptr = &pt->ar_bspstore;
1435  break;
1436  case ELF_AR_RNAT_OFFSET:
1437  ptr = &pt->ar_rnat;
1438  break;
1439  case ELF_AR_CCV_OFFSET:
1440  ptr = &pt->ar_ccv;
1441  break;
1442  case ELF_AR_UNAT_OFFSET:
1443  ptr = &pt->ar_unat;
1444  break;
1445  case ELF_AR_FPSR_OFFSET:
1446  ptr = &pt->ar_fpsr;
1447  break;
1448  case ELF_AR_PFS_OFFSET:
1449  ptr = &pt->ar_pfs;
1450  break;
1451  case ELF_AR_LC_OFFSET:
1452  return unw_access_ar(info, UNW_AR_LC, data,
1453  write_access);
1454  case ELF_AR_EC_OFFSET:
1455  return unw_access_ar(info, UNW_AR_EC, data,
1456  write_access);
1457  case ELF_AR_CSD_OFFSET:
1458  ptr = &pt->ar_csd;
1459  break;
1460  case ELF_AR_SSD_OFFSET:
1461  ptr = &pt->ar_ssd;
1462  }
1463  } else if (addr >= ELF_CR_IIP_OFFSET && addr <= ELF_CR_IPSR_OFFSET) {
1464  switch (addr) {
1465  case ELF_CR_IIP_OFFSET:
1466  ptr = &pt->cr_iip;
1467  break;
1468  case ELF_CFM_OFFSET:
1469  urbs_end = ia64_get_user_rbs_end(target, pt, &cfm);
1470  if (write_access) {
1471  if (((cfm ^ *data) & PFM_MASK) != 0) {
1472  if (in_syscall(pt))
1473  convert_to_non_syscall(target,
1474  pt,
1475  cfm);
1476  pt->cr_ifs = ((pt->cr_ifs & ~PFM_MASK)
1477  | (*data & PFM_MASK));
1478  }
1479  } else
1480  *data = cfm;
1481  return 0;
1482  case ELF_CR_IPSR_OFFSET:
1483  if (write_access) {
1484  unsigned long tmp = *data;
1485  /* psr.ri==3 is a reserved value: SDM 2:25 */
1486  if ((tmp & IA64_PSR_RI) == IA64_PSR_RI)
1487  tmp &= ~IA64_PSR_RI;
1488  pt->cr_ipsr = ((tmp & IPSR_MASK)
1489  | (pt->cr_ipsr & ~IPSR_MASK));
1490  } else
1491  *data = (pt->cr_ipsr & IPSR_MASK);
1492  return 0;
1493  }
1494  } else if (addr == ELF_NAT_OFFSET)
1495  return access_nat_bits(target, pt, info,
1496  data, write_access);
1497  else if (addr == ELF_PR_OFFSET)
1498  ptr = &pt->pr;
1499  else
1500  return -1;
1501 
1502  if (write_access)
1503  *ptr = *data;
1504  else
1505  *data = *ptr;
1506 
1507  return 0;
1508 }
1509 
1510 static int
1511 access_elf_reg(struct task_struct *target, struct unw_frame_info *info,
1512  unsigned long addr, unsigned long *data, int write_access)
1513 {
1514  if (addr >= ELF_GR_OFFSET(1) && addr <= ELF_GR_OFFSET(15))
1515  return access_elf_gpreg(target, info, addr, data, write_access);
1516  else if (addr >= ELF_BR_OFFSET(0) && addr <= ELF_BR_OFFSET(7))
1517  return access_elf_breg(target, info, addr, data, write_access);
1518  else
1519  return access_elf_areg(target, info, addr, data, write_access);
1520 }
1521 
1522 void do_gpregs_get(struct unw_frame_info *info, void *arg)
1523 {
1524  struct pt_regs *pt;
1525  struct regset_getset *dst = arg;
1526  elf_greg_t tmp[16];
1527  unsigned int i, index, min_copy;
1528 
1529  if (unw_unwind_to_user(info) < 0)
1530  return;
1531 
1532  /*
1533  * coredump format:
1534  * r0-r31
1535  * NaT bits (for r0-r31; bit N == 1 iff rN is a NaT)
1536  * predicate registers (p0-p63)
1537  * b0-b7
1538  * ip cfm user-mask
1539  * ar.rsc ar.bsp ar.bspstore ar.rnat
1540  * ar.ccv ar.unat ar.fpsr ar.pfs ar.lc ar.ec
1541  */
1542 
1543 
1544  /* Skip r0 */
1545  if (dst->count > 0 && dst->pos < ELF_GR_OFFSET(1)) {
1546  dst->ret = user_regset_copyout_zero(&dst->pos, &dst->count,
1547  &dst->u.get.kbuf,
1548  &dst->u.get.ubuf,
1549  0, ELF_GR_OFFSET(1));
1550  if (dst->ret || dst->count == 0)
1551  return;
1552  }
1553 
1554  /* gr1 - gr15 */
1555  if (dst->count > 0 && dst->pos < ELF_GR_OFFSET(16)) {
1556  index = (dst->pos - ELF_GR_OFFSET(1)) / sizeof(elf_greg_t);
1557  min_copy = ELF_GR_OFFSET(16) > (dst->pos + dst->count) ?
1558  (dst->pos + dst->count) : ELF_GR_OFFSET(16);
1559  for (i = dst->pos; i < min_copy; i += sizeof(elf_greg_t),
1560  index++)
1561  if (access_elf_reg(dst->target, info, i,
1562  &tmp[index], 0) < 0) {
1563  dst->ret = -EIO;
1564  return;
1565  }
1566  dst->ret = user_regset_copyout(&dst->pos, &dst->count,
1567  &dst->u.get.kbuf, &dst->u.get.ubuf, tmp,
1568  ELF_GR_OFFSET(1), ELF_GR_OFFSET(16));
1569  if (dst->ret || dst->count == 0)
1570  return;
1571  }
1572 
1573  /* r16-r31 */
1574  if (dst->count > 0 && dst->pos < ELF_NAT_OFFSET) {
1575  pt = task_pt_regs(dst->target);
1576  dst->ret = user_regset_copyout(&dst->pos, &dst->count,
1577  &dst->u.get.kbuf, &dst->u.get.ubuf, &pt->r16,
1578  ELF_GR_OFFSET(16), ELF_NAT_OFFSET);
1579  if (dst->ret || dst->count == 0)
1580  return;
1581  }
1582 
1583  /* nat, pr, b0 - b7 */
1584  if (dst->count > 0 && dst->pos < ELF_CR_IIP_OFFSET) {
1585  index = (dst->pos - ELF_NAT_OFFSET) / sizeof(elf_greg_t);
1586  min_copy = ELF_CR_IIP_OFFSET > (dst->pos + dst->count) ?
1587  (dst->pos + dst->count) : ELF_CR_IIP_OFFSET;
1588  for (i = dst->pos; i < min_copy; i += sizeof(elf_greg_t),
1589  index++)
1590  if (access_elf_reg(dst->target, info, i,
1591  &tmp[index], 0) < 0) {
1592  dst->ret = -EIO;
1593  return;
1594  }
1595  dst->ret = user_regset_copyout(&dst->pos, &dst->count,
1596  &dst->u.get.kbuf, &dst->u.get.ubuf, tmp,
1597  ELF_NAT_OFFSET, ELF_CR_IIP_OFFSET);
1598  if (dst->ret || dst->count == 0)
1599  return;
1600  }
1601 
1602  /* ip cfm psr ar.rsc ar.bsp ar.bspstore ar.rnat
1603  * ar.ccv ar.unat ar.fpsr ar.pfs ar.lc ar.ec ar.csd ar.ssd
1604  */
1605  if (dst->count > 0 && dst->pos < (ELF_AR_END_OFFSET)) {
1606  index = (dst->pos - ELF_CR_IIP_OFFSET) / sizeof(elf_greg_t);
1607  min_copy = ELF_AR_END_OFFSET > (dst->pos + dst->count) ?
1608  (dst->pos + dst->count) : ELF_AR_END_OFFSET;
1609  for (i = dst->pos; i < min_copy; i += sizeof(elf_greg_t),
1610  index++)
1611  if (access_elf_reg(dst->target, info, i,
1612  &tmp[index], 0) < 0) {
1613  dst->ret = -EIO;
1614  return;
1615  }
1616  dst->ret = user_regset_copyout(&dst->pos, &dst->count,
1617  &dst->u.get.kbuf, &dst->u.get.ubuf, tmp,
1618  ELF_CR_IIP_OFFSET, ELF_AR_END_OFFSET);
1619  }
1620 }
1621 
1622 void do_gpregs_set(struct unw_frame_info *info, void *arg)
1623 {
1624  struct pt_regs *pt;
1625  struct regset_getset *dst = arg;
1626  elf_greg_t tmp[16];
1627  unsigned int i, index;
1628 
1629  if (unw_unwind_to_user(info) < 0)
1630  return;
1631 
1632  /* Skip r0 */
1633  if (dst->count > 0 && dst->pos < ELF_GR_OFFSET(1)) {
1634  dst->ret = user_regset_copyin_ignore(&dst->pos, &dst->count,
1635  &dst->u.set.kbuf,
1636  &dst->u.set.ubuf,
1637  0, ELF_GR_OFFSET(1));
1638  if (dst->ret || dst->count == 0)
1639  return;
1640  }
1641 
1642  /* gr1-gr15 */
1643  if (dst->count > 0 && dst->pos < ELF_GR_OFFSET(16)) {
1644  i = dst->pos;
1645  index = (dst->pos - ELF_GR_OFFSET(1)) / sizeof(elf_greg_t);
1646  dst->ret = user_regset_copyin(&dst->pos, &dst->count,
1647  &dst->u.set.kbuf, &dst->u.set.ubuf, tmp,
1648  ELF_GR_OFFSET(1), ELF_GR_OFFSET(16));
1649  if (dst->ret)
1650  return;
1651  for ( ; i < dst->pos; i += sizeof(elf_greg_t), index++)
1652  if (access_elf_reg(dst->target, info, i,
1653  &tmp[index], 1) < 0) {
1654  dst->ret = -EIO;
1655  return;
1656  }
1657  if (dst->count == 0)
1658  return;
1659  }
1660 
1661  /* gr16-gr31 */
1662  if (dst->count > 0 && dst->pos < ELF_NAT_OFFSET) {
1663  pt = task_pt_regs(dst->target);
1664  dst->ret = user_regset_copyin(&dst->pos, &dst->count,
1665  &dst->u.set.kbuf, &dst->u.set.ubuf, &pt->r16,
1666  ELF_GR_OFFSET(16), ELF_NAT_OFFSET);
1667  if (dst->ret || dst->count == 0)
1668  return;
1669  }
1670 
1671  /* nat, pr, b0 - b7 */
1672  if (dst->count > 0 && dst->pos < ELF_CR_IIP_OFFSET) {
1673  i = dst->pos;
1674  index = (dst->pos - ELF_NAT_OFFSET) / sizeof(elf_greg_t);
1675  dst->ret = user_regset_copyin(&dst->pos, &dst->count,
1676  &dst->u.set.kbuf, &dst->u.set.ubuf, tmp,
1677  ELF_NAT_OFFSET, ELF_CR_IIP_OFFSET);
1678  if (dst->ret)
1679  return;
1680  for (; i < dst->pos; i += sizeof(elf_greg_t), index++)
1681  if (access_elf_reg(dst->target, info, i,
1682  &tmp[index], 1) < 0) {
1683  dst->ret = -EIO;
1684  return;
1685  }
1686  if (dst->count == 0)
1687  return;
1688  }
1689 
1690  /* ip cfm psr ar.rsc ar.bsp ar.bspstore ar.rnat
1691  * ar.ccv ar.unat ar.fpsr ar.pfs ar.lc ar.ec ar.csd ar.ssd
1692  */
1693  if (dst->count > 0 && dst->pos < (ELF_AR_END_OFFSET)) {
1694  i = dst->pos;
1695  index = (dst->pos - ELF_CR_IIP_OFFSET) / sizeof(elf_greg_t);
1696  dst->ret = user_regset_copyin(&dst->pos, &dst->count,
1697  &dst->u.set.kbuf, &dst->u.set.ubuf, tmp,
1698  ELF_CR_IIP_OFFSET, ELF_AR_END_OFFSET);
1699  if (dst->ret)
1700  return;
1701  for ( ; i < dst->pos; i += sizeof(elf_greg_t), index++)
1702  if (access_elf_reg(dst->target, info, i,
1703  &tmp[index], 1) < 0) {
1704  dst->ret = -EIO;
1705  return;
1706  }
1707  }
1708 }
1709 
1710 #define ELF_FP_OFFSET(i) (i * sizeof(elf_fpreg_t))
1711 
1712 void do_fpregs_get(struct unw_frame_info *info, void *arg)
1713 {
1714  struct regset_getset *dst = arg;
1715  struct task_struct *task = dst->target;
1716  elf_fpreg_t tmp[30];
1717  int index, min_copy, i;
1718 
1719  if (unw_unwind_to_user(info) < 0)
1720  return;
1721 
1722  /* Skip pos 0 and 1 */
1723  if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(2)) {
1724  dst->ret = user_regset_copyout_zero(&dst->pos, &dst->count,
1725  &dst->u.get.kbuf,
1726  &dst->u.get.ubuf,
1727  0, ELF_FP_OFFSET(2));
1728  if (dst->count == 0 || dst->ret)
1729  return;
1730  }
1731 
1732  /* fr2-fr31 */
1733  if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(32)) {
1734  index = (dst->pos - ELF_FP_OFFSET(2)) / sizeof(elf_fpreg_t);
1735 
1736  min_copy = min(((unsigned int)ELF_FP_OFFSET(32)),
1737  dst->pos + dst->count);
1738  for (i = dst->pos; i < min_copy; i += sizeof(elf_fpreg_t),
1739  index++)
1740  if (unw_get_fr(info, i / sizeof(elf_fpreg_t),
1741  &tmp[index])) {
1742  dst->ret = -EIO;
1743  return;
1744  }
1745  dst->ret = user_regset_copyout(&dst->pos, &dst->count,
1746  &dst->u.get.kbuf, &dst->u.get.ubuf, tmp,
1747  ELF_FP_OFFSET(2), ELF_FP_OFFSET(32));
1748  if (dst->count == 0 || dst->ret)
1749  return;
1750  }
1751 
1752  /* fph */
1753  if (dst->count > 0) {
1754  ia64_flush_fph(dst->target);
1755  if (task->thread.flags & IA64_THREAD_FPH_VALID)
1756  dst->ret = user_regset_copyout(
1757  &dst->pos, &dst->count,
1758  &dst->u.get.kbuf, &dst->u.get.ubuf,
1759  &dst->target->thread.fph,
1760  ELF_FP_OFFSET(32), -1);
1761  else
1762  /* Zero fill instead. */
1763  dst->ret = user_regset_copyout_zero(
1764  &dst->pos, &dst->count,
1765  &dst->u.get.kbuf, &dst->u.get.ubuf,
1766  ELF_FP_OFFSET(32), -1);
1767  }
1768 }
1769 
1770 void do_fpregs_set(struct unw_frame_info *info, void *arg)
1771 {
1772  struct regset_getset *dst = arg;
1773  elf_fpreg_t fpreg, tmp[30];
1774  int index, start, end;
1775 
1776  if (unw_unwind_to_user(info) < 0)
1777  return;
1778 
1779  /* Skip pos 0 and 1 */
1780  if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(2)) {
1781  dst->ret = user_regset_copyin_ignore(&dst->pos, &dst->count,
1782  &dst->u.set.kbuf,
1783  &dst->u.set.ubuf,
1784  0, ELF_FP_OFFSET(2));
1785  if (dst->count == 0 || dst->ret)
1786  return;
1787  }
1788 
1789  /* fr2-fr31 */
1790  if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(32)) {
1791  start = dst->pos;
1792  end = min(((unsigned int)ELF_FP_OFFSET(32)),
1793  dst->pos + dst->count);
1794  dst->ret = user_regset_copyin(&dst->pos, &dst->count,
1795  &dst->u.set.kbuf, &dst->u.set.ubuf, tmp,
1796  ELF_FP_OFFSET(2), ELF_FP_OFFSET(32));
1797  if (dst->ret)
1798  return;
1799 
1800  if (start & 0xF) { /* only write high part */
1801  if (unw_get_fr(info, start / sizeof(elf_fpreg_t),
1802  &fpreg)) {
1803  dst->ret = -EIO;
1804  return;
1805  }
1806  tmp[start / sizeof(elf_fpreg_t) - 2].u.bits[0]
1807  = fpreg.u.bits[0];
1808  start &= ~0xFUL;
1809  }
1810  if (end & 0xF) { /* only write low part */
1811  if (unw_get_fr(info, end / sizeof(elf_fpreg_t),
1812  &fpreg)) {
1813  dst->ret = -EIO;
1814  return;
1815  }
1816  tmp[end / sizeof(elf_fpreg_t) - 2].u.bits[1]
1817  = fpreg.u.bits[1];
1818  end = (end + 0xF) & ~0xFUL;
1819  }
1820 
1821  for ( ; start < end ; start += sizeof(elf_fpreg_t)) {
1822  index = start / sizeof(elf_fpreg_t);
1823  if (unw_set_fr(info, index, tmp[index - 2])) {
1824  dst->ret = -EIO;
1825  return;
1826  }
1827  }
1828  if (dst->ret || dst->count == 0)
1829  return;
1830  }
1831 
1832  /* fph */
1833  if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(128)) {
1834  ia64_sync_fph(dst->target);
1835  dst->ret = user_regset_copyin(&dst->pos, &dst->count,
1836  &dst->u.set.kbuf,
1837  &dst->u.set.ubuf,
1838  &dst->target->thread.fph,
1839  ELF_FP_OFFSET(32), -1);
1840  }
1841 }
1842 
1843 static int
1844 do_regset_call(void (*call)(struct unw_frame_info *, void *),
1845  struct task_struct *target,
1846  const struct user_regset *regset,
1847  unsigned int pos, unsigned int count,
1848  const void *kbuf, const void __user *ubuf)
1849 {
1850  struct regset_getset info = { .target = target, .regset = regset,
1851  .pos = pos, .count = count,
1852  .u.set = { .kbuf = kbuf, .ubuf = ubuf },
1853  .ret = 0 };
1854 
1855  if (target == current)
1856  unw_init_running(call, &info);
1857  else {
1858  struct unw_frame_info ufi;
1859  memset(&ufi, 0, sizeof(ufi));
1860  unw_init_from_blocked_task(&ufi, target);
1861  (*call)(&ufi, &info);
1862  }
1863 
1864  return info.ret;
1865 }
1866 
1867 static int
1868 gpregs_get(struct task_struct *target,
1869  const struct user_regset *regset,
1870  unsigned int pos, unsigned int count,
1871  void *kbuf, void __user *ubuf)
1872 {
1873  return do_regset_call(do_gpregs_get, target, regset, pos, count,
1874  kbuf, ubuf);
1875 }
1876 
1877 static int gpregs_set(struct task_struct *target,
1878  const struct user_regset *regset,
1879  unsigned int pos, unsigned int count,
1880  const void *kbuf, const void __user *ubuf)
1881 {
1882  return do_regset_call(do_gpregs_set, target, regset, pos, count,
1883  kbuf, ubuf);
1884 }
1885 
1886 static void do_gpregs_writeback(struct unw_frame_info *info, void *arg)
1887 {
1888  do_sync_rbs(info, ia64_sync_user_rbs);
1889 }
1890 
1891 /*
1892  * This is called to write back the register backing store.
1893  * ptrace does this before it stops, so that a tracer reading the user
1894  * memory after the thread stops will get the current register data.
1895  */
1896 static int
1897 gpregs_writeback(struct task_struct *target,
1898  const struct user_regset *regset,
1899  int now)
1900 {
1901  if (test_and_set_tsk_thread_flag(target, TIF_RESTORE_RSE))
1902  return 0;
1903  set_notify_resume(target);
1904  return do_regset_call(do_gpregs_writeback, target, regset, 0, 0,
1905  NULL, NULL);
1906 }
1907 
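 /*
  * Number of valid members in the floating-point regset: all 128
  * registers when the high partition (fr32-fr127) in thread.fph is
  * live, otherwise only fr0-fr31.
  */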
1908 static int
1909 fpregs_active(struct task_struct *target, const struct user_regset *regset)
1910 {
1911  return (target->thread.flags & IA64_THREAD_FPH_VALID) ? 128 : 32;
1912 }
1913 
1914 static int fpregs_get(struct task_struct *target,
1915  const struct user_regset *regset,
1916  unsigned int pos, unsigned int count,
1917  void *kbuf, void __user *ubuf)
1918 {
1919  return do_regset_call(do_fpregs_get, target, regset, pos, count,
1920  kbuf, ubuf);
1921 }
1922 
1923 static int fpregs_set(struct task_struct *target,
1924  const struct user_regset *regset,
1925  unsigned int pos, unsigned int count,
1926  const void *kbuf, const void __user *ubuf)
1927 {
1928  return do_regset_call(do_fpregs_set, target, regset, pos, count,
1929  kbuf, ubuf);
1930 }
1931 
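 /*
  * Back end for the legacy PTRACE_PEEKUSR/PTRACE_POKEUSR interface:
  * translate a PT_* byte offset (see <asm/ptrace_offsets.h>) into the
  * corresponding regset position and delegate to fpregs_get/fpregs_set
  * or gpregs_get/gpregs_set.  Debug registers (PT_IBR/PT_DBR) are
  * accessed directly in thread.ibr[]/thread.dbr[].  Returns 0 on
  * success, -1 on any error.
  */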
1932 static int
1933 access_uarea(struct task_struct *child, unsigned long addr,
1934  unsigned long *data, int write_access)
1935 {
1936  unsigned int pos = -1; /* an invalid value */
1937  int ret;
1938  unsigned long *ptr, regnum;
1939 
1940  if ((addr & 0x7) != 0) {
1941  dprintk("ptrace: unaligned register address 0x%lx\n", addr);
1942  return -1;
1943  }
1944  if ((addr >= PT_NAT_BITS + 8 && addr < PT_F2) ||
1945  (addr >= PT_R7 + 8 && addr < PT_B1) ||
1946  (addr >= PT_AR_LC + 8 && addr < PT_CR_IPSR) ||
1947  (addr >= PT_AR_SSD + 8 && addr < PT_DBR)) {
1948  dprintk("ptrace: rejecting access to register "
1949  "address 0x%lx\n", addr);
1950  return -1;
1951  }
1952 
1953  switch (addr) {
1954  case PT_F32 ... (PT_F127 + 15):
1955  pos = addr - PT_F32 + ELF_FP_OFFSET(32);
1956  break;
1957  case PT_F2 ... (PT_F5 + 15):
1958  pos = addr - PT_F2 + ELF_FP_OFFSET(2);
1959  break;
1960  case PT_F10 ... (PT_F31 + 15):
1961  pos = addr - PT_F10 + ELF_FP_OFFSET(10);
1962  break;
1963  case PT_F6 ... (PT_F9 + 15):
1964  pos = addr - PT_F6 + ELF_FP_OFFSET(6);
1965  break;
1966  }
1967 
1968  if (pos != -1) {
1969  if (write_access)
1970  ret = fpregs_set(child, NULL, pos,
1971  sizeof(unsigned long), data, NULL);
1972  else
1973  ret = fpregs_get(child, NULL, pos,
1974  sizeof(unsigned long), data, NULL);
1975  if (ret != 0)
1976  return -1;
1977  return 0;
1978  }
1979 
1980  switch (addr) {
1981  case PT_NAT_BITS:
1982  pos = ELF_NAT_OFFSET;
1983  break;
1984  case PT_R4 ... PT_R7:
1985  pos = addr - PT_R4 + ELF_GR_OFFSET(4);
1986  break;
1987  case PT_B1 ... PT_B5:
1988  pos = addr - PT_B1 + ELF_BR_OFFSET(1);
1989  break;
1990  case PT_AR_EC:
1991  pos = ELF_AR_EC_OFFSET;
1992  break;
1993  case PT_AR_LC:
1994  pos = ELF_AR_LC_OFFSET;
1995  break;
1996  case PT_CR_IPSR:
1997  pos = ELF_CR_IPSR_OFFSET;
1998  break;
1999  case PT_CR_IIP:
2000  pos = ELF_CR_IIP_OFFSET;
2001  break;
2002  case PT_CFM:
2003  pos = ELF_CFM_OFFSET;
2004  break;
2005  case PT_AR_UNAT:
2006  pos = ELF_AR_UNAT_OFFSET;
2007  break;
2008  case PT_AR_PFS:
2009  pos = ELF_AR_PFS_OFFSET;
2010  break;
2011  case PT_AR_RSC:
2012  pos = ELF_AR_RSC_OFFSET;
2013  break;
2014  case PT_AR_RNAT:
2015  pos = ELF_AR_RNAT_OFFSET;
2016  break;
2017  case PT_AR_BSPSTORE:
2018  pos = ELF_AR_BSPSTORE_OFFSET;
2019  break;
2020  case PT_PR:
2021  pos = ELF_PR_OFFSET;
2022  break;
2023  case PT_B6:
2024  pos = ELF_BR_OFFSET(6);
2025  break;
2026  case PT_AR_BSP:
2027  pos = ELF_AR_BSP_OFFSET;
2028  break;
2029  case PT_R1 ... PT_R3:
2030  pos = addr - PT_R1 + ELF_GR_OFFSET(1);
2031  break;
2032  case PT_R12 ... PT_R15:
2033  pos = addr - PT_R12 + ELF_GR_OFFSET(12);
2034  break;
2035  case PT_R8 ... PT_R11:
2036  pos = addr - PT_R8 + ELF_GR_OFFSET(8);
2037  break;
2038  case PT_R16 ... PT_R31:
2039  pos = addr - PT_R16 + ELF_GR_OFFSET(16);
2040  break;
2041  case PT_AR_CCV:
2042  pos = ELF_AR_CCV_OFFSET;
2043  break;
2044  case PT_AR_FPSR:
2045  pos = ELF_AR_FPSR_OFFSET;
2046  break;
2047  case PT_B0:
2048  pos = ELF_BR_OFFSET(0);
2049  break;
2050  case PT_B7:
2051  pos = ELF_BR_OFFSET(7);
2052  break;
2053  case PT_AR_CSD:
2054  pos = ELF_AR_CSD_OFFSET;
2055  break;
2056  case PT_AR_SSD:
2057  pos = ELF_AR_SSD_OFFSET;
2058  break;
2059  }
2060 
2061  if (pos != -1) {
2062  if (write_access)
2063  ret = gpregs_set(child, NULL, pos,
2064  sizeof(unsigned long), data, NULL);
2065  else
2066  ret = gpregs_get(child, NULL, pos,
2067  sizeof(unsigned long), data, NULL);
2068  if (ret != 0)
2069  return -1;
2070  return 0;
2071  }
2072 
2073  /* access debug registers */
2074  if (addr >= PT_IBR) {
2075  regnum = (addr - PT_IBR) >> 3;
2076  ptr = &child->thread.ibr[0];
2077  } else {
2078  regnum = (addr - PT_DBR) >> 3;
2079  ptr = &child->thread.dbr[0];
2080  }
2081 
2082  if (regnum >= 8) {
2083  dprintk("ptrace: rejecting access to register "
2084  "address 0x%lx\n", addr);
2085  return -1;
2086  }
2087 #ifdef CONFIG_PERFMON
2088  /*
2089  * Check if debug registers are used by perfmon. This
2090  * test must be done once we know that we can do the
2091  * operation, i.e. the arguments are all valid, but
2092  * before we start modifying the state.
2093  *
2094  * Perfmon needs to keep a count of how many processes
2095  * are trying to modify the debug registers for system
2096  * wide monitoring sessions.
2097  *
 2098  * We also include read access here, because a read may
2099  * cause the PMU-installed debug register state
2100  * (dbr[], ibr[]) to be reset. The two arrays are also
2101  * used by perfmon, but we do not use
2102  * IA64_THREAD_DBG_VALID. The registers are restored
2103  * by the PMU context switch code.
2104  */
2105  if (pfm_use_debug_registers(child))
2106  return -1;
2107 #endif
2108 
2109  if (!(child->thread.flags & IA64_THREAD_DBG_VALID)) {
2110  child->thread.flags |= IA64_THREAD_DBG_VALID;
2111  memset(child->thread.dbr, 0,
2112  sizeof(child->thread.dbr));
2113  memset(child->thread.ibr, 0,
2114  sizeof(child->thread.ibr));
2115  }
2116 
2117  ptr += regnum;
2118 
2119  if ((regnum & 1) && write_access) {
2120  /* don't let the user set kernel-level breakpoints: */
2121  *ptr = *data & ~(7UL << 56);
2122  return 0;
2123  }
2124  if (write_access)
2125  *ptr = *data;
2126  else
2127  *data = *ptr;
2128  return 0;
2129 }
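 /*
  * Illustrative sketch (not part of this file): from user space, the
  * code above is reached through PTRACE_PEEKUSR/PTRACE_POKEUSR (spelled
  * PTRACE_PEEKUSER/PTRACE_POKEUSER by glibc) with a PT_* offset, e.g.
  * reading a stopped child's cr.iip ("pid" stands for an attached,
  * stopped tracee; needs <sys/ptrace.h>, <errno.h> and
  * <asm/ptrace_offsets.h>):
  *
  *	errno = 0;
  *	long iip = ptrace(PTRACE_PEEKUSER, pid, PT_CR_IIP, 0);
  *	if (iip == -1 && errno != 0)
  *		perror("PTRACE_PEEKUSER");
  *
  * An unaligned or reserved offset makes access_uarea() fail, which is
  * reported back to the tracer as -EIO.
  */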
2130 
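 /*
  * Regsets exported for this architecture: NT_PRSTATUS (the general
  * registers, fetched and stored through the unwinder by gpregs_get and
  * gpregs_set) and NT_PRFPREG (the floating-point registers).  They
  * back PTRACE_GETREGSET/PTRACE_SETREGSET and ELF core dumps.
  */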
2131 static const struct user_regset native_regsets[] = {
2132  {
2133  .core_note_type = NT_PRSTATUS,
2134  .n = ELF_NGREG,
2135  .size = sizeof(elf_greg_t), .align = sizeof(elf_greg_t),
2136  .get = gpregs_get, .set = gpregs_set,
2137  .writeback = gpregs_writeback
2138  },
2139  {
2140  .core_note_type = NT_PRFPREG,
2141  .n = ELF_NFPREG,
2142  .size = sizeof(elf_fpreg_t), .align = sizeof(elf_fpreg_t),
2143  .get = fpregs_get, .set = fpregs_set, .active = fpregs_active
2144  },
2145 };
2146 
2147 static const struct user_regset_view user_ia64_view = {
2148  .name = "ia64",
2149  .e_machine = EM_IA_64,
2150  .regsets = native_regsets, .n = ARRAY_SIZE(native_regsets)
2151 };
2152 
 2153 const struct user_regset_view *task_user_regset_view(struct task_struct *tsk)
 2154 {
2155  return &user_ia64_view;
2156 }
2157 
 2158 struct syscall_get_set_args {
 2159  unsigned int i;
2160  unsigned int n;
2161  unsigned long *args;
2162  struct pt_regs *regs;
2163  int rw;
2164 };
2165 
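 /*
  * Unwinder callback: once unwound to the user-level frame, the
  * syscall's output arguments live in the stacked registers of the
  * kernel register backing store.  Read or write up to 'n' of them
  * starting at argument 'i'; the current frame size (cfm & 0x7f) bounds
  * how many actually exist, and on a read any arguments beyond the
  * frame are reported as 0.
  */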
2166 static void syscall_get_set_args_cb(struct unw_frame_info *info, void *data)
2167 {
2168  struct syscall_get_set_args *args = data;
2169  struct pt_regs *pt = args->regs;
2170  unsigned long *krbs, cfm, ndirty;
2171  int i, count;
2172 
2173  if (unw_unwind_to_user(info) < 0)
2174  return;
2175 
2176  cfm = pt->cr_ifs;
2177  krbs = (unsigned long *)info->task + IA64_RBS_OFFSET/8;
2178  ndirty = ia64_rse_num_regs(krbs, krbs + (pt->loadrs >> 19));
2179 
2180  count = 0;
2181  if (in_syscall(pt))
2182  count = min_t(int, args->n, cfm & 0x7f);
2183 
2184  for (i = 0; i < count; i++) {
2185  if (args->rw)
2186  *ia64_rse_skip_regs(krbs, ndirty + i + args->i) =
2187  args->args[i];
2188  else
2189  args->args[i] = *ia64_rse_skip_regs(krbs,
2190  ndirty + i + args->i);
2191  }
2192 
2193  if (!args->rw) {
2194  while (i < args->n) {
2195  args->args[i] = 0;
2196  i++;
2197  }
2198  }
2199 }
2200 
 2201 void ia64_syscall_get_set_arguments(struct task_struct *task,
 2202  struct pt_regs *regs, unsigned int i, unsigned int n,
2203  unsigned long *args, int rw)
2204 {
2205  struct syscall_get_set_args data = {
2206  .i = i,
2207  .n = n,
2208  .args = args,
2209  .regs = regs,
2210  .rw = rw,
2211  };
2212 
2213  if (task == current)
2214  unw_init_running(syscall_get_set_args_cb, &data);
2215  else {
2216  struct unw_frame_info ufi;
2217  memset(&ufi, 0, sizeof(ufi));
2218  unw_init_from_blocked_task(&ufi, task);
2219  syscall_get_set_args_cb(&ufi, &data);
2220  }
2221 }
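 /*
  * Illustrative sketch (assumption, not part of this file): the generic
  * syscall-introspection helpers in <asm/syscall.h> are expected to be
  * thin wrappers around the function above, roughly:
  *
  *	static inline void syscall_get_arguments(struct task_struct *task,
  *						 struct pt_regs *regs,
  *						 unsigned int i, unsigned int n,
  *						 unsigned long *args)
  *	{
  *		ia64_syscall_get_set_arguments(task, regs, i, n, args, 0);
  *	}
  *
  * with syscall_set_arguments() passing rw=1 to write the arguments back.
  */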