Linux Kernel 3.7.1
signal_32.c
1 /*
2  * Signal handling for 32bit PPC and 32bit tasks on 64bit PPC
3  *
4  * PowerPC version
5  * Copyright (C) 1995-1996 Gary Thomas ([email protected])
6  * Copyright (C) 2001 IBM
7  * Copyright (C) 1997,1998 Jakub Jelinek ([email protected])
8  * Copyright (C) 1997 David S. Miller ([email protected])
9  *
10  * Derived from "arch/i386/kernel/signal.c"
11  * Copyright (C) 1991, 1992 Linus Torvalds
12  * 1997-11-28 Modified for POSIX.1b signals by Richard Henderson
13  *
14  * This program is free software; you can redistribute it and/or
15  * modify it under the terms of the GNU General Public License
16  * as published by the Free Software Foundation; either version
17  * 2 of the License, or (at your option) any later version.
18  */
19 
20 #include <linux/sched.h>
21 #include <linux/mm.h>
22 #include <linux/smp.h>
23 #include <linux/kernel.h>
24 #include <linux/signal.h>
25 #include <linux/errno.h>
26 #include <linux/elf.h>
27 #include <linux/ptrace.h>
28 #include <linux/ratelimit.h>
29 #ifdef CONFIG_PPC64
30 #include <linux/syscalls.h>
31 #include <linux/compat.h>
32 #else
33 #include <linux/wait.h>
34 #include <linux/unistd.h>
35 #include <linux/stddef.h>
36 #include <linux/tty.h>
37 #include <linux/binfmts.h>
38 #endif
39 
40 #include <asm/uaccess.h>
41 #include <asm/cacheflush.h>
42 #include <asm/syscalls.h>
43 #include <asm/sigcontext.h>
44 #include <asm/vdso.h>
45 #include <asm/switch_to.h>
46 #ifdef CONFIG_PPC64
47 #include "ppc32.h"
48 #include <asm/unistd.h>
49 #else
50 #include <asm/ucontext.h>
51 #include <asm/pgtable.h>
52 #endif
53 
54 #include "signal.h"
55 
56 #undef DEBUG_SIG
57 
58 #ifdef CONFIG_PPC64
59 #define sys_sigsuspend compat_sys_sigsuspend
60 #define sys_rt_sigsuspend compat_sys_rt_sigsuspend
61 #define sys_rt_sigreturn compat_sys_rt_sigreturn
62 #define sys_sigaction compat_sys_sigaction
63 #define sys_swapcontext compat_sys_swapcontext
64 #define sys_sigreturn compat_sys_sigreturn
65 
66 #define old_sigaction old_sigaction32
67 #define sigcontext sigcontext32
68 #define mcontext mcontext32
69 #define ucontext ucontext32
70 
71 /*
72  * Userspace code may pass a ucontext which doesn't include VSX added
73  * at the end. We need to check for this case.
74  */
75 #define UCONTEXTSIZEWITHOUTVSX \
76  (sizeof(struct ucontext) - sizeof(elf_vsrreghalf_t32))
77 
78 /*
79  * Returning 0 means we return to userspace via
80  * ret_from_except and thus restore all user
81  * registers from *regs. This is what we need
82  * to do when a signal has been delivered.
83  */
84 
85 #define GP_REGS_SIZE min(sizeof(elf_gregset_t32), sizeof(struct pt_regs32))
86 #undef __SIGNAL_FRAMESIZE
87 #define __SIGNAL_FRAMESIZE __SIGNAL_FRAMESIZE32
88 #undef ELF_NVRREG
89 #define ELF_NVRREG ELF_NVRREG32
90 
91 /*
92  * Functions for flipping sigsets (thanks to brain dead generic
93  * implementation that makes things simple for little endian only)
94  */
95 static inline int put_sigset_t(compat_sigset_t __user *uset, sigset_t *set)
96 {
97  compat_sigset_t cset;
98 
99  switch (_NSIG_WORDS) {
100  case 4: cset.sig[6] = set->sig[3] & 0xffffffffull;
101  cset.sig[7] = set->sig[3] >> 32;
102  case 3: cset.sig[4] = set->sig[2] & 0xffffffffull;
103  cset.sig[5] = set->sig[2] >> 32;
104  case 2: cset.sig[2] = set->sig[1] & 0xffffffffull;
105  cset.sig[3] = set->sig[1] >> 32;
106  case 1: cset.sig[0] = set->sig[0] & 0xffffffffull;
107  cset.sig[1] = set->sig[0] >> 32;
108  }
109  return copy_to_user(uset, &cset, sizeof(*uset));
110 }
111 
112 static inline int get_sigset_t(sigset_t *set,
113  const compat_sigset_t __user *uset)
114 {
115  compat_sigset_t s32;
116 
117  if (copy_from_user(&s32, uset, sizeof(*uset)))
118  return -EFAULT;
119 
120  /*
121  * Swap the 2 words of the 64-bit sigset_t (they are stored
122  * in the "wrong" endian in 32-bit user storage).
123  */
124  switch (_NSIG_WORDS) {
125  case 4: set->sig[3] = s32.sig[6] | (((long)s32.sig[7]) << 32);
126  case 3: set->sig[2] = s32.sig[4] | (((long)s32.sig[5]) << 32);
127  case 2: set->sig[1] = s32.sig[2] | (((long)s32.sig[3]) << 32);
128  case 1: set->sig[0] = s32.sig[0] | (((long)s32.sig[1]) << 32);
129  }
130  return 0;
131 }
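/*
 * Editorial sketch (not part of the original file): how one 64-bit
 * sigset word round-trips through the pair of 32-bit compat words
 * handled by put_sigset_t()/get_sigset_t() above.
 */
static inline int sigset_word_roundtrip_example(unsigned long long word)
{
	unsigned int lo = word & 0xffffffffull;	/* becomes cset.sig[0] */
	unsigned int hi = word >> 32;		/* becomes cset.sig[1] */

	/* get_sigset_t() reassembles the pair low word first */
	return (lo | ((unsigned long long)hi << 32)) == word;	/* always true */
}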
132 
133 static inline int get_old_sigaction(struct k_sigaction *new_ka,
134  struct old_sigaction __user *act)
135 {
136  compat_old_sigset_t mask;
137  compat_uptr_t handler, restorer;
138 
139  if (get_user(handler, &act->sa_handler) ||
140  __get_user(restorer, &act->sa_restorer) ||
141  __get_user(new_ka->sa.sa_flags, &act->sa_flags) ||
142  __get_user(mask, &act->sa_mask))
143  return -EFAULT;
144  new_ka->sa.sa_handler = compat_ptr(handler);
145  new_ka->sa.sa_restorer = compat_ptr(restorer);
146  siginitset(&new_ka->sa.sa_mask, mask);
147  return 0;
148 }
149 
150 #define to_user_ptr(p) ptr_to_compat(p)
151 #define from_user_ptr(p) compat_ptr(p)
152 
153 static inline int save_general_regs(struct pt_regs *regs,
154  struct mcontext __user *frame)
155 {
156  elf_greg_t64 *gregs = (elf_greg_t64 *)regs;
157  int i;
158 
159  WARN_ON(!FULL_REGS(regs));
160 
161  for (i = 0; i <= PT_RESULT; i ++) {
162  if (i == 14 && !FULL_REGS(regs))
163  i = 32;
164  if (__put_user((unsigned int)gregs[i], &frame->mc_gregs[i]))
165  return -EFAULT;
166  }
167  return 0;
168 }
169 
170 static inline int restore_general_regs(struct pt_regs *regs,
171  struct mcontext __user *sr)
172 {
173  elf_greg_t64 *gregs = (elf_greg_t64 *)regs;
174  int i;
175 
176  for (i = 0; i <= PT_RESULT; i++) {
177  if ((i == PT_MSR) || (i == PT_SOFTE))
178  continue;
179  if (__get_user(gregs[i], &sr->mc_gregs[i]))
180  return -EFAULT;
181  }
182  return 0;
183 }
184 
185 #else /* CONFIG_PPC64 */
186 
187 #define GP_REGS_SIZE min(sizeof(elf_gregset_t), sizeof(struct pt_regs))
188 
189 static inline int put_sigset_t(sigset_t __user *uset, sigset_t *set)
190 {
191  return copy_to_user(uset, set, sizeof(*uset));
192 }
193 
194 static inline int get_sigset_t(sigset_t *set, const sigset_t __user *uset)
195 {
196  return copy_from_user(set, uset, sizeof(*uset));
197 }
198 
199 static inline int get_old_sigaction(struct k_sigaction *new_ka,
200  struct old_sigaction __user *act)
201 {
202  old_sigset_t mask;
203 
204  if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
205  __get_user(new_ka->sa.sa_handler, &act->sa_handler) ||
206  __get_user(new_ka->sa.sa_restorer, &act->sa_restorer) ||
207  __get_user(new_ka->sa.sa_flags, &act->sa_flags) ||
208  __get_user(mask, &act->sa_mask))
209  return -EFAULT;
210  siginitset(&new_ka->sa.sa_mask, mask);
211  return 0;
212 }
213 
214 #define to_user_ptr(p) ((unsigned long)(p))
215 #define from_user_ptr(p) ((void __user *)(p))
216 
217 static inline int save_general_regs(struct pt_regs *regs,
218  struct mcontext __user *frame)
219 {
220  WARN_ON(!FULL_REGS(regs));
221  return __copy_to_user(&frame->mc_gregs, regs, GP_REGS_SIZE);
222 }
223 
224 static inline int restore_general_regs(struct pt_regs *regs,
225  struct mcontext __user *sr)
226 {
227  /* copy up to but not including MSR */
228  if (__copy_from_user(regs, &sr->mc_gregs,
229  PT_MSR * sizeof(elf_greg_t)))
230  return -EFAULT;
231  /* copy from orig_r3 (the word after the MSR) up to the end */
232  if (__copy_from_user(&regs->orig_gpr3, &sr->mc_gregs[PT_ORIG_R3],
233  GP_REGS_SIZE - PT_ORIG_R3 * sizeof(elf_greg_t)))
234  return -EFAULT;
235  return 0;
236 }
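/*
 * Editorial note on the two-part copy above (assuming the usual PPC
 * ptrace layout where PT_MSR is 33 and PT_ORIG_R3 is 34; those values
 * are not defined in this file): the first __copy_from_user() covers
 * mc_gregs[0..32] (gpr0-gpr31 plus nip), the second restarts at
 * orig_gpr3, so the MSR word saved in the frame is deliberately
 * skipped and regs->msr is left untouched.
 */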
237 
238 #endif /* CONFIG_PPC64 */
239 
240 /*
241  * Atomically swap in the new signal mask, and wait for a signal.
242  */
243 long sys_sigsuspend(old_sigset_t mask)
244 {
245  sigset_t blocked;
246  siginitset(&blocked, mask);
247  return sigsuspend(&blocked);
248 }
249 
250 long sys_sigaction(int sig, struct old_sigaction __user *act,
251  struct old_sigaction __user *oact)
252 {
253  struct k_sigaction new_ka, old_ka;
254  int ret;
255 
256 #ifdef CONFIG_PPC64
257  if (sig < 0)
258  sig = -sig;
259 #endif
260 
261  if (act) {
262  if (get_old_sigaction(&new_ka, act))
263  return -EFAULT;
264  }
265 
266  ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
267  if (!ret && oact) {
268  if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
269  __put_user(to_user_ptr(old_ka.sa.sa_handler),
270  &oact->sa_handler) ||
271  __put_user(to_user_ptr(old_ka.sa.sa_restorer),
272  &oact->sa_restorer) ||
273  __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
274  __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
275  return -EFAULT;
276  }
277 
278  return ret;
279 }
280 
281 /*
282  * When we have signals to deliver, we set up on the
283  * user stack, going down from the original stack pointer:
284  * an ABI gap of 56 words
285  * an mcontext struct
286  * a sigcontext struct
287  * a gap of __SIGNAL_FRAMESIZE bytes
288  *
289  * Each of these things must be a multiple of 16 bytes in size. The following
290  * structure represents all of this except the __SIGNAL_FRAMESIZE gap
291  *
292  */
293 struct sigframe {
294  struct sigcontext sctx; /* the sigcontext */
295  struct mcontext mctx; /* all the register values */
296  /*
297  * Programs using the rs6000/xcoff abi can save up to 19 gp
298  * regs and 18 fp regs below sp before decrementing it.
299  */
300  int abigap[56];
301 };
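/*
 * Editorial sketch (not part of the original file): roughly where the
 * frame described above lands. The real placement is done by
 * get_sigframe() in signal.c, which also handles SA_ONSTACK and the
 * access_ok() check; this only shows the 16-byte alignment rule.
 */
static inline unsigned long sigframe_addr_example(unsigned long usp)
{
	return (usp - sizeof(struct sigframe)) & ~0xfUL;
}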
302 
303 /* We use the mc_pad field for the signal return trampoline. */
304 #define tramp mc_pad
305 
306 /*
307  * When we have rt signals to deliver, we set up on the
308  * user stack, going down from the original stack pointer:
309  * one rt_sigframe struct (siginfo + ucontext + ABI gap)
310  * a gap of __SIGNAL_FRAMESIZE+16 bytes
311  * (the +16 is to get the siginfo and ucontext in the same
312  * positions as in older kernels).
313  *
314  * Each of these things must be a multiple of 16 bytes in size.
315  *
316  */
317 struct rt_sigframe {
318 #ifdef CONFIG_PPC64
319  compat_siginfo_t info;
320 #else
321  struct siginfo info;
322 #endif
323  struct ucontext uc;
324  /*
325  * Programs using the rs6000/xcoff abi can save up to 19 gp
326  * regs and 18 fp regs below sp before decrementing it.
327  */
328  int abigap[56];
329 };
330 
331 #ifdef CONFIG_VSX
332 unsigned long copy_fpr_to_user(void __user *to,
333  struct task_struct *task)
334 {
335  double buf[ELF_NFPREG];
336  int i;
337 
338  /* copy FPR state from the thread_struct to a local buffer, then write it to userspace */
339  for (i = 0; i < (ELF_NFPREG - 1) ; i++)
340  buf[i] = task->thread.TS_FPR(i);
341  memcpy(&buf[i], &task->thread.fpscr, sizeof(double));
342  return __copy_to_user(to, buf, ELF_NFPREG * sizeof(double));
343 }
344 
345 unsigned long copy_fpr_from_user(struct task_struct *task,
346  void __user *from)
347 {
348  double buf[ELF_NFPREG];
349  int i;
350 
351  if (__copy_from_user(buf, from, ELF_NFPREG * sizeof(double)))
352  return 1;
353  for (i = 0; i < (ELF_NFPREG - 1) ; i++)
354  task->thread.TS_FPR(i) = buf[i];
355  memcpy(&task->thread.fpscr, &buf[i], sizeof(double));
356 
357  return 0;
358 }
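/*
 * Editorial note on the buffer layout used by the two helpers above
 * (assuming ELF_NFPREG is 33, i.e. 32 FPRs plus the FPSCR, as on PPC):
 * buf[0..31] carry the floating-point registers and the final slot,
 * buf[32], carries the FPSCR, which is why the loops stop at
 * ELF_NFPREG - 1 and the FPSCR is memcpy'd separately.
 */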
359 
360 unsigned long copy_vsx_to_user(void __user *to,
361  struct task_struct *task)
362 {
363  double buf[ELF_NVSRHALFREG];
364  int i;
365 
366  /* copy the VSR upper halves from the thread_struct to a local buffer, then write them to userspace */
367  for (i = 0; i < ELF_NVSRHALFREG; i++)
368  buf[i] = task->thread.fpr[i][TS_VSRLOWOFFSET];
369  return __copy_to_user(to, buf, ELF_NVSRHALFREG * sizeof(double));
370 }
371 
372 unsigned long copy_vsx_from_user(struct task_struct *task,
373  void __user *from)
374 {
375  double buf[ELF_NVSRHALFREG];
376  int i;
377 
378  if (__copy_from_user(buf, from, ELF_NVSRHALFREG * sizeof(double)))
379  return 1;
380  for (i = 0; i < ELF_NVSRHALFREG ; i++)
381  task->thread.fpr[i][TS_VSRLOWOFFSET] = buf[i];
382  return 0;
383 }
384 #else
385 inline unsigned long copy_fpr_to_user(void __user *to,
386  struct task_struct *task)
387 {
388  return __copy_to_user(to, task->thread.fpr,
389  ELF_NFPREG * sizeof(double));
390 }
391 
392 inline unsigned long copy_fpr_from_user(struct task_struct *task,
393  void __user *from)
394 {
395  return __copy_from_user(task->thread.fpr, from,
396  ELF_NFPREG * sizeof(double));
397 }
398 #endif
399 
400 /*
401  * Save the current user registers on the user stack.
402  * We only save the altivec/spe registers if the process has used
403  * altivec/spe instructions at some point.
404  */
405 static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,
406  int sigret, int ctx_has_vsx_region)
407 {
408  unsigned long msr = regs->msr;
409 
410  /* Make sure floating point registers are stored in regs */
411  flush_fp_to_thread(current);
412 
413  /* save general registers */
414  if (save_general_regs(regs, frame))
415  return 1;
416 
417 #ifdef CONFIG_ALTIVEC
418  /* save altivec registers */
419  if (current->thread.used_vr) {
420  flush_altivec_to_thread(current);
421  if (__copy_to_user(&frame->mc_vregs, current->thread.vr,
422  ELF_NVRREG * sizeof(vector128)))
423  return 1;
424  /* set MSR_VEC in the saved MSR value to indicate that
425  frame->mc_vregs contains valid data */
426  msr |= MSR_VEC;
427  }
428  /* else assert((regs->msr & MSR_VEC) == 0) */
429 
430  /* We always copy to/from vrsave, it's 0 if we don't have or don't
431  * use altivec. Since VSCR only contains 32 bits saved in the least
432  * significant bits of a vector, we "cheat" and stuff VRSAVE in the
433  * most significant bits of that same vector. --BenH
434  */
435  if (__put_user(current->thread.vrsave, (u32 __user *)&frame->mc_vregs[32]))
436  return 1;
437 #endif /* CONFIG_ALTIVEC */
438  if (copy_fpr_to_user(&frame->mc_fregs, current))
439  return 1;
440 #ifdef CONFIG_VSX
441  /*
442  * Copy VSR 0-31 upper half from thread_struct to local
443  * buffer, then write that to userspace. Also set MSR_VSX in
444  * the saved MSR value to indicate that frame->mc_vregs
445  * contains valid data
446  */
447  if (current->thread.used_vsr && ctx_has_vsx_region) {
448  __giveup_vsx(current);
449  if (copy_vsx_to_user(&frame->mc_vsregs, current))
450  return 1;
451  msr |= MSR_VSX;
452  }
453 #endif /* CONFIG_VSX */
454 #ifdef CONFIG_SPE
455  /* save spe registers */
456  if (current->thread.used_spe) {
457  flush_spe_to_thread(current);
458  if (__copy_to_user(&frame->mc_vregs, current->thread.evr,
459  ELF_NEVRREG * sizeof(u32)))
460  return 1;
461  /* set MSR_SPE in the saved MSR value to indicate that
462  frame->mc_vregs contains valid data */
463  msr |= MSR_SPE;
464  }
465  /* else assert((regs->msr & MSR_SPE) == 0) */
466 
467  /* We always copy to/from spefscr */
468  if (__put_user(current->thread.spefscr, (u32 __user *)&frame->mc_vregs + ELF_NEVRREG))
469  return 1;
470 #endif /* CONFIG_SPE */
471 
472  if (__put_user(msr, &frame->mc_gregs[PT_MSR]))
473  return 1;
474  if (sigret) {
475  /* Set up the sigreturn trampoline: li r0,sigret; sc */
476  if (__put_user(0x38000000UL + sigret, &frame->tramp[0])
477  || __put_user(0x44000002UL, &frame->tramp[1]))
478  return 1;
479  flush_icache_range((unsigned long) &frame->tramp[0],
480  (unsigned long) &frame->tramp[2]);
481  }
482 
483  return 0;
484 }
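/*
 * Editorial sketch (not part of the original file): the two words
 * written to frame->tramp above. 0x38000000 is the PowerPC encoding of
 * "addi r0,0,0", so adding the syscall number gives "li r0,sigret";
 * 0x44000002 is "sc". Together they form the fallback sigreturn
 * trampoline used when no VDSO trampoline is available.
 */
static inline void sigreturn_tramp_example(unsigned int *tramp,
					   unsigned int sigret)
{
	tramp[0] = 0x38000000u + sigret;	/* li r0,sigret */
	tramp[1] = 0x44000002u;			/* sc */
}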
485 
486 /*
487  * Restore the current user register values from the user stack,
488  * (except for MSR).
489  */
490 static long restore_user_regs(struct pt_regs *regs,
491  struct mcontext __user *sr, int sig)
492 {
493  long err;
494  unsigned int save_r2 = 0;
495  unsigned long msr;
496 #ifdef CONFIG_VSX
497  int i;
498 #endif
499 
500  /*
501  * restore general registers but not including MSR or SOFTE. Also
502  * take care of keeping r2 (TLS) intact if not a signal
503  */
504  if (!sig)
505  save_r2 = (unsigned int)regs->gpr[2];
506  err = restore_general_regs(regs, sr);
507  regs->trap = 0;
508  err |= __get_user(msr, &sr->mc_gregs[PT_MSR]);
509  if (!sig)
510  regs->gpr[2] = (unsigned long) save_r2;
511  if (err)
512  return 1;
513 
514  /* if doing signal return, restore the previous little-endian mode */
515  if (sig)
516  regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);
517 
518  /*
519  * Do this before updating the thread state in
520  * current->thread.fpr/vr/evr. That way, if we get preempted
521  * and another task grabs the FPU/Altivec/SPE, it won't be
522  * tempted to save the current CPU state into the thread_struct
523  * and corrupt what we are writing there.
524  */
525  discard_lazy_cpu_state();
526 
527 #ifdef CONFIG_ALTIVEC
528  /*
529  * Force the process to reload the altivec registers from
530  * current->thread when it next does altivec instructions
531  */
532  regs->msr &= ~MSR_VEC;
533  if (msr & MSR_VEC) {
534  /* restore altivec registers from the stack */
535  if (__copy_from_user(current->thread.vr, &sr->mc_vregs,
536  sizeof(sr->mc_vregs)))
537  return 1;
538  } else if (current->thread.used_vr)
539  memset(current->thread.vr, 0, ELF_NVRREG * sizeof(vector128));
540 
541  /* Always get VRSAVE back */
542  if (__get_user(current->thread.vrsave, (u32 __user *)&sr->mc_vregs[32]))
543  return 1;
544 #endif /* CONFIG_ALTIVEC */
545  if (copy_fpr_from_user(current, &sr->mc_fregs))
546  return 1;
547 
548 #ifdef CONFIG_VSX
549  /*
550  * Force the process to reload the VSX registers from
551  * current->thread when it next does VSX instruction.
552  */
553  regs->msr &= ~MSR_VSX;
554  if (msr & MSR_VSX) {
555  /*
556  * Restore the VSX registers (the VSR 0-31 doubleword 1 entries) from
557  * the stack to a local buffer, then write them out to the thread_struct
558  */
559  if (copy_vsx_from_user(current, &sr->mc_vsregs))
560  return 1;
561  } else if (current->thread.used_vsr)
562  for (i = 0; i < 32 ; i++)
563  current->thread.fpr[i][TS_VSRLOWOFFSET] = 0;
564 #endif /* CONFIG_VSX */
565  /*
566  * force the process to reload the FP registers from
567  * current->thread when it next does FP instructions
568  */
569  regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1);
570 
571 #ifdef CONFIG_SPE
572  /* force the process to reload the spe registers from
573  current->thread when it next does spe instructions */
574  regs->msr &= ~MSR_SPE;
575  if (msr & MSR_SPE) {
576  /* restore spe registers from the stack */
577  if (__copy_from_user(current->thread.evr, &sr->mc_vregs,
578  ELF_NEVRREG * sizeof(u32)))
579  return 1;
580  } else if (current->thread.used_spe)
581  memset(current->thread.evr, 0, ELF_NEVRREG * sizeof(u32));
582 
583  /* Always get SPEFSCR back */
584  if (__get_user(current->thread.spefscr, (u32 __user *)&sr->mc_vregs + ELF_NEVRREG))
585  return 1;
586 #endif /* CONFIG_SPE */
587 
588  return 0;
589 }
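/*
 * Editorial note on restore_user_regs() above: it clears MSR_VEC,
 * MSR_VSX, MSR_FP/MSR_FE0/MSR_FE1 and MSR_SPE in regs->msr on purpose,
 * so the first FP/Altivec/VSX/SPE instruction executed after the
 * signal return faults and reloads the just-restored values from
 * current->thread instead of reusing stale register contents.
 */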
590 
591 #ifdef CONFIG_PPC64
592 long compat_sys_rt_sigaction(int sig, const struct sigaction32 __user *act,
593  struct sigaction32 __user *oact, size_t sigsetsize)
594 {
595  struct k_sigaction new_ka, old_ka;
596  int ret;
597 
598  /* XXX: Don't preclude handling different sized sigset_t's. */
599  if (sigsetsize != sizeof(compat_sigset_t))
600  return -EINVAL;
601 
602  if (act) {
603  compat_uptr_t handler;
604 
605  ret = get_user(handler, &act->sa_handler);
606  new_ka.sa.sa_handler = compat_ptr(handler);
607  ret |= get_sigset_t(&new_ka.sa.sa_mask, &act->sa_mask);
608  ret |= __get_user(new_ka.sa.sa_flags, &act->sa_flags);
609  if (ret)
610  return -EFAULT;
611  }
612 
613  ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
614  if (!ret && oact) {
615  ret = put_user(to_user_ptr(old_ka.sa.sa_handler), &oact->sa_handler);
616  ret |= put_sigset_t(&oact->sa_mask, &old_ka.sa.sa_mask);
617  ret |= __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
618  }
619  return ret;
620 }
621 
622 /*
623  * Note: it is necessary to treat how as an unsigned int, with the
624  * corresponding cast to a signed int to ensure that the proper
625  * conversion (sign extension) between the register representation
626  * of a signed int (msr in 32-bit mode) and the register representation
627  * of a signed int (msr in 64-bit mode) is performed.
628  */
629 long compat_sys_rt_sigprocmask(u32 how, compat_sigset_t __user *set,
630  compat_sigset_t __user *oset, size_t sigsetsize)
631 {
632  sigset_t s;
633  sigset_t __user *up;
634  int ret;
635  mm_segment_t old_fs = get_fs();
636 
637  if (set) {
638  if (get_sigset_t(&s, set))
639  return -EFAULT;
640  }
641 
642  set_fs(KERNEL_DS);
643  /* This is valid because of the set_fs() */
644  up = (sigset_t __user *) &s;
645  ret = sys_rt_sigprocmask((int)how, set ? up : NULL, oset ? up : NULL,
646  sigsetsize);
647  set_fs(old_fs);
648  if (ret)
649  return ret;
650  if (oset) {
651  if (put_sigset_t(oset, &s))
652  return -EFAULT;
653  }
654  return 0;
655 }
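/*
 * Editorial sketch (not part of the original file): why the (int)how
 * cast discussed above matters. A 32-bit userspace argument arrives
 * zero-extended in a 64-bit register; casting through the 32-bit
 * signed type recovers negative values before they are passed on.
 */
static inline long sign_extend_u32_example(unsigned int v)
{
	/* v == 0xffffffff (32-bit -1) yields -1 here, not 4294967295 */
	return (long)(int)v;
}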
656 
657 long compat_sys_rt_sigpending(compat_sigset_t __user *set, compat_size_t sigsetsize)
658 {
659  sigset_t s;
660  int ret;
661  mm_segment_t old_fs = get_fs();
662 
663  set_fs(KERNEL_DS);
664  /* The __user pointer cast is valid because of the set_fs() */
665  ret = sys_rt_sigpending((sigset_t __user *) &s, sigsetsize);
666  set_fs(old_fs);
667  if (!ret) {
668  if (put_sigset_t(set, &s))
669  return -EFAULT;
670  }
671  return ret;
672 }
673 
674 
675 int copy_siginfo_to_user32(struct compat_siginfo __user *d, siginfo_t *s)
676 {
677  int err;
678 
679  if (!access_ok (VERIFY_WRITE, d, sizeof(*d)))
680  return -EFAULT;
681 
682  /* If you change siginfo_t structure, please be sure
683  * this code is fixed accordingly.
684  * It should never copy any pad contained in the structure
685  * to avoid security leaks, but must copy the generic
686  * 3 ints plus the relevant union member.
687  * This routine must convert siginfo from 64bit to 32bit as well
688  * at the same time.
689  */
690  err = __put_user(s->si_signo, &d->si_signo);
691  err |= __put_user(s->si_errno, &d->si_errno);
692  err |= __put_user((short)s->si_code, &d->si_code);
693  if (s->si_code < 0)
694  err |= __copy_to_user(&d->_sifields._pad, &s->_sifields._pad,
695  SI_PAD_SIZE32);
696  else switch(s->si_code >> 16) {
697  case __SI_CHLD >> 16:
698  err |= __put_user(s->si_pid, &d->si_pid);
699  err |= __put_user(s->si_uid, &d->si_uid);
700  err |= __put_user(s->si_utime, &d->si_utime);
701  err |= __put_user(s->si_stime, &d->si_stime);
702  err |= __put_user(s->si_status, &d->si_status);
703  break;
704  case __SI_FAULT >> 16:
705  err |= __put_user((unsigned int)(unsigned long)s->si_addr,
706  &d->si_addr);
707  break;
708  case __SI_POLL >> 16:
709  err |= __put_user(s->si_band, &d->si_band);
710  err |= __put_user(s->si_fd, &d->si_fd);
711  break;
712  case __SI_TIMER >> 16:
713  err |= __put_user(s->si_tid, &d->si_tid);
714  err |= __put_user(s->si_overrun, &d->si_overrun);
715  err |= __put_user(s->si_int, &d->si_int);
716  break;
717  case __SI_RT >> 16: /* This is not generated by the kernel as of now. */
718  case __SI_MESGQ >> 16:
719  err |= __put_user(s->si_int, &d->si_int);
720  /* fallthrough */
721  case __SI_KILL >> 16:
722  default:
723  err |= __put_user(s->si_pid, &d->si_pid);
724  err |= __put_user(s->si_uid, &d->si_uid);
725  break;
726  }
727  return err;
728 }
729 
730 #define copy_siginfo_to_user copy_siginfo_to_user32
731 
732 int copy_siginfo_from_user32(siginfo_t *to, struct compat_siginfo __user *from)
733 {
734  memset(to, 0, sizeof *to);
735 
736  if (copy_from_user(to, from, 3*sizeof(int)) ||
737  copy_from_user(to->_sifields._pad,
738  from->_sifields._pad, SI_PAD_SIZE32))
739  return -EFAULT;
740 
741  return 0;
742 }
743 
744 /*
745  * Note: it is necessary to treat pid and sig as unsigned ints, with the
746  * corresponding cast to a signed int to ensure that the proper conversion
747  * (sign extension) between the register representation of a signed int
748  * (msr in 32-bit mode) and the register representation of a signed int
749  * (msr in 64-bit mode) is performed.
750  */
751 long compat_sys_rt_sigqueueinfo(u32 pid, u32 sig, compat_siginfo_t __user *uinfo)
752 {
753  siginfo_t info;
754  int ret;
755  mm_segment_t old_fs = get_fs();
756 
757  ret = copy_siginfo_from_user32(&info, uinfo);
758  if (unlikely(ret))
759  return ret;
760 
761  set_fs (KERNEL_DS);
762  /* The __user pointer cast is valid because of the set_fs() */
763  ret = sys_rt_sigqueueinfo((int)pid, (int)sig, (siginfo_t __user *) &info);
764  set_fs (old_fs);
765  return ret;
766 }
767 /*
768  * Start Alternate signal stack support
769  *
770  * System Calls
771  * sigaltstack compat_sys_sigaltstack
772  */
773 
774 int compat_sys_sigaltstack(u32 __new, u32 __old, int r5,
775  int r6, int r7, int r8, struct pt_regs *regs)
776 {
777  stack_32_t __user * newstack = compat_ptr(__new);
778  stack_32_t __user * oldstack = compat_ptr(__old);
779  stack_t uss, uoss;
780  int ret;
781  mm_segment_t old_fs;
782  unsigned long sp;
783  compat_uptr_t ss_sp;
784 
785  /*
786  * set sp to the user stack on entry to the system call
787  * the system call router sets R9 to the saved registers
788  */
789  sp = regs->gpr[1];
790 
791  /* Put new stack info in local 64 bit stack struct */
792  if (newstack) {
793  if (get_user(ss_sp, &newstack->ss_sp) ||
794  __get_user(uss.ss_flags, &newstack->ss_flags) ||
795  __get_user(uss.ss_size, &newstack->ss_size))
796  return -EFAULT;
797  uss.ss_sp = compat_ptr(ss_sp);
798  }
799 
800  old_fs = get_fs();
801  set_fs(KERNEL_DS);
802  /* The __user pointer casts are valid because of the set_fs() */
803  ret = do_sigaltstack(
804  newstack ? (stack_t __user *) &uss : NULL,
805  oldstack ? (stack_t __user *) &uoss : NULL,
806  sp);
807  set_fs(old_fs);
808  /* Copy the stack information to the user output buffer */
809  if (!ret && oldstack &&
810  (put_user(ptr_to_compat(uoss.ss_sp), &oldstack->ss_sp) ||
811  __put_user(uoss.ss_flags, &oldstack->ss_flags) ||
812  __put_user(uoss.ss_size, &oldstack->ss_size)))
813  return -EFAULT;
814  return ret;
815 }
816 #endif /* CONFIG_PPC64 */
817 
818 /*
819  * Set up a signal frame for a "real-time" signal handler
820  * (one which gets siginfo).
821  */
822 int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
823  siginfo_t *info, sigset_t *oldset,
824  struct pt_regs *regs)
825 {
826  struct rt_sigframe __user *rt_sf;
827  struct mcontext __user *frame;
828  void __user *addr;
829  unsigned long newsp = 0;
830 
831  /* Set up Signal Frame */
832  /* Put a Real Time Context onto stack */
833  rt_sf = get_sigframe(ka, regs, sizeof(*rt_sf), 1);
834  addr = rt_sf;
835  if (unlikely(rt_sf == NULL))
836  goto badframe;
837 
838  /* Put the siginfo & fill in most of the ucontext */
839  if (copy_siginfo_to_user(&rt_sf->info, info)
840  || __put_user(0, &rt_sf->uc.uc_flags)
841  || __put_user(0, &rt_sf->uc.uc_link)
842  || __put_user(current->sas_ss_sp, &rt_sf->uc.uc_stack.ss_sp)
843  || __put_user(sas_ss_flags(regs->gpr[1]),
844  &rt_sf->uc.uc_stack.ss_flags)
845  || __put_user(current->sas_ss_size, &rt_sf->uc.uc_stack.ss_size)
846  || __put_user(to_user_ptr(&rt_sf->uc.uc_mcontext),
847  &rt_sf->uc.uc_regs)
848  || put_sigset_t(&rt_sf->uc.uc_sigmask, oldset))
849  goto badframe;
850 
851  /* Save user registers on the stack */
852  frame = &rt_sf->uc.uc_mcontext;
853  addr = frame;
854  if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
855  if (save_user_regs(regs, frame, 0, 1))
856  goto badframe;
857  regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
858  } else {
859  if (save_user_regs(regs, frame, __NR_rt_sigreturn, 1))
860  goto badframe;
861  regs->link = (unsigned long) frame->tramp;
862  }
863 
864  current->thread.fpscr.val = 0; /* turn off all fp exceptions */
865 
866  /* create a stack frame for the caller of the handler */
867  newsp = ((unsigned long)rt_sf) - (__SIGNAL_FRAMESIZE + 16);
868  addr = (void __user *)regs->gpr[1];
869  if (put_user(regs->gpr[1], (u32 __user *)newsp))
870  goto badframe;
871 
872  /* Fill registers for signal handler */
873  regs->gpr[1] = newsp;
874  regs->gpr[3] = sig;
875  regs->gpr[4] = (unsigned long) &rt_sf->info;
876  regs->gpr[5] = (unsigned long) &rt_sf->uc;
877  regs->gpr[6] = (unsigned long) rt_sf;
878  regs->nip = (unsigned long) ka->sa.sa_handler;
879  /* enter the signal handler in big-endian mode */
880  regs->msr &= ~MSR_LE;
881  return 1;
882 
883 badframe:
884 #ifdef DEBUG_SIG
885  printk("badframe in handle_rt_signal, regs=%p frame=%p newsp=%lx\n",
886  regs, frame, newsp);
887 #endif
888  if (show_unhandled_signals)
889  printk_ratelimited(KERN_INFO
890  "%s[%d]: bad frame in handle_rt_signal32: "
891  "%p nip %08lx lr %08lx\n",
892  current->comm, current->pid,
893  addr, regs->nip, regs->link);
894 
895  force_sigsegv(sig, current);
896  return 0;
897 }
898 
899 static int do_setcontext(struct ucontext __user *ucp, struct pt_regs *regs, int sig)
900 {
901  sigset_t set;
902  struct mcontext __user *mcp;
903 
904  if (get_sigset_t(&set, &ucp->uc_sigmask))
905  return -EFAULT;
906 #ifdef CONFIG_PPC64
907  {
908  u32 cmcp;
909 
910  if (__get_user(cmcp, &ucp->uc_regs))
911  return -EFAULT;
912  mcp = (struct mcontext __user *)(u64)cmcp;
913  /* no need to check access_ok(mcp), since mcp < 4GB */
914  }
915 #else
916  if (__get_user(mcp, &ucp->uc_regs))
917  return -EFAULT;
918  if (!access_ok(VERIFY_READ, mcp, sizeof(*mcp)))
919  return -EFAULT;
920 #endif
921  set_current_blocked(&set);
922  if (restore_user_regs(regs, mcp, sig))
923  return -EFAULT;
924 
925  return 0;
926 }
927 
928 long sys_swapcontext(struct ucontext __user *old_ctx,
929  struct ucontext __user *new_ctx,
930  int ctx_size, int r6, int r7, int r8, struct pt_regs *regs)
931 {
932  unsigned char tmp;
933  int ctx_has_vsx_region = 0;
934 
935 #ifdef CONFIG_PPC64
936  unsigned long new_msr = 0;
937 
938  if (new_ctx) {
939  struct mcontext __user *mcp;
940  u32 cmcp;
941 
942  /*
943  * Get pointer to the real mcontext. No need for
944  * access_ok since we are dealing with compat
945  * pointers.
946  */
947  if (__get_user(cmcp, &new_ctx->uc_regs))
948  return -EFAULT;
949  mcp = (struct mcontext __user *)(u64)cmcp;
950  if (__get_user(new_msr, &mcp->mc_gregs[PT_MSR]))
951  return -EFAULT;
952  }
953  /*
954  * Check that the context is not smaller than the original
955  * size (with VMX but without VSX)
956  */
957  if (ctx_size < UCONTEXTSIZEWITHOUTVSX)
958  return -EINVAL;
959  /*
960  * If the new context state sets the MSR VSX bits but
961  * it doesn't provide VSX state.
962  */
963  if ((ctx_size < sizeof(struct ucontext)) &&
964  (new_msr & MSR_VSX))
965  return -EINVAL;
966  /* Does the context have enough room to store VSX data? */
967  if (ctx_size >= sizeof(struct ucontext))
968  ctx_has_vsx_region = 1;
969 #else
970  /* Context size is for future use. Right now, we only make sure
971  * we are passed something we understand
972  */
973  if (ctx_size < sizeof(struct ucontext))
974  return -EINVAL;
975 #endif
976  if (old_ctx != NULL) {
977  struct mcontext __user *mctx;
978 
979  /*
980  * old_ctx might not be 16-byte aligned, in which
981  * case old_ctx->uc_mcontext won't be either.
982  * Because we have the old_ctx->uc_pad2 field
983  * before old_ctx->uc_mcontext, we need to round down
984  * from &old_ctx->uc_mcontext to a 16-byte boundary.
985  */
986  mctx = (struct mcontext __user *)
987  ((unsigned long) &old_ctx->uc_mcontext & ~0xfUL);
988  if (!access_ok(VERIFY_WRITE, old_ctx, ctx_size)
989  || save_user_regs(regs, mctx, 0, ctx_has_vsx_region)
990  || put_sigset_t(&old_ctx->uc_sigmask, &current->blocked)
991  || __put_user(to_user_ptr(mctx), &old_ctx->uc_regs))
992  return -EFAULT;
993  }
994  if (new_ctx == NULL)
995  return 0;
996  if (!access_ok(VERIFY_READ, new_ctx, ctx_size)
997  || __get_user(tmp, (u8 __user *) new_ctx)
998  || __get_user(tmp, (u8 __user *) new_ctx + ctx_size - 1))
999  return -EFAULT;
1000 
1001  /*
1002  * If we get a fault copying the context into the kernel's
1003  * image of the user's registers, we can't just return -EFAULT
1004  * because the user's registers will be corrupted. For instance
1005  * the NIP value may have been updated but not some of the
1006  * other registers. Given that we have done the access_ok
1007  * and successfully read the first and last bytes of the region
1008  * above, this should only happen in an out-of-memory situation
1009  * or if another thread unmaps the region containing the context.
1010  * We kill the task with a SIGSEGV in this situation.
1011  */
1012  if (do_setcontext(new_ctx, regs, 0))
1013  do_exit(SIGSEGV);
1014 
1015  set_thread_flag(TIF_RESTOREALL);
1016  return 0;
1017 }
1018 
1019 long sys_rt_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8,
1020  struct pt_regs *regs)
1021 {
1022  struct rt_sigframe __user *rt_sf;
1023 
1024  /* Always make any pending restarted system calls return -EINTR */
1025  current_thread_info()->restart_block.fn = do_no_restart_syscall;
1026 
1027  rt_sf = (struct rt_sigframe __user *)
1028  (regs->gpr[1] + __SIGNAL_FRAMESIZE + 16);
1029  if (!access_ok(VERIFY_READ, rt_sf, sizeof(*rt_sf)))
1030  goto bad;
1031  if (do_setcontext(&rt_sf->uc, regs, 1))
1032  goto bad;
1033 
1034  /*
1035  * It's not clear whether or why it is desirable to save the
1036  * sigaltstack setting on signal delivery and restore it on
1037  * signal return. But other architectures do this and we have
1038  * always done it up until now so it is probably better not to
1039  * change it. -- paulus
1040  */
1041 #ifdef CONFIG_PPC64
1042  /*
1043  * We use the compat_sys_ version that does the 32/64 bits conversion
1044  * and takes userland pointer directly. What about error checking ?
1045  * nobody does any...
1046  */
1047  compat_sys_sigaltstack((u32)(u64)&rt_sf->uc.uc_stack, 0, 0, 0, 0, 0, regs);
1048 #else
1049  do_sigaltstack(&rt_sf->uc.uc_stack, NULL, regs->gpr[1]);
1050 #endif
1051  set_thread_flag(TIF_RESTOREALL);
1052  return 0;
1053 
1054  bad:
1055  if (show_unhandled_signals)
1056  printk_ratelimited(KERN_INFO
1057  "%s[%d]: bad frame in sys_rt_sigreturn: "
1058  "%p nip %08lx lr %08lx\n",
1059  current->comm, current->pid,
1060  rt_sf, regs->nip, regs->link);
1061 
1061 
1062  force_sig(SIGSEGV, current);
1063  return 0;
1064 }
1065 
1066 #ifdef CONFIG_PPC32
1067 int sys_debug_setcontext(struct ucontext __user *ctx,
1068  int ndbg, struct sig_dbg_op __user *dbg,
1069  int r6, int r7, int r8,
1070  struct pt_regs *regs)
1071 {
1072  struct sig_dbg_op op;
1073  int i;
1074  unsigned char tmp;
1075  unsigned long new_msr = regs->msr;
1076 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
1077  unsigned long new_dbcr0 = current->thread.dbcr0;
1078 #endif
1079 
1080  for (i=0; i<ndbg; i++) {
1081  if (copy_from_user(&op, dbg + i, sizeof(op)))
1082  return -EFAULT;
1083  switch (op.dbg_type) {
1084  case SIG_DBG_SINGLE_STEPPING:
1085 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
1086  if (op.dbg_value) {
1087  new_msr |= MSR_DE;
1088  new_dbcr0 |= (DBCR0_IDM | DBCR0_IC);
1089  } else {
1090  new_dbcr0 &= ~DBCR0_IC;
1091  if (!DBCR_ACTIVE_EVENTS(new_dbcr0,
1092  current->thread.dbcr1)) {
1093  new_msr &= ~MSR_DE;
1094  new_dbcr0 &= ~DBCR0_IDM;
1095  }
1096  }
1097 #else
1098  if (op.dbg_value)
1099  new_msr |= MSR_SE;
1100  else
1101  new_msr &= ~MSR_SE;
1102 #endif
1103  break;
1104  case SIG_DBG_BRANCH_TRACING:
1105 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
1106  return -EINVAL;
1107 #else
1108  if (op.dbg_value)
1109  new_msr |= MSR_BE;
1110  else
1111  new_msr &= ~MSR_BE;
1112 #endif
1113  break;
1114 
1115  default:
1116  return -EINVAL;
1117  }
1118  }
1119 
1120  /* We wait until here to actually install the values in the
1121  registers so if we fail in the above loop, it will not
1122  affect the contents of these registers. After this point,
1123  failure is a problem, anyway, and it's very unlikely unless
1124  the user is really doing something wrong. */
1125  regs->msr = new_msr;
1126 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
1127  current->thread.dbcr0 = new_dbcr0;
1128 #endif
1129 
1130  if (!access_ok(VERIFY_READ, ctx, sizeof(*ctx))
1131  || __get_user(tmp, (u8 __user *) ctx)
1132  || __get_user(tmp, (u8 __user *) (ctx + 1) - 1))
1133  return -EFAULT;
1134 
1135  /*
1136  * If we get a fault copying the context into the kernel's
1137  * image of the user's registers, we can't just return -EFAULT
1138  * because the user's registers will be corrupted. For instance
1139  * the NIP value may have been updated but not some of the
1140  * other registers. Given that we have done the access_ok
1141  * and successfully read the first and last bytes of the region
1142  * above, this should only happen in an out-of-memory situation
1143  * or if another thread unmaps the region containing the context.
1144  * We kill the task with a SIGSEGV in this situation.
1145  */
1146  if (do_setcontext(ctx, regs, 1)) {
1147  if (show_unhandled_signals)
1148  printk_ratelimited(KERN_INFO "%s[%d]: bad frame in "
1149  "sys_debug_setcontext: %p nip %08lx "
1150  "lr %08lx\n",
1151  current->comm, current->pid,
1152  ctx, regs->nip, regs->link);
1153 
1153 
1154  force_sig(SIGSEGV, current);
1155  goto out;
1156  }
1157 
1158  /*
1159  * It's not clear whether or why it is desirable to save the
1160  * sigaltstack setting on signal delivery and restore it on
1161  * signal return. But other architectures do this and we have
1162  * always done it up until now so it is probably better not to
1163  * change it. -- paulus
1164  */
1165  do_sigaltstack(&ctx->uc_stack, NULL, regs->gpr[1]);
1166 
1167  set_thread_flag(TIF_RESTOREALL);
1168  out:
1169  return 0;
1170 }
1171 #endif
1172 
1173 /*
1174  * OK, we're invoking a handler
1175  */
1176 int handle_signal32(unsigned long sig, struct k_sigaction *ka,
1177  siginfo_t *info, sigset_t *oldset, struct pt_regs *regs)
1178 {
1179  struct sigcontext __user *sc;
1180  struct sigframe __user *frame;
1181  unsigned long newsp = 0;
1182 
1183  /* Set up Signal Frame */
1184  frame = get_sigframe(ka, regs, sizeof(*frame), 1);
1185  if (unlikely(frame == NULL))
1186  goto badframe;
1187  sc = (struct sigcontext __user *) &frame->sctx;
1188 
1189 #if _NSIG != 64
1190 #error "Please adjust handle_signal()"
1191 #endif
1192  if (__put_user(to_user_ptr(ka->sa.sa_handler), &sc->handler)
1193  || __put_user(oldset->sig[0], &sc->oldmask)
1194 #ifdef CONFIG_PPC64
1195  || __put_user((oldset->sig[0] >> 32), &sc->_unused[3])
1196 #else
1197  || __put_user(oldset->sig[1], &sc->_unused[3])
1198 #endif
1199  || __put_user(to_user_ptr(&frame->mctx), &sc->regs)
1200  || __put_user(sig, &sc->signal))
1201  goto badframe;
1202 
1203  if (vdso32_sigtramp && current->mm->context.vdso_base) {
1204  if (save_user_regs(regs, &frame->mctx, 0, 1))
1205  goto badframe;
1206  regs->link = current->mm->context.vdso_base + vdso32_sigtramp;
1207  } else {
1208  if (save_user_regs(regs, &frame->mctx, __NR_sigreturn, 1))
1209  goto badframe;
1210  regs->link = (unsigned long) frame->mctx.tramp;
1211  }
1212 
1213  current->thread.fpscr.val = 0; /* turn off all fp exceptions */
1214 
1215  /* create a stack frame for the caller of the handler */
1216  newsp = ((unsigned long)frame) - __SIGNAL_FRAMESIZE;
1217  if (put_user(regs->gpr[1], (u32 __user *)newsp))
1218  goto badframe;
1219 
1220  regs->gpr[1] = newsp;
1221  regs->gpr[3] = sig;
1222  regs->gpr[4] = (unsigned long) sc;
1223  regs->nip = (unsigned long) ka->sa.sa_handler;
1224  /* enter the signal handler in big-endian mode */
1225  regs->msr &= ~MSR_LE;
1226 
1227  return 1;
1228 
1229 badframe:
1230 #ifdef DEBUG_SIG
1231  printk("badframe in handle_signal, regs=%p frame=%p newsp=%lx\n",
1232  regs, frame, newsp);
1233 #endif
1234  if (show_unhandled_signals)
1235  printk_ratelimited(KERN_INFO
1236  "%s[%d]: bad frame in handle_signal32: "
1237  "%p nip %08lx lr %08lx\n",
1238  current->comm, current->pid,
1239  frame, regs->nip, regs->link);
1240 
1241  force_sigsegv(sig, current);
1242  return 0;
1243 }
1244 
1245 /*
1246  * Do a signal return; undo the signal stack.
1247  */
1248 long sys_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8,
1249  struct pt_regs *regs)
1250 {
1251  struct sigcontext __user *sc;
1252  struct sigcontext sigctx;
1253  struct mcontext __user *sr;
1254  void __user *addr;
1255  sigset_t set;
1256 
1257  /* Always make any pending restarted system calls return -EINTR */
1258  current_thread_info()->restart_block.fn = do_no_restart_syscall;
1259 
1260  sc = (struct sigcontext __user *)(regs->gpr[1] + __SIGNAL_FRAMESIZE);
1261  addr = sc;
1262  if (copy_from_user(&sigctx, sc, sizeof(sigctx)))
1263  goto badframe;
1264 
1265 #ifdef CONFIG_PPC64
1266  /*
1267  * Note that PPC32 puts the upper 32 bits of the sigmask in the
1268  * unused part of the signal stackframe
1269  */
1270  set.sig[0] = sigctx.oldmask + ((long)(sigctx._unused[3]) << 32);
1271 #else
1272  set.sig[0] = sigctx.oldmask;
1273  set.sig[1] = sigctx._unused[3];
1274 #endif
1275  set_current_blocked(&set);
1276 
1277  sr = (struct mcontext __user *)from_user_ptr(sigctx.regs);
1278  addr = sr;
1279  if (!access_ok(VERIFY_READ, sr, sizeof(*sr))
1280  || restore_user_regs(regs, sr, 1))
1281  goto badframe;
1282 
1283  set_thread_flag(TIF_RESTOREALL);
1284  return 0;
1285 
1286 badframe:
1287  if (show_unhandled_signals)
1288  printk_ratelimited(KERN_INFO
1289  "%s[%d]: bad frame in sys_sigreturn: "
1290  "%p nip %08lx lr %08lx\n",
1291  current->comm, current->pid,
1292  addr, regs->nip, regs->link);
1293 
1293 
1294  force_sig(SIGSEGV, current);
1295  return 0;
1296 }