Linux Kernel 3.7.1
signal.c
1 /*
2  * linux/kernel/signal.c
3  *
4  * Copyright (C) 1991, 1992 Linus Torvalds
5  *
6  * 1997-11-02 Modified for POSIX.1b signals by Richard Henderson
7  *
8  * 2003-06-02 Jim Houston - Concurrent Computer Corp.
9  * Changes to use preallocated sigqueue structures
10  * to allow signals to be sent reliably.
11  */
12 
13 #include <linux/slab.h>
14 #include <linux/export.h>
15 #include <linux/init.h>
16 #include <linux/sched.h>
17 #include <linux/fs.h>
18 #include <linux/tty.h>
19 #include <linux/binfmts.h>
20 #include <linux/coredump.h>
21 #include <linux/security.h>
22 #include <linux/syscalls.h>
23 #include <linux/ptrace.h>
24 #include <linux/signal.h>
25 #include <linux/signalfd.h>
26 #include <linux/ratelimit.h>
27 #include <linux/tracehook.h>
28 #include <linux/capability.h>
29 #include <linux/freezer.h>
30 #include <linux/pid_namespace.h>
31 #include <linux/nsproxy.h>
32 #include <linux/user_namespace.h>
33 #include <linux/uprobes.h>
34 #define CREATE_TRACE_POINTS
35 #include <trace/events/signal.h>
36 
37 #include <asm/param.h>
38 #include <asm/uaccess.h>
39 #include <asm/unistd.h>
40 #include <asm/siginfo.h>
41 #include <asm/cacheflush.h>
42 #include "audit.h" /* audit_signal_info() */
43 
44 /*
45  * SLAB caches for signal bits.
46  */
47 
48 static struct kmem_cache *sigqueue_cachep;
49 
50 int print_fatal_signals __read_mostly;
51 
52 static void __user *sig_handler(struct task_struct *t, int sig)
53 {
54  return t->sighand->action[sig - 1].sa.sa_handler;
55 }
56 
57 static int sig_handler_ignored(void __user *handler, int sig)
58 {
59  /* Is it explicitly or implicitly ignored? */
60  return handler == SIG_IGN ||
61  (handler == SIG_DFL && sig_kernel_ignore(sig));
62 }
63 
64 static int sig_task_ignored(struct task_struct *t, int sig, bool force)
65 {
66  void __user *handler;
67 
68  handler = sig_handler(t, sig);
69 
70  if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
71  handler == SIG_DFL && !force)
72  return 1;
73 
74  return sig_handler_ignored(handler, sig);
75 }
76 
77 static int sig_ignored(struct task_struct *t, int sig, bool force)
78 {
79  /*
80  * Blocked signals are never ignored, since the
81  * signal handler may change by the time it is
82  * unblocked.
83  */
84  if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
85  return 0;
86 
87  if (!sig_task_ignored(t, sig, force))
88  return 0;
89 
90  /*
91  * Tracers may want to know about even ignored signals.
92  */
93  return !t->ptrace;
94 }
95 
96 /*
97  * Re-calculate pending state from the set of locally pending
98  * signals, globally pending signals, and blocked signals.
99  */
100 static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
101 {
102  unsigned long ready;
103  long i;
104 
105  switch (_NSIG_WORDS) {
106  default:
107  for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
108  ready |= signal->sig[i] &~ blocked->sig[i];
109  break;
110 
111  case 4: ready = signal->sig[3] &~ blocked->sig[3];
112  ready |= signal->sig[2] &~ blocked->sig[2];
113  ready |= signal->sig[1] &~ blocked->sig[1];
114  ready |= signal->sig[0] &~ blocked->sig[0];
115  break;
116 
117  case 2: ready = signal->sig[1] &~ blocked->sig[1];
118  ready |= signal->sig[0] &~ blocked->sig[0];
119  break;
120 
121  case 1: ready = signal->sig[0] &~ blocked->sig[0];
122  }
123  return ready != 0;
124 }
125 
126 #define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
127 
128 static int recalc_sigpending_tsk(struct task_struct *t)
129 {
130  if ((t->jobctl & JOBCTL_PENDING_MASK) ||
131  PENDING(&t->pending, &t->blocked) ||
132  PENDING(&t->signal->shared_pending, &t->blocked)) {
133  set_tsk_thread_flag(t, TIF_SIGPENDING);
134  return 1;
135  }
136  /*
137  * We must never clear the flag in another thread, or in current
138  * when it's possible the current syscall is returning -ERESTART*.
139  * So we don't clear it here, and only callers who know they should do.
140  */
141  return 0;
142 }
143 
144 /*
145  * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
146  * This is superfluous when called on current, the wakeup is a harmless no-op.
147  */
148 void recalc_sigpending_and_wake(struct task_struct *t)
149 {
150  if (recalc_sigpending_tsk(t))
151  signal_wake_up(t, 0);
152 }
153 
154 void recalc_sigpending(void)
155 {
156  if (!recalc_sigpending_tsk(current) && !freezing(current))
157  clear_thread_flag(TIF_SIGPENDING);
158 
159 }
160 
161 /* Given the mask, find the first available signal that should be serviced. */
162 
163 #define SYNCHRONOUS_MASK \
164  (sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
165  sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS))
166 
167 int next_signal(struct sigpending *pending, sigset_t *mask)
168 {
169  unsigned long i, *s, *m, x;
170  int sig = 0;
171 
172  s = pending->signal.sig;
173  m = mask->sig;
174 
175  /*
176  * Handle the first word specially: it contains the
177  * synchronous signals that need to be dequeued first.
178  */
179  x = *s &~ *m;
180  if (x) {
181  if (x & SYNCHRONOUS_MASK)
182  x &= SYNCHRONOUS_MASK;
183  sig = ffz(~x) + 1;
184  return sig;
185  }
186 
187  switch (_NSIG_WORDS) {
188  default:
189  for (i = 1; i < _NSIG_WORDS; ++i) {
190  x = *++s &~ *++m;
191  if (!x)
192  continue;
193  sig = ffz(~x) + i*_NSIG_BPW + 1;
194  break;
195  }
196  break;
197 
198  case 2:
199  x = s[1] &~ m[1];
200  if (!x)
201  break;
202  sig = ffz(~x) + _NSIG_BPW + 1;
203  break;
204 
205  case 1:
206  /* Nothing to do */
207  break;
208  }
209 
210  return sig;
211 }
212 
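As a quick illustration of the fast path above (not part of signal.c): assuming SIGUSR1 and SIGSEGV are both pending and unblocked in the first word, the synchronous SIGSEGV is dequeued first, because x is narrowed to SYNCHRONOUS_MASK before the bit scan:

	unsigned long x;
	int sig;

	x = sigmask(SIGUSR1) | sigmask(SIGSEGV);	/* hypothetical pending bits */
	if (x & SYNCHRONOUS_MASK)			/* SIGSEGV is synchronous */
		x &= SYNCHRONOUS_MASK;			/* only sigmask(SIGSEGV) remains */
	sig = ffz(~x) + 1;				/* lowest set bit + 1 == SIGSEGV */
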
213 static inline void print_dropped_signal(int sig)
214 {
215  static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);
216 
217  if (!print_fatal_signals)
218  return;
219 
220  if (!__ratelimit(&ratelimit_state))
221  return;
222 
223  printk(KERN_INFO "%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
224  current->comm, current->pid, sig);
225 }
226 
244 bool task_set_jobctl_pending(struct task_struct *task, unsigned int mask)
245 {
246  BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
247  JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
248  BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));
249 
250  if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
251  return false;
252 
253  if (mask & JOBCTL_STOP_SIGMASK)
254  task->jobctl &= ~JOBCTL_STOP_SIGMASK;
255 
256  task->jobctl |= mask;
257  return true;
258 }
259 
272 void task_clear_jobctl_trapping(struct task_struct *task)
273 {
274  if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
275  task->jobctl &= ~JOBCTL_TRAPPING;
276  wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
277  }
278 }
279 
295 void task_clear_jobctl_pending(struct task_struct *task, unsigned int mask)
296 {
297  BUG_ON(mask & ~JOBCTL_PENDING_MASK);
298 
299  if (mask & JOBCTL_STOP_PENDING)
300  mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;
301 
302  task->jobctl &= ~mask;
303 
304  if (!(task->jobctl & JOBCTL_PENDING_MASK))
305  task_clear_jobctl_trapping(task);
306 }
307 
324 static bool task_participate_group_stop(struct task_struct *task)
325 {
326  struct signal_struct *sig = task->signal;
327  bool consume = task->jobctl & JOBCTL_STOP_CONSUME;
328 
329  WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));
330 
331  task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);
332 
333  if (!consume)
334  return false;
335 
336  if (!WARN_ON_ONCE(sig->group_stop_count == 0))
337  sig->group_stop_count--;
338 
339  /*
340  * Tell the caller to notify completion iff we are entering into a
341  * fresh group stop. Read comment in do_signal_stop() for details.
342  */
343  if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
344  sig->flags = SIGNAL_STOP_STOPPED;
345  return true;
346  }
347  return false;
348 }
349 
350 /*
351  * allocate a new signal queue record
352  * - this may be called without locks if and only if t == current, otherwise an
353  * appropriate lock must be held to stop the target task from exiting
354  */
355 static struct sigqueue *
356 __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
357 {
358  struct sigqueue *q = NULL;
359  struct user_struct *user;
360 
361  /*
362  * Protect access to @t credentials. This can go away when all
363  * callers hold rcu read lock.
364  */
365  rcu_read_lock();
366  user = get_uid(__task_cred(t)->user);
367  atomic_inc(&user->sigpending);
368  rcu_read_unlock();
369 
370  if (override_rlimit ||
371  atomic_read(&user->sigpending) <=
372  task_rlimit(t, RLIMIT_SIGPENDING)) {
373  q = kmem_cache_alloc(sigqueue_cachep, flags);
374  } else {
375  print_dropped_signal(sig);
376  }
377 
378  if (unlikely(q == NULL)) {
379  atomic_dec(&user->sigpending);
380  free_uid(user);
381  } else {
382  INIT_LIST_HEAD(&q->list);
383  q->flags = 0;
384  q->user = user;
385  }
386 
387  return q;
388 }
389 
390 static void __sigqueue_free(struct sigqueue *q)
391 {
392  if (q->flags & SIGQUEUE_PREALLOC)
393  return;
394  atomic_dec(&q->user->sigpending);
395  free_uid(q->user);
396  kmem_cache_free(sigqueue_cachep, q);
397 }
398 
399 void flush_sigqueue(struct sigpending *queue)
400 {
401  struct sigqueue *q;
402 
403  sigemptyset(&queue->signal);
404  while (!list_empty(&queue->list)) {
405  q = list_entry(queue->list.next, struct sigqueue, list);
406  list_del_init(&q->list);
407  __sigqueue_free(q);
408  }
409 }
410 
411 /*
412  * Flush all pending signals for a task.
413  */
414 void __flush_signals(struct task_struct *t)
415 {
416  clear_tsk_thread_flag(t, TIF_SIGPENDING);
417  flush_sigqueue(&t->pending);
418  flush_sigqueue(&t->signal->shared_pending);
419 }
420 
421 void flush_signals(struct task_struct *t)
422 {
423  unsigned long flags;
424 
425  spin_lock_irqsave(&t->sighand->siglock, flags);
426  __flush_signals(t);
427  spin_unlock_irqrestore(&t->sighand->siglock, flags);
428 }
429 
430 static void __flush_itimer_signals(struct sigpending *pending)
431 {
432  sigset_t signal, retain;
433  struct sigqueue *q, *n;
434 
435  signal = pending->signal;
436  sigemptyset(&retain);
437 
438  list_for_each_entry_safe(q, n, &pending->list, list) {
439  int sig = q->info.si_signo;
440 
441  if (likely(q->info.si_code != SI_TIMER)) {
442  sigaddset(&retain, sig);
443  } else {
444  sigdelset(&signal, sig);
445  list_del_init(&q->list);
446  __sigqueue_free(q);
447  }
448  }
449 
450  sigorsets(&pending->signal, &signal, &retain);
451 }
452 
453 void flush_itimer_signals(void)
454 {
455  struct task_struct *tsk = current;
456  unsigned long flags;
457 
458  spin_lock_irqsave(&tsk->sighand->siglock, flags);
459  __flush_itimer_signals(&tsk->pending);
460  __flush_itimer_signals(&tsk->signal->shared_pending);
461  spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
462 }
463 
464 void ignore_signals(struct task_struct *t)
465 {
466  int i;
467 
468  for (i = 0; i < _NSIG; ++i)
469  t->sighand->action[i].sa.sa_handler = SIG_IGN;
470 
471  flush_signals(t);
472 }
473 
474 /*
475  * Flush all handlers for a task.
476  */
477 
478 void
479 flush_signal_handlers(struct task_struct *t, int force_default)
480 {
481  int i;
482  struct k_sigaction *ka = &t->sighand->action[0];
483  for (i = _NSIG ; i != 0 ; i--) {
484  if (force_default || ka->sa.sa_handler != SIG_IGN)
485  ka->sa.sa_handler = SIG_DFL;
486  ka->sa.sa_flags = 0;
487  sigemptyset(&ka->sa.sa_mask);
488  ka++;
489  }
490 }
491 
492 int unhandled_signal(struct task_struct *tsk, int sig)
493 {
494  void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
495  if (is_global_init(tsk))
496  return 1;
497  if (handler != SIG_IGN && handler != SIG_DFL)
498  return 0;
499  /* if ptraced, let the tracer determine */
500  return !tsk->ptrace;
501 }
502 
503 /*
504  * Notify the system that a driver wants to block all signals for this
505  * process, and wants to be notified if any signals at all were to be
506  * sent/acted upon. If the notifier routine returns non-zero, then the
507  * signal will be acted upon after all. If the notifier routine returns 0,
508  * then the signal will be blocked. Only one block per process is
509  * allowed. priv is a pointer to private data that the notifier routine
510  * can use to determine if the signal should be blocked or not.
511  */
512 void
513 block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
514 {
515  unsigned long flags;
516 
517  spin_lock_irqsave(&current->sighand->siglock, flags);
518  current->notifier_mask = mask;
519  current->notifier_data = priv;
520  current->notifier = notifier;
521  spin_unlock_irqrestore(&current->sighand->siglock, flags);
522 }
523 
524 /* Notify the system that blocking has ended. */
525 
526 void
527 unblock_all_signals(void)
528 {
529  unsigned long flags;
530 
531  spin_lock_irqsave(&current->sighand->siglock, flags);
532  current->notifier = NULL;
533  current->notifier_data = NULL;
534  current->notifier_mask = NULL;
535  spin_unlock_irqrestore(&current->sighand->siglock, flags);
536 }
537 
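A minimal usage sketch of the notifier interface above (not part of signal.c); struct my_dev, my_done_notifier() and my_dev_wait() are hypothetical driver names:

	struct my_dev {
		wait_queue_head_t wq;
		int done;
	};

	/* Called at signal dequeue: non-zero lets the signal through, 0 keeps it blocked. */
	static int my_done_notifier(void *priv)
	{
		struct my_dev *dev = priv;

		return dev->done;
	}

	static void my_dev_wait(struct my_dev *dev)
	{
		sigset_t mask;

		sigfillset(&mask);			/* watch every signal while we wait */
		block_all_signals(my_done_notifier, dev, &mask);
		wait_event(dev->wq, dev->done);		/* device-specific wait */
		unblock_all_signals();
	}
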
538 static void collect_signal(int sig, struct sigpending *list, siginfo_t *info)
539 {
540  struct sigqueue *q, *first = NULL;
541 
542  /*
543  * Collect the siginfo appropriate to this signal. Check if
544  * there is another siginfo for the same signal.
545  */
546  list_for_each_entry(q, &list->list, list) {
547  if (q->info.si_signo == sig) {
548  if (first)
549  goto still_pending;
550  first = q;
551  }
552  }
553 
554  sigdelset(&list->signal, sig);
555 
556  if (first) {
557 still_pending:
558  list_del_init(&first->list);
559  copy_siginfo(info, &first->info);
560  __sigqueue_free(first);
561  } else {
562  /*
563  * Ok, it wasn't in the queue. This must be
564  * a fast-pathed signal or we must have been
565  * out of queue space. So zero out the info.
566  */
567  info->si_signo = sig;
568  info->si_errno = 0;
569  info->si_code = SI_USER;
570  info->si_pid = 0;
571  info->si_uid = 0;
572  }
573 }
574 
575 static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
576  siginfo_t *info)
577 {
578  int sig = next_signal(pending, mask);
579 
580  if (sig) {
581  if (current->notifier) {
582  if (sigismember(current->notifier_mask, sig)) {
583  if (!(current->notifier)(current->notifier_data)) {
584  clear_thread_flag(TIF_SIGPENDING);
585  return 0;
586  }
587  }
588  }
589 
590  collect_signal(sig, pending, info);
591  }
592 
593  return sig;
594 }
595 
596 /*
597  * Dequeue a signal and return the element to the caller, which is
598  * expected to free it.
599  *
600  * All callers have to hold the siglock.
601  */
602 int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
603 {
604  int signr;
605 
606  /* We only dequeue private signals from ourselves, we don't let
607  * signalfd steal them
608  */
609  signr = __dequeue_signal(&tsk->pending, mask, info);
610  if (!signr) {
611  signr = __dequeue_signal(&tsk->signal->shared_pending,
612  mask, info);
613  /*
614  * itimer signal ?
615  *
616  * itimers are process shared and we restart periodic
617  * itimers in the signal delivery path to prevent DoS
618  * attacks in the high resolution timer case. This is
619  * compliant with the old way of self-restarting
620  * itimers, as the SIGALRM is a legacy signal and only
621  * queued once. Changing the restart behaviour to
622  * restart the timer in the signal dequeue path is
623  * reducing the timer noise on heavily loaded !highres
624  * systems too.
625  */
626  if (unlikely(signr == SIGALRM)) {
627  struct hrtimer *tmr = &tsk->signal->real_timer;
628 
629  if (!hrtimer_is_queued(tmr) &&
630  tsk->signal->it_real_incr.tv64 != 0) {
631  hrtimer_forward(tmr, tmr->base->get_time(),
632  tsk->signal->it_real_incr);
633  hrtimer_restart(tmr);
634  }
635  }
636  }
637 
638  recalc_sigpending();
639  if (!signr)
640  return 0;
641 
642  if (unlikely(sig_kernel_stop(signr))) {
643  /*
644  * Set a marker that we have dequeued a stop signal. Our
645  * caller might release the siglock and then the pending
646  * stop signal it is about to process is no longer in the
647  * pending bitmasks, but must still be cleared by a SIGCONT
648  * (and overruled by a SIGKILL). So those cases clear this
649  * shared flag after we've set it. Note that this flag may
650  * remain set after the signal we return is ignored or
651  * handled. That doesn't matter because its only purpose
652  * is to alert stop-signal processing code when another
653  * processor has come along and cleared the flag.
654  */
655  current->jobctl |= JOBCTL_STOP_DEQUEUED;
656  }
657  if ((info->si_code & __SI_MASK) == __SI_TIMER && info->si_sys_private) {
658  /*
659  * Release the siglock to ensure proper locking order
660  * of timer locks outside of siglocks. Note, we leave
661  * irqs disabled here, since the posix-timers code is
662  * about to disable them again anyway.
663  */
664  spin_unlock(&tsk->sighand->siglock);
665  do_schedule_next_timer(info);
666  spin_lock(&tsk->sighand->siglock);
667  }
668  return signr;
669 }
670 
671 /*
672  * Tell a process that it has a new active signal..
673  *
674  * NOTE! we rely on the previous spin_lock to
675  * lock interrupts for us! We can only be called with
676  * "siglock" held, and the local interrupt must
677  * have been disabled when that got acquired!
678  *
679  * No need to set need_resched since signal event passing
680  * goes through ->blocked
681  */
682 void signal_wake_up(struct task_struct *t, int resume)
683 {
684  unsigned int mask;
685 
686  set_tsk_thread_flag(t, TIF_SIGPENDING);
687 
688  /*
689  * For SIGKILL, we want to wake it up in the stopped/traced/killable
690  * case. We don't check t->state here because there is a race with it
691  * executing another processor and just now entering stopped state.
692  * By using wake_up_state, we ensure the process will wake up and
693  * handle its death signal.
694  */
695  mask = TASK_INTERRUPTIBLE;
696  if (resume)
697  mask |= TASK_WAKEKILL;
698  if (!wake_up_state(t, mask))
699  kick_process(t);
700 }
701 
702 /*
703  * Remove signals in mask from the pending set and queue.
704  * Returns 1 if any signals were found.
705  *
706  * All callers must be holding the siglock.
707  *
708  * This version takes a sigset mask and looks at all signals,
709  * not just those in the first mask word.
710  */
711 static int rm_from_queue_full(sigset_t *mask, struct sigpending *s)
712 {
713  struct sigqueue *q, *n;
714  sigset_t m;
715 
716  sigandsets(&m, mask, &s->signal);
717  if (sigisemptyset(&m))
718  return 0;
719 
720  sigandnsets(&s->signal, &s->signal, mask);
721  list_for_each_entry_safe(q, n, &s->list, list) {
722  if (sigismember(mask, q->info.si_signo)) {
723  list_del_init(&q->list);
724  __sigqueue_free(q);
725  }
726  }
727  return 1;
728 }
729 /*
730  * Remove signals in mask from the pending set and queue.
731  * Returns 1 if any signals were found.
732  *
733  * All callers must be holding the siglock.
734  */
735 static int rm_from_queue(unsigned long mask, struct sigpending *s)
736 {
737  struct sigqueue *q, *n;
738 
739  if (!sigtestsetmask(&s->signal, mask))
740  return 0;
741 
742  sigdelsetmask(&s->signal, mask);
743  list_for_each_entry_safe(q, n, &s->list, list) {
744  if (q->info.si_signo < SIGRTMIN &&
745  (mask & sigmask(q->info.si_signo))) {
746  list_del_init(&q->list);
747  __sigqueue_free(q);
748  }
749  }
750  return 1;
751 }
752 
753 static inline int is_si_special(const struct siginfo *info)
754 {
755  return info <= SEND_SIG_FORCED;
756 }
757 
758 static inline bool si_fromuser(const struct siginfo *info)
759 {
760  return info == SEND_SIG_NOINFO ||
761  (!is_si_special(info) && SI_FROMUSER(info));
762 }
763 
764 /*
765  * called with RCU read lock from check_kill_permission()
766  */
767 static int kill_ok_by_cred(struct task_struct *t)
768 {
769  const struct cred *cred = current_cred();
770  const struct cred *tcred = __task_cred(t);
771 
772  if (uid_eq(cred->euid, tcred->suid) ||
773  uid_eq(cred->euid, tcred->uid) ||
774  uid_eq(cred->uid, tcred->suid) ||
775  uid_eq(cred->uid, tcred->uid))
776  return 1;
777 
778  if (ns_capable(tcred->user_ns, CAP_KILL))
779  return 1;
780 
781  return 0;
782 }
783 
784 /*
785  * Bad permissions for sending the signal
786  * - the caller must hold the RCU read lock
787  */
788 static int check_kill_permission(int sig, struct siginfo *info,
789  struct task_struct *t)
790 {
791  struct pid *sid;
792  int error;
793 
794  if (!valid_signal(sig))
795  return -EINVAL;
796 
797  if (!si_fromuser(info))
798  return 0;
799 
800  error = audit_signal_info(sig, t); /* Let audit system see the signal */
801  if (error)
802  return error;
803 
804  if (!same_thread_group(current, t) &&
805  !kill_ok_by_cred(t)) {
806  switch (sig) {
807  case SIGCONT:
808  sid = task_session(t);
809  /*
810  * We don't return the error if sid == NULL. The
811  * task was unhashed, the caller must notice this.
812  */
813  if (!sid || sid == task_session(current))
814  break;
815  default:
816  return -EPERM;
817  }
818  }
819 
820  return security_task_kill(t, info, sig, 0);
821 }
822 
840 static void ptrace_trap_notify(struct task_struct *t)
841 {
842  WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
843  assert_spin_locked(&t->sighand->siglock);
844 
845  task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
846  signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
847 }
848 
849 /*
850  * Handle magic process-wide effects of stop/continue signals. Unlike
851  * the signal actions, these happen immediately at signal-generation
852  * time regardless of blocking, ignoring, or handling. This does the
853  * actual continuing for SIGCONT, but not the actual stopping for stop
854  * signals. The process stop is done as a signal action for SIG_DFL.
855  *
856  * Returns true if the signal should be actually delivered, otherwise
857  * it should be dropped.
858  */
859 static int prepare_signal(int sig, struct task_struct *p, bool force)
860 {
861  struct signal_struct *signal = p->signal;
862  struct task_struct *t;
863 
864  if (unlikely(signal->flags & SIGNAL_GROUP_EXIT)) {
865  /*
866  * The process is in the middle of dying, nothing to do.
867  */
868  } else if (sig_kernel_stop(sig)) {
869  /*
870  * This is a stop signal. Remove SIGCONT from all queues.
871  */
872  rm_from_queue(sigmask(SIGCONT), &signal->shared_pending);
873  t = p;
874  do {
875  rm_from_queue(sigmask(SIGCONT), &t->pending);
876  } while_each_thread(p, t);
877  } else if (sig == SIGCONT) {
878  unsigned int why;
879  /*
880  * Remove all stop signals from all queues, wake all threads.
881  */
882  rm_from_queue(SIG_KERNEL_STOP_MASK, &signal->shared_pending);
883  t = p;
884  do {
885  task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
886  rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
887  if (likely(!(t->ptrace & PT_SEIZED)))
888  wake_up_state(t, __TASK_STOPPED);
889  else
890  ptrace_trap_notify(t);
891  } while_each_thread(p, t);
892 
893  /*
894  * Notify the parent with CLD_CONTINUED if we were stopped.
895  *
896  * If we were in the middle of a group stop, we pretend it
897  * was already finished, and then continued. Since SIGCHLD
898  * doesn't queue we report only CLD_STOPPED, as if the next
899  * CLD_CONTINUED was dropped.
900  */
901  why = 0;
902  if (signal->flags & SIGNAL_STOP_STOPPED)
903  why |= SIGNAL_CLD_CONTINUED;
904  else if (signal->group_stop_count)
905  why |= SIGNAL_CLD_STOPPED;
906 
907  if (why) {
908  /*
909  * The first thread which returns from do_signal_stop()
910  * will take ->siglock, notice SIGNAL_CLD_MASK, and
911  * notify its parent. See get_signal_to_deliver().
912  */
913  signal->flags = why | SIGNAL_STOP_CONTINUED;
914  signal->group_stop_count = 0;
915  signal->group_exit_code = 0;
916  }
917  }
918 
919  return !sig_ignored(p, sig, force);
920 }
921 
922 /*
923  * Test if P wants to take SIG. After we've checked all threads with this,
924  * it's equivalent to finding no threads not blocking SIG. Any threads not
925  * blocking SIG were ruled out because they are not running and already
926  * have pending signals. Such threads will dequeue from the shared queue
927  * as soon as they're available, so putting the signal on the shared queue
928  * will be equivalent to sending it to one such thread.
929  */
930 static inline int wants_signal(int sig, struct task_struct *p)
931 {
932  if (sigismember(&p->blocked, sig))
933  return 0;
934  if (p->flags & PF_EXITING)
935  return 0;
936  if (sig == SIGKILL)
937  return 1;
938  if (task_is_stopped_or_traced(p))
939  return 0;
940  return task_curr(p) || !signal_pending(p);
941 }
942 
943 static void complete_signal(int sig, struct task_struct *p, int group)
944 {
945  struct signal_struct *signal = p->signal;
946  struct task_struct *t;
947 
948  /*
949  * Now find a thread we can wake up to take the signal off the queue.
950  *
951  * If the main thread wants the signal, it gets first crack.
952  * Probably the least surprising to the average bear.
953  */
954  if (wants_signal(sig, p))
955  t = p;
956  else if (!group || thread_group_empty(p))
957  /*
958  * There is just one thread and it does not need to be woken.
959  * It will dequeue unblocked signals before it runs again.
960  */
961  return;
962  else {
963  /*
964  * Otherwise try to find a suitable thread.
965  */
966  t = signal->curr_target;
967  while (!wants_signal(sig, t)) {
968  t = next_thread(t);
969  if (t == signal->curr_target)
970  /*
971  * No thread needs to be woken.
972  * Any eligible threads will see
973  * the signal in the queue soon.
974  */
975  return;
976  }
977  signal->curr_target = t;
978  }
979 
980  /*
981  * Found a killable thread. If the signal will be fatal,
982  * then start taking the whole group down immediately.
983  */
984  if (sig_fatal(p, sig) &&
985  !(signal->flags & (SIGNAL_UNKILLABLE | SIGNAL_GROUP_EXIT)) &&
986  !sigismember(&t->real_blocked, sig) &&
987  (sig == SIGKILL || !t->ptrace)) {
988  /*
989  * This signal will be fatal to the whole group.
990  */
991  if (!sig_kernel_coredump(sig)) {
992  /*
993  * Start a group exit and wake everybody up.
994  * This way we don't have other threads
995  * running and doing things after a slower
996  * thread has the fatal signal pending.
997  */
998  signal->flags = SIGNAL_GROUP_EXIT;
999  signal->group_exit_code = sig;
1000  signal->group_stop_count = 0;
1001  t = p;
1002  do {
1003  task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
1004  sigaddset(&t->pending.signal, SIGKILL);
1005  signal_wake_up(t, 1);
1006  } while_each_thread(p, t);
1007  return;
1008  }
1009  }
1010 
1011  /*
1012  * The signal is already in the shared-pending queue.
1013  * Tell the chosen thread to wake up and dequeue it.
1014  */
1015  signal_wake_up(t, sig == SIGKILL);
1016  return;
1017 }
1018 
1019 static inline int legacy_queue(struct sigpending *signals, int sig)
1020 {
1021  return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
1022 }
1023 
1024 #ifdef CONFIG_USER_NS
1025 static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t)
1026 {
1027  if (current_user_ns() == task_cred_xxx(t, user_ns))
1028  return;
1029 
1030  if (SI_FROMKERNEL(info))
1031  return;
1032 
1033  rcu_read_lock();
1034  info->si_uid = from_kuid_munged(task_cred_xxx(t, user_ns),
1035  make_kuid(current_user_ns(), info->si_uid));
1036  rcu_read_unlock();
1037 }
1038 #else
1039 static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t)
1040 {
1041  return;
1042 }
1043 #endif
1044 
1045 static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
1046  int group, int from_ancestor_ns)
1047 {
1048  struct sigpending *pending;
1049  struct sigqueue *q;
1050  int override_rlimit;
1051  int ret = 0, result;
1052 
1053  assert_spin_locked(&t->sighand->siglock);
1054 
1055  result = TRACE_SIGNAL_IGNORED;
1056  if (!prepare_signal(sig, t,
1057  from_ancestor_ns || (info == SEND_SIG_FORCED)))
1058  goto ret;
1059 
1060  pending = group ? &t->signal->shared_pending : &t->pending;
1061  /*
1062  * Short-circuit ignored signals and support queuing
1063  * exactly one non-rt signal, so that we can get more
1064  * detailed information about the cause of the signal.
1065  */
1066  result = TRACE_SIGNAL_ALREADY_PENDING;
1067  if (legacy_queue(pending, sig))
1068  goto ret;
1069 
1070  result = TRACE_SIGNAL_DELIVERED;
1071  /*
1072  * fast-pathed signals for kernel-internal things like SIGSTOP
1073  * or SIGKILL.
1074  */
1075  if (info == SEND_SIG_FORCED)
1076  goto out_set;
1077 
1078  /*
1079  * Real-time signals must be queued if sent by sigqueue, or
1080  * some other real-time mechanism. It is implementation
1081  * defined whether kill() does so. We attempt to do so, on
1082  * the principle of least surprise, but since kill is not
1083  * allowed to fail with EAGAIN when low on memory we just
1084  * make sure at least one signal gets delivered and don't
1085  * pass on the info struct.
1086  */
1087  if (sig < SIGRTMIN)
1088  override_rlimit = (is_si_special(info) || info->si_code >= 0);
1089  else
1090  override_rlimit = 0;
1091 
1092  q = __sigqueue_alloc(sig, t, GFP_ATOMIC | __GFP_NOTRACK_FALSE_POSITIVE,
1093  override_rlimit);
1094  if (q) {
1095  list_add_tail(&q->list, &pending->list);
1096  switch ((unsigned long) info) {
1097  case (unsigned long) SEND_SIG_NOINFO:
1098  q->info.si_signo = sig;
1099  q->info.si_errno = 0;
1100  q->info.si_code = SI_USER;
1101  q->info.si_pid = task_tgid_nr_ns(current,
1102  task_active_pid_ns(t));
1103  q->info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
1104  break;
1105  case (unsigned long) SEND_SIG_PRIV:
1106  q->info.si_signo = sig;
1107  q->info.si_errno = 0;
1108  q->info.si_code = SI_KERNEL;
1109  q->info.si_pid = 0;
1110  q->info.si_uid = 0;
1111  break;
1112  default:
1113  copy_siginfo(&q->info, info);
1114  if (from_ancestor_ns)
1115  q->info.si_pid = 0;
1116  break;
1117  }
1118 
1119  userns_fixup_signal_uid(&q->info, t);
1120 
1121  } else if (!is_si_special(info)) {
1122  if (sig >= SIGRTMIN && info->si_code != SI_USER) {
1123  /*
1124  * Queue overflow, abort. We may abort if the
1125  * signal was rt and sent by user using something
1126  * other than kill().
1127  */
1128  result = TRACE_SIGNAL_OVERFLOW_FAIL;
1129  ret = -EAGAIN;
1130  goto ret;
1131  } else {
1132  /*
1133  * This is a silent loss of information. We still
1134  * send the signal, but the *info bits are lost.
1135  */
1136  result = TRACE_SIGNAL_LOSE_INFO;
1137  }
1138  }
1139 
1140 out_set:
1141  signalfd_notify(t, sig);
1142  sigaddset(&pending->signal, sig);
1143  complete_signal(sig, t, group);
1144 ret:
1145  trace_signal_generate(sig, info, t, group, result);
1146  return ret;
1147 }
1148 
1149 static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
1150  int group)
1151 {
1152  int from_ancestor_ns = 0;
1153 
1154 #ifdef CONFIG_PID_NS
1155  from_ancestor_ns = si_fromuser(info) &&
1156  !task_pid_nr_ns(current, task_active_pid_ns(t));
1157 #endif
1158 
1159  return __send_signal(sig, info, t, group, from_ancestor_ns);
1160 }
1161 
1162 static void print_fatal_signal(struct pt_regs *regs, int signr)
1163 {
1164  printk("%s/%d: potentially unexpected fatal signal %d.\n",
1165  current->comm, task_pid_nr(current), signr);
1166 
1167 #if defined(__i386__) && !defined(__arch_um__)
1168  printk("code at %08lx: ", regs->ip);
1169  {
1170  int i;
1171  for (i = 0; i < 16; i++) {
1172  unsigned char insn;
1173 
1174  if (get_user(insn, (unsigned char *)(regs->ip + i)))
1175  break;
1176  printk("%02x ", insn);
1177  }
1178  }
1179 #endif
1180  printk("\n");
1181  preempt_disable();
1182  show_regs(regs);
1183  preempt_enable();
1184 }
1185 
1186 static int __init setup_print_fatal_signals(char *str)
1187 {
1188  get_option (&str, &print_fatal_signals);
1189 
1190  return 1;
1191 }
1192 
1193 __setup("print-fatal-signals=", setup_print_fatal_signals);
1194 
1195 int
1196 __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1197 {
1198  return send_signal(sig, info, p, 1);
1199 }
1200 
1201 static int
1202 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
1203 {
1204  return send_signal(sig, info, t, 0);
1205 }
1206 
1207 int do_send_sig_info(int sig, struct siginfo *info, struct task_struct *p,
1208  bool group)
1209 {
1210  unsigned long flags;
1211  int ret = -ESRCH;
1212 
1213  if (lock_task_sighand(p, &flags)) {
1214  ret = send_signal(sig, info, p, group);
1215  unlock_task_sighand(p, &flags);
1216  }
1217 
1218  return ret;
1219 }
1220 
1221 /*
1222  * Force a signal that the process can't ignore: if necessary
1223  * we unblock the signal and change any SIG_IGN to SIG_DFL.
1224  *
1225  * Note: If we unblock the signal, we always reset it to SIG_DFL,
1226  * since we do not want to have a signal handler that was blocked
1227  * be invoked when user space had explicitly blocked it.
1228  *
1229  * We don't want to have recursive SIGSEGV's etc, for example,
1230  * that is why we also clear SIGNAL_UNKILLABLE.
1231  */
1232 int
1233 force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
1234 {
1235  unsigned long int flags;
1236  int ret, blocked, ignored;
1237  struct k_sigaction *action;
1238 
1239  spin_lock_irqsave(&t->sighand->siglock, flags);
1240  action = &t->sighand->action[sig-1];
1241  ignored = action->sa.sa_handler == SIG_IGN;
1242  blocked = sigismember(&t->blocked, sig);
1243  if (blocked || ignored) {
1244  action->sa.sa_handler = SIG_DFL;
1245  if (blocked) {
1246  sigdelset(&t->blocked, sig);
1247  recalc_sigpending_and_wake(t);
1248  }
1249  }
1250  if (action->sa.sa_handler == SIG_DFL)
1251  t->signal->flags &= ~SIGNAL_UNKILLABLE;
1252  ret = specific_send_sig_info(sig, info, t);
1253  spin_unlock_irqrestore(&t->sighand->siglock, flags);
1254 
1255  return ret;
1256 }
1257 
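A sketch of the typical caller of force_sig_info() (not part of signal.c): an architecture fault handler delivering a synchronous fault to the current task; example_deliver_segv and bad_addr are hypothetical names:

	static void example_deliver_segv(void __user *bad_addr)
	{
		siginfo_t info;

		memset(&info, 0, sizeof(info));
		info.si_signo = SIGSEGV;
		info.si_errno = 0;
		info.si_code  = SEGV_MAPERR;
		info.si_addr  = bad_addr;

		/* Unblocks SIGSEGV and resets SIG_IGN to SIG_DFL if needed. */
		force_sig_info(SIGSEGV, &info, current);
	}
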
1258 /*
1259  * Nuke all other threads in the group.
1260  */
1261 int zap_other_threads(struct task_struct *p)
1262 {
1263  struct task_struct *t = p;
1264  int count = 0;
1265 
1266  p->signal->group_stop_count = 0;
1267 
1268  while_each_thread(p, t) {
1269  task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
1270  count++;
1271 
1272  /* Don't bother with already dead threads */
1273  if (t->exit_state)
1274  continue;
1275  sigaddset(&t->pending.signal, SIGKILL);
1276  signal_wake_up(t, 1);
1277  }
1278 
1279  return count;
1280 }
1281 
1282 struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
1283  unsigned long *flags)
1284 {
1285  struct sighand_struct *sighand;
1286 
1287  for (;;) {
1288  local_irq_save(*flags);
1289  rcu_read_lock();
1290  sighand = rcu_dereference(tsk->sighand);
1291  if (unlikely(sighand == NULL)) {
1292  rcu_read_unlock();
1293  local_irq_restore(*flags);
1294  break;
1295  }
1296 
1297  spin_lock(&sighand->siglock);
1298  if (likely(sighand == tsk->sighand)) {
1299  rcu_read_unlock();
1300  break;
1301  }
1302  spin_unlock(&sighand->siglock);
1303  rcu_read_unlock();
1304  local_irq_restore(*flags);
1305  }
1306 
1307  return sighand;
1308 }
1309 
1310 /*
1311  * send signal info to all the members of a group
1312  */
1313 int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1314 {
1315  int ret;
1316 
1317  rcu_read_lock();
1318  ret = check_kill_permission(sig, info, p);
1319  rcu_read_unlock();
1320 
1321  if (!ret && sig)
1322  ret = do_send_sig_info(sig, info, p, true);
1323 
1324  return ret;
1325 }
1326 
1327 /*
1328  * __kill_pgrp_info() sends a signal to a process group: this is what the tty
1329  * control characters do (^C, ^Z etc)
1330  * - the caller must hold at least a readlock on tasklist_lock
1331  */
1332 int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
1333 {
1334  struct task_struct *p = NULL;
1335  int retval, success;
1336 
1337  success = 0;
1338  retval = -ESRCH;
1339  do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
1340  int err = group_send_sig_info(sig, info, p);
1341  success |= !err;
1342  retval = err;
1343  } while_each_pid_task(pgrp, PIDTYPE_PGID, p);
1344  return success ? 0 : retval;
1345 }
1346 
1347 int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
1348 {
1349  int error = -ESRCH;
1350  struct task_struct *p;
1351 
1352  rcu_read_lock();
1353 retry:
1354  p = pid_task(pid, PIDTYPE_PID);
1355  if (p) {
1356  error = group_send_sig_info(sig, info, p);
1357  if (unlikely(error == -ESRCH))
1358  /*
1359  * The task was unhashed in between, try again.
1360  * If it is dead, pid_task() will return NULL,
1361  * if we race with de_thread() it will find the
1362  * new leader.
1363  */
1364  goto retry;
1365  }
1366  rcu_read_unlock();
1367 
1368  return error;
1369 }
1370 
1371 int kill_proc_info(int sig, struct siginfo *info, pid_t pid)
1372 {
1373  int error;
1374  rcu_read_lock();
1375  error = kill_pid_info(sig, info, find_vpid(pid));
1376  rcu_read_unlock();
1377  return error;
1378 }
1379 
1380 static int kill_as_cred_perm(const struct cred *cred,
1381  struct task_struct *target)
1382 {
1383  const struct cred *pcred = __task_cred(target);
1384  if (!uid_eq(cred->euid, pcred->suid) && !uid_eq(cred->euid, pcred->uid) &&
1385  !uid_eq(cred->uid, pcred->suid) && !uid_eq(cred->uid, pcred->uid))
1386  return 0;
1387  return 1;
1388 }
1389 
1390 /* like kill_pid_info(), but doesn't use uid/euid of "current" */
1391 int kill_pid_info_as_cred(int sig, struct siginfo *info, struct pid *pid,
1392  const struct cred *cred, u32 secid)
1393 {
1394  int ret = -EINVAL;
1395  struct task_struct *p;
1396  unsigned long flags;
1397 
1398  if (!valid_signal(sig))
1399  return ret;
1400 
1401  rcu_read_lock();
1402  p = pid_task(pid, PIDTYPE_PID);
1403  if (!p) {
1404  ret = -ESRCH;
1405  goto out_unlock;
1406  }
1407  if (si_fromuser(info) && !kill_as_cred_perm(cred, p)) {
1408  ret = -EPERM;
1409  goto out_unlock;
1410  }
1411  ret = security_task_kill(p, info, sig, secid);
1412  if (ret)
1413  goto out_unlock;
1414 
1415  if (sig) {
1416  if (lock_task_sighand(p, &flags)) {
1417  ret = __send_signal(sig, info, p, 1, 0);
1418  unlock_task_sighand(p, &flags);
1419  } else
1420  ret = -ESRCH;
1421  }
1422 out_unlock:
1423  rcu_read_unlock();
1424  return ret;
1425 }
1426 EXPORT_SYMBOL_GPL(kill_pid_info_as_cred);
1427 
1428 /*
1429  * kill_something_info() interprets pid in interesting ways just like kill(2).
1430  *
1431  * POSIX specifies that kill(-1,sig) is unspecified, but what we have
1432  * is probably wrong. Should make it like BSD or SYSV.
1433  */
1434 
1435 static int kill_something_info(int sig, struct siginfo *info, pid_t pid)
1436 {
1437  int ret;
1438 
1439  if (pid > 0) {
1440  rcu_read_lock();
1441  ret = kill_pid_info(sig, info, find_vpid(pid));
1442  rcu_read_unlock();
1443  return ret;
1444  }
1445 
1446  read_lock(&tasklist_lock);
1447  if (pid != -1) {
1448  ret = __kill_pgrp_info(sig, info,
1449  pid ? find_vpid(-pid) : task_pgrp(current));
1450  } else {
1451  int retval = 0, count = 0;
1452  struct task_struct * p;
1453 
1454  for_each_process(p) {
1455  if (task_pid_vnr(p) > 1 &&
1456  !same_thread_group(p, current)) {
1457  int err = group_send_sig_info(sig, info, p);
1458  ++count;
1459  if (err != -EPERM)
1460  retval = err;
1461  }
1462  }
1463  ret = count ? retval : -ESRCH;
1464  }
1465  read_unlock(&tasklist_lock);
1466 
1467  return ret;
1468 }
1469 
1470 /*
1471  * These are for backward compatibility with the rest of the kernel source.
1472  */
1473 
1474 int send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1475 {
1476  /*
1477  * Make sure legacy kernel users don't send in bad values
1478  * (normal paths check this in check_kill_permission).
1479  */
1480  if (!valid_signal(sig))
1481  return -EINVAL;
1482 
1483  return do_send_sig_info(sig, info, p, false);
1484 }
1485 
1486 #define __si_special(priv) \
1487  ((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)
1488 
1489 int
1490 send_sig(int sig, struct task_struct *p, int priv)
1491 {
1492  return send_sig_info(sig, __si_special(priv), p);
1493 }
1494 
1495 void
1496 force_sig(int sig, struct task_struct *p)
1497 {
1498  force_sig_info(sig, SEND_SIG_PRIV, p);
1499 }
1500 
1501 /*
1502  * When things go south during signal handling, we
1503  * will force a SIGSEGV. And if the signal that caused
1504  * the problem was already a SIGSEGV, we'll want to
1505  * make sure we don't even try to deliver the signal..
1506  */
1507 int
1508 force_sigsegv(int sig, struct task_struct *p)
1509 {
1510  if (sig == SIGSEGV) {
1511  unsigned long flags;
1512  spin_lock_irqsave(&p->sighand->siglock, flags);
1513  p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
1514  spin_unlock_irqrestore(&p->sighand->siglock, flags);
1515  }
1516  force_sig(SIGSEGV, p);
1517  return 0;
1518 }
1519 
1520 int kill_pgrp(struct pid *pid, int sig, int priv)
1521 {
1522  int ret;
1523 
1524  read_lock(&tasklist_lock);
1525  ret = __kill_pgrp_info(sig, __si_special(priv), pid);
1526  read_unlock(&tasklist_lock);
1527 
1528  return ret;
1529 }
1530 EXPORT_SYMBOL(kill_pgrp);
1531 
1532 int kill_pid(struct pid *pid, int sig, int priv)
1533 {
1534  return kill_pid_info(sig, __si_special(priv), pid);
1535 }
1536 EXPORT_SYMBOL(kill_pid);
1537 
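A brief sketch of in-kernel callers of the helpers above (not part of signal.c); example_signal_task is a hypothetical name and return values are ignored for brevity:

	static void example_signal_task(struct task_struct *tsk)
	{
		/* Kernel-originated SIGTERM to one process (priv != 0 selects SEND_SIG_PRIV). */
		kill_pid(task_pid(tsk), SIGTERM, 1);

		/* Or signal its whole process group, as the tty layer does for ^C. */
		kill_pgrp(task_pgrp(tsk), SIGINT, 1);
	}
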
1538 /*
1539  * These functions support sending signals using preallocated sigqueue
1540  * structures. This is needed "because realtime applications cannot
1541  * afford to lose notifications of asynchronous events, like timer
1542  * expirations or I/O completions". In the case of POSIX Timers
1543  * we allocate the sigqueue structure from the timer_create. If this
1544  * allocation fails we are able to report the failure to the application
1545  * with an EAGAIN error.
1546  */
1547 struct sigqueue *sigqueue_alloc(void)
1548 {
1549  struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);
1550 
1551  if (q)
1552  q->flags |= SIGQUEUE_PREALLOC;
1553 
1554  return q;
1555 }
1556 
1557 void sigqueue_free(struct sigqueue *q)
1558 {
1559  unsigned long flags;
1560  spinlock_t *lock = &current->sighand->siglock;
1561 
1562  BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1563  /*
1564  * We must hold ->siglock while testing q->list
1565  * to serialize with collect_signal() or with
1566  * __exit_signal()->flush_sigqueue().
1567  */
1568  spin_lock_irqsave(lock, flags);
1569  q->flags &= ~SIGQUEUE_PREALLOC;
1570  /*
1571  * If it is queued it will be freed when dequeued,
1572  * like the "regular" sigqueue.
1573  */
1574  if (!list_empty(&q->list))
1575  q = NULL;
1576  spin_unlock_irqrestore(lock, flags);
1577 
1578  if (q)
1579  __sigqueue_free(q);
1580 }
1581 
1582 int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
1583 {
1584  int sig = q->info.si_signo;
1585  struct sigpending *pending;
1586  unsigned long flags;
1587  int ret, result;
1588 
1589  BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1590 
1591  ret = -1;
1592  if (!likely(lock_task_sighand(t, &flags)))
1593  goto ret;
1594 
1595  ret = 1; /* the signal is ignored */
1596  result = TRACE_SIGNAL_IGNORED;
1597  if (!prepare_signal(sig, t, false))
1598  goto out;
1599 
1600  ret = 0;
1601  if (unlikely(!list_empty(&q->list))) {
1602  /*
1603  * If an SI_TIMER entry is already queue just increment
1604  * the overrun count.
1605  */
1606  BUG_ON(q->info.si_code != SI_TIMER);
1607  q->info.si_overrun++;
1608  result = TRACE_SIGNAL_ALREADY_PENDING;
1609  goto out;
1610  }
1611  q->info.si_overrun = 0;
1612 
1613  signalfd_notify(t, sig);
1614  pending = group ? &t->signal->shared_pending : &t->pending;
1615  list_add_tail(&q->list, &pending->list);
1616  sigaddset(&pending->signal, sig);
1617  complete_signal(sig, t, group);
1618  result = TRACE_SIGNAL_DELIVERED;
1619 out:
1620  trace_signal_generate(sig, &q->info, t, group, result);
1621  unlock_task_sighand(t, &flags);
1622 ret:
1623  return ret;
1624 }
1625 
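A sketch of the preallocation pattern described in the comment before sigqueue_alloc() (not part of signal.c); struct my_timer and its callers are hypothetical stand-ins for the POSIX timer code:

	struct my_timer {
		struct sigqueue *sigq;
		struct task_struct *target;
	};

	static int my_timer_create(struct my_timer *tmr)
	{
		tmr->sigq = sigqueue_alloc();	/* may fail now, so we can return -EAGAIN */
		if (!tmr->sigq)
			return -EAGAIN;
		return 0;
	}

	static void my_timer_fire(struct my_timer *tmr)
	{
		tmr->sigq->info.si_signo = SIGALRM;
		tmr->sigq->info.si_code = SI_TIMER;
		/* Queues the preallocated entry; cannot fail for lack of memory. */
		send_sigqueue(tmr->sigq, tmr->target, 0);
	}

	static void my_timer_delete(struct my_timer *tmr)
	{
		sigqueue_free(tmr->sigq);	/* safe even if the entry is still queued */
	}
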
1626 /*
1627  * Let a parent know about the death of a child.
1628  * For a stopped/continued status change, use do_notify_parent_cldstop instead.
1629  *
1630  * Returns true if our parent ignored us and so we've switched to
1631  * self-reaping.
1632  */
1633 bool do_notify_parent(struct task_struct *tsk, int sig)
1634 {
1635  struct siginfo info;
1636  unsigned long flags;
1637  struct sighand_struct *psig;
1638  bool autoreap = false;
1639 
1640  BUG_ON(sig == -1);
1641 
1642  /* do_notify_parent_cldstop should have been called instead. */
1643  BUG_ON(task_is_stopped_or_traced(tsk));
1644 
1645  BUG_ON(!tsk->ptrace &&
1646  (tsk->group_leader != tsk || !thread_group_empty(tsk)));
1647 
1648  if (sig != SIGCHLD) {
1649  /*
1650  * This is only possible if parent == real_parent.
1651  * Check if it has changed security domain.
1652  */
1653  if (tsk->parent_exec_id != tsk->parent->self_exec_id)
1654  sig = SIGCHLD;
1655  }
1656 
1657  info.si_signo = sig;
1658  info.si_errno = 0;
1659  /*
1660  * We are under tasklist_lock here so our parent is tied to
1661  * us and cannot change.
1662  *
1663  * task_active_pid_ns will always return the same pid namespace
1664  * until a task passes through release_task.
1665  *
1666  * write_lock() currently calls preempt_disable() which is the
1667  * same as rcu_read_lock(), but according to Oleg, this is not
1668  * correct to rely on this
1669  */
1670  rcu_read_lock();
1671  info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent));
1672  info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns),
1673  task_uid(tsk));
1674  rcu_read_unlock();
1675 
1676  info.si_utime = cputime_to_clock_t(tsk->utime + tsk->signal->utime);
1677  info.si_stime = cputime_to_clock_t(tsk->stime + tsk->signal->stime);
1678 
1679  info.si_status = tsk->exit_code & 0x7f;
1680  if (tsk->exit_code & 0x80)
1681  info.si_code = CLD_DUMPED;
1682  else if (tsk->exit_code & 0x7f)
1683  info.si_code = CLD_KILLED;
1684  else {
1685  info.si_code = CLD_EXITED;
1686  info.si_status = tsk->exit_code >> 8;
1687  }
1688 
1689  psig = tsk->parent->sighand;
1690  spin_lock_irqsave(&psig->siglock, flags);
1691  if (!tsk->ptrace && sig == SIGCHLD &&
1692  (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
1693  (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
1694  /*
1695  * We are exiting and our parent doesn't care. POSIX.1
1696  * defines special semantics for setting SIGCHLD to SIG_IGN
1697  * or setting the SA_NOCLDWAIT flag: we should be reaped
1698  * automatically and not left for our parent's wait4 call.
1699  * Rather than having the parent do it as a magic kind of
1700  * signal handler, we just set this to tell do_exit that we
1701  * can be cleaned up without becoming a zombie. Note that
1702  * we still call __wake_up_parent in this case, because a
1703  * blocked sys_wait4 might now return -ECHILD.
1704  *
1705  * Whether we send SIGCHLD or not for SA_NOCLDWAIT
1706  * is implementation-defined: we do (if you don't want
1707  * it, just use SIG_IGN instead).
1708  */
1709  autoreap = true;
1710  if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
1711  sig = 0;
1712  }
1713  if (valid_signal(sig) && sig)
1714  __group_send_sig_info(sig, &info, tsk->parent);
1715  __wake_up_parent(tsk, tsk->parent);
1716  spin_unlock_irqrestore(&psig->siglock, flags);
1717 
1718  return autoreap;
1719 }
1720 
1734 static void do_notify_parent_cldstop(struct task_struct *tsk,
1735  bool for_ptracer, int why)
1736 {
1737  struct siginfo info;
1738  unsigned long flags;
1739  struct task_struct *parent;
1740  struct sighand_struct *sighand;
1741 
1742  if (for_ptracer) {
1743  parent = tsk->parent;
1744  } else {
1745  tsk = tsk->group_leader;
1746  parent = tsk->real_parent;
1747  }
1748 
1749  info.si_signo = SIGCHLD;
1750  info.si_errno = 0;
1751  /*
1752  * see comment in do_notify_parent() about the following 4 lines
1753  */
1754  rcu_read_lock();
1755  info.si_pid = task_pid_nr_ns(tsk, parent->nsproxy->pid_ns);
1756  info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk));
1757  rcu_read_unlock();
1758 
1759  info.si_utime = cputime_to_clock_t(tsk->utime);
1760  info.si_stime = cputime_to_clock_t(tsk->stime);
1761 
1762  info.si_code = why;
1763  switch (why) {
1764  case CLD_CONTINUED:
1765  info.si_status = SIGCONT;
1766  break;
1767  case CLD_STOPPED:
1768  info.si_status = tsk->signal->group_exit_code & 0x7f;
1769  break;
1770  case CLD_TRAPPED:
1771  info.si_status = tsk->exit_code & 0x7f;
1772  break;
1773  default:
1774  BUG();
1775  }
1776 
1777  sighand = parent->sighand;
1778  spin_lock_irqsave(&sighand->siglock, flags);
1779  if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
1780  !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
1781  __group_send_sig_info(SIGCHLD, &info, parent);
1782  /*
1783  * Even if SIGCHLD is not generated, we must wake up wait4 calls.
1784  */
1785  __wake_up_parent(tsk, parent);
1786  spin_unlock_irqrestore(&sighand->siglock, flags);
1787 }
1788 
1789 static inline int may_ptrace_stop(void)
1790 {
1791  if (!likely(current->ptrace))
1792  return 0;
1793  /*
1794  * Are we in the middle of do_coredump?
1795  * If so and our tracer is also part of the coredump stopping
1796  * is a deadlock situation, and pointless because our tracer
1797  * is dead so don't allow us to stop.
1798  * If SIGKILL was already sent before the caller unlocked
1799  * ->siglock we must see ->core_state != NULL. Otherwise it
1800  * is safe to enter schedule().
1801  */
1802  if (unlikely(current->mm->core_state) &&
1803  unlikely(current->mm == current->parent->mm))
1804  return 0;
1805 
1806  return 1;
1807 }
1808 
1809 /*
1810  * Return non-zero if there is a SIGKILL that should be waking us up.
1811  * Called with the siglock held.
1812  */
1813 static int sigkill_pending(struct task_struct *tsk)
1814 {
1815  return sigismember(&tsk->pending.signal, SIGKILL) ||
1816  sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
1817 }
1818 
1819 /*
1820  * This must be called with current->sighand->siglock held.
1821  *
1822  * This should be the path for all ptrace stops.
1823  * We always set current->last_siginfo while stopped here.
1824  * That makes it a way to test a stopped process for
1825  * being ptrace-stopped vs being job-control-stopped.
1826  *
1827  * If we actually decide not to stop at all because the tracer
1828  * is gone, we keep current->exit_code unless clear_code.
1829  */
1830 static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
1831  __releases(&current->sighand->siglock)
1832  __acquires(&current->sighand->siglock)
1833 {
1834  bool gstop_done = false;
1835 
1836  if (arch_ptrace_stop_needed(exit_code, info)) {
1837  /*
1838  * The arch code has something special to do before a
1839  * ptrace stop. This is allowed to block, e.g. for faults
1840  * on user stack pages. We can't keep the siglock while
1841  * calling arch_ptrace_stop, so we must release it now.
1842  * To preserve proper semantics, we must do this before
1843  * any signal bookkeeping like checking group_stop_count.
1844  * Meanwhile, a SIGKILL could come in before we retake the
1845  * siglock. That must prevent us from sleeping in TASK_TRACED.
1846  * So after regaining the lock, we must check for SIGKILL.
1847  */
1848  spin_unlock_irq(&current->sighand->siglock);
1849  arch_ptrace_stop(exit_code, info);
1850  spin_lock_irq(&current->sighand->siglock);
1851  if (sigkill_pending(current))
1852  return;
1853  }
1854 
1855  /*
1856  * We're committing to trapping. TRACED should be visible before
1857  * TRAPPING is cleared; otherwise, the tracer might fail do_wait().
1858  * Also, transition to TRACED and updates to ->jobctl should be
1859  * atomic with respect to siglock and should be done after the arch
1860  * hook as siglock is released and regrabbed across it.
1861  */
1862  set_current_state(TASK_TRACED);
1863 
1864  current->last_siginfo = info;
1865  current->exit_code = exit_code;
1866 
1867  /*
1868  * If @why is CLD_STOPPED, we're trapping to participate in a group
1869  * stop. Do the bookkeeping. Note that if SIGCONT was delivered
1870  * across siglock relocks since INTERRUPT was scheduled, PENDING
1871  * could be clear now. We act as if SIGCONT is received after
1872  * TASK_TRACED is entered - ignore it.
1873  */
1874  if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
1875  gstop_done = task_participate_group_stop(current);
1876 
1877  /* any trap clears pending STOP trap, STOP trap clears NOTIFY */
1878  task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP);
1879  if (info && info->si_code >> 8 == PTRACE_EVENT_STOP)
1880  task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY);
1881 
1882  /* entering a trap, clear TRAPPING */
1883  task_clear_jobctl_trapping(current);
1884 
1885  spin_unlock_irq(&current->sighand->siglock);
1886  read_lock(&tasklist_lock);
1887  if (may_ptrace_stop()) {
1888  /*
1889  * Notify parents of the stop.
1890  *
1891  * While ptraced, there are two parents - the ptracer and
1892  * the real_parent of the group_leader. The ptracer should
1893  * know about every stop while the real parent is only
1894  * interested in the completion of group stop. The states
1895  * for the two don't interact with each other. Notify
1896  * separately unless they're gonna be duplicates.
1897  */
1898  do_notify_parent_cldstop(current, true, why);
1899  if (gstop_done && ptrace_reparented(current))
1900  do_notify_parent_cldstop(current, false, why);
1901 
1902  /*
1903  * Don't want to allow preemption here, because
1904  * sys_ptrace() needs this task to be inactive.
1905  *
1906  * XXX: implement read_unlock_no_resched().
1907  */
1908  preempt_disable();
1909  read_unlock(&tasklist_lock);
1910  preempt_enable_no_resched();
1911  schedule();
1912  } else {
1913  /*
1914  * By the time we got the lock, our tracer went away.
1915  * Don't drop the lock yet, another tracer may come.
1916  *
1917  * If @gstop_done, the ptracer went away between group stop
1918  * completion and here. During detach, it would have set
1919  * JOBCTL_STOP_PENDING on us and we'll re-enter
1920  * TASK_STOPPED in do_signal_stop() on return, so notifying
1921  * the real parent of the group stop completion is enough.
1922  */
1923  if (gstop_done)
1924  do_notify_parent_cldstop(current, false, why);
1925 
1926  __set_current_state(TASK_RUNNING);
1927  if (clear_code)
1928  current->exit_code = 0;
1929  read_unlock(&tasklist_lock);
1930  }
1931 
1932  /*
1933  * While in TASK_TRACED, we were considered "frozen enough".
1934  * Now that we woke up, it's crucial if we're supposed to be
1935  * frozen that we freeze now before running anything substantial.
1936  */
1937  try_to_freeze();
1938 
1939  /*
1940  * We are back. Now reacquire the siglock before touching
1941  * last_siginfo, so that we are sure to have synchronized with
1942  * any signal-sending on another CPU that wants to examine it.
1943  */
1944  spin_lock_irq(&current->sighand->siglock);
1945  current->last_siginfo = NULL;
1946 
1947  /* LISTENING can be set only during STOP traps, clear it */
1948  current->jobctl &= ~JOBCTL_LISTENING;
1949 
1950  /*
1951  * Queued signals ignored us while we were stopped for tracing.
1952  * So check for any that we should take before resuming user mode.
1953  * This sets TIF_SIGPENDING, but never clears it.
1954  */
1955  recalc_sigpending_tsk(current);
1956 }
1957 
1958 static void ptrace_do_notify(int signr, int exit_code, int why)
1959 {
1960  siginfo_t info;
1961 
1962  memset(&info, 0, sizeof info);
1963  info.si_signo = signr;
1964  info.si_code = exit_code;
1965  info.si_pid = task_pid_vnr(current);
1966  info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
1967 
1968  /* Let the debugger run. */
1969  ptrace_stop(exit_code, why, 1, &info);
1970 }
1971 
1972 void ptrace_notify(int exit_code)
1973 {
1974  BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
1975  if (unlikely(current->task_works))
1976  task_work_run();
1977 
1978  spin_lock_irq(&current->sighand->siglock);
1979  ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED);
1980  spin_unlock_irq(&current->sighand->siglock);
1981 }
1982 
2005 static bool do_signal_stop(int signr)
2006  __releases(&current->sighand->siglock)
2007 {
2008  struct signal_struct *sig = current->signal;
2009 
2010  if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
2011  unsigned int gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
2012  struct task_struct *t;
2013 
2014  /* signr will be recorded in task->jobctl for retries */
2015  WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);
2016 
2017  if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
2018  unlikely(signal_group_exit(sig)))
2019  return false;
2020  /*
2021  * There is no group stop already in progress. We must
2022  * initiate one now.
2023  *
2024  * While ptraced, a task may be resumed while group stop is
2025  * still in effect and then receive a stop signal and
2026  * initiate another group stop. This deviates from the
2027  * usual behavior as two consecutive stop signals can't
2028  * cause two group stops when !ptraced. That is why we
2029  * also check !task_is_stopped(t) below.
2030  *
2031  * The condition can be distinguished by testing whether
2032  * SIGNAL_STOP_STOPPED is already set. Don't generate
2033  * group_exit_code in such case.
2034  *
2035  * This is not necessary for SIGNAL_STOP_CONTINUED because
2036  * an intervening stop signal is required to cause two
2037  * continued events regardless of ptrace.
2038  */
2039  if (!(sig->flags & SIGNAL_STOP_STOPPED))
2040  sig->group_exit_code = signr;
2041 
2042  sig->group_stop_count = 0;
2043 
2044  if (task_set_jobctl_pending(current, signr | gstop))
2045  sig->group_stop_count++;
2046 
2047  for (t = next_thread(current); t != current;
2048  t = next_thread(t)) {
2049  /*
2050  * Setting state to TASK_STOPPED for a group
2051  * stop is always done with the siglock held,
2052  * so this check has no races.
2053  */
2054  if (!task_is_stopped(t) &&
2055  task_set_jobctl_pending(t, signr | gstop)) {
2056  sig->group_stop_count++;
2057  if (likely(!(t->ptrace & PT_SEIZED)))
2058  signal_wake_up(t, 0);
2059  else
2060  ptrace_trap_notify(t);
2061  }
2062  }
2063  }
2064 
2065  if (likely(!current->ptrace)) {
2066  int notify = 0;
2067 
2068  /*
2069  * If there are no other threads in the group, or if there
2070  * is a group stop in progress and we are the last to stop,
2071  * report to the parent.
2072  */
2073  if (task_participate_group_stop(current))
2074  notify = CLD_STOPPED;
2075 
2076  __set_current_state(TASK_STOPPED);
2077  spin_unlock_irq(&current->sighand->siglock);
2078 
2079  /*
2080  * Notify the parent of the group stop completion. Because
2081  * we're not holding either the siglock or tasklist_lock
2082  * here, ptracer may attach inbetween; however, this is for
2083  * group stop and should always be delivered to the real
2084  * parent of the group leader. The new ptracer will get
2085  * its notification when this task transitions into
2086  * TASK_TRACED.
2087  */
2088  if (notify) {
2089  read_lock(&tasklist_lock);
2090  do_notify_parent_cldstop(current, false, notify);
2091  read_unlock(&tasklist_lock);
2092  }
2093 
2094  /* Now we don't run again until woken by SIGCONT or SIGKILL */
2095  schedule();
2096  return true;
2097  } else {
2098  /*
2099  * While ptraced, group stop is handled by STOP trap.
2100  * Schedule it and let the caller deal with it.
2101  */
2102  task_set_jobctl_pending(current, JOBCTL_TRAP_STOP);
2103  return false;
2104  }
2105 }
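
Group-stop and continue notifications produced via do_notify_parent_cldstop() surface to the parent as waitpid() results with WUNTRACED and WCONTINUED. A minimal userspace sketch (illustrative only, not part of this file):

#include <signal.h>
#include <stdio.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
        int status;
        pid_t child = fork();

        if (child == 0) {
                pause();                        /* just sit there */
                _exit(0);
        }

        kill(child, SIGSTOP);
        waitpid(child, &status, WUNTRACED);     /* the CLD_STOPPED notification */
        if (WIFSTOPPED(status))
                printf("child stopped by signal %d\n", WSTOPSIG(status));

        kill(child, SIGCONT);
        waitpid(child, &status, WCONTINUED);    /* the CLD_CONTINUED notification */
        if (WIFCONTINUED(status))
                printf("child continued\n");

        kill(child, SIGKILL);
        waitpid(child, &status, 0);             /* reap it */
        return 0;
}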
2106 
2122 static void do_jobctl_trap(void)
2123 {
2124  struct signal_struct *signal = current->signal;
2125  int signr = current->jobctl & JOBCTL_STOP_SIGMASK;
2126 
2127  if (current->ptrace & PT_SEIZED) {
2128  if (!signal->group_stop_count &&
2129  !(signal->flags & SIGNAL_STOP_STOPPED))
2130  signr = SIGTRAP;
2131  WARN_ON_ONCE(!signr);
2132  ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8),
2133  CLD_STOPPED);
2134  } else {
2135  WARN_ON_ONCE(!signr);
2136  ptrace_stop(signr, CLD_STOPPED, 0, NULL);
2137  current->exit_code = 0;
2138  }
2139 }
2140 
2141 static int ptrace_signal(int signr, siginfo_t *info,
2142  struct pt_regs *regs, void *cookie)
2143 {
2144  ptrace_signal_deliver(regs, cookie);
2145  /*
2146  * We do not check sig_kernel_stop(signr) but set this marker
2147  * unconditionally because we do not know whether debugger will
2148  * change signr. This flag has no meaning unless we are going
2149  * to stop after return from ptrace_stop(). In this case it will
2150  * be checked in do_signal_stop(), we should only stop if it was
2151  * not cleared by SIGCONT while we were sleeping. See also the
2152  * comment in dequeue_signal().
2153  */
2154  current->jobctl |= JOBCTL_STOP_DEQUEUED;
2155  ptrace_stop(signr, CLD_TRAPPED, 0, info);
2156 
2157  /* We're back. Did the debugger cancel the sig? */
2158  signr = current->exit_code;
2159  if (signr == 0)
2160  return signr;
2161 
2162  current->exit_code = 0;
2163 
2164  /*
2165  * Update the siginfo structure if the signal has
2166  * changed. If the debugger wanted something
2167  * specific in the siginfo structure then it should
2168  * have updated *info via PTRACE_SETSIGINFO.
2169  */
2170  if (signr != info->si_signo) {
2171  info->si_signo = signr;
2172  info->si_errno = 0;
2173  info->si_code = SI_USER;
2174  rcu_read_lock();
2175  info->si_pid = task_pid_vnr(current->parent);
2176  info->si_uid = from_kuid_munged(current_user_ns(),
2177  task_uid(current->parent));
2178  rcu_read_unlock();
2179  }
2180 
2181  /* If the (new) signal is now blocked, requeue it. */
2182  if (sigismember(&current->blocked, signr)) {
2183  specific_send_sig_info(signr, info, current);
2184  signr = 0;
2185  }
2186 
2187  return signr;
2188 }
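
The exit_code read back here is the data argument the tracer passed when resuming the stopped tracee: 0 cancels the signal, any other value injects that signal. A hedged userspace sketch of a tracer swallowing an intercepted SIGUSR1 (illustrative, using the glibc ptrace() wrapper; not part of this file):

#include <signal.h>
#include <stdlib.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

static void handler(int sig)
{
        (void)sig;
        write(1, "handler ran\n", 12);
}

int main(void)
{
        int status;
        pid_t child = fork();

        if (child == 0) {
                signal(SIGUSR1, handler);
                ptrace(PTRACE_TRACEME, 0, NULL, NULL);
                raise(SIGUSR1);                 /* signal-delivery-stop */
                _exit(0);
        }

        waitpid(child, &status, 0);
        if (WIFSTOPPED(status) && WSTOPSIG(status) == SIGUSR1)
                /*
                 * Resuming with data 0 cancels the signal, so the child's
                 * handler never runs; resuming with SIGUSR1 (or any other
                 * number) would inject that signal instead.
                 */
                ptrace(PTRACE_CONT, child, NULL, (void *)0L);

        waitpid(child, &status, 0);             /* child exits normally */
        return 0;
}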
2189 
2190 int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
2191  struct pt_regs *regs, void *cookie)
2192 {
2193  struct sighand_struct *sighand = current->sighand;
2194  struct signal_struct *signal = current->signal;
2195  int signr;
2196 
2197  if (unlikely(current->task_works))
2198  task_work_run();
2199 
2200  if (unlikely(uprobe_deny_signal()))
2201  return 0;
2202 
2203 relock:
2204  /*
2205  * We'll jump back here any time we were stopped in TASK_STOPPED.
2206  * While in TASK_STOPPED, we were considered "frozen enough".
2207  * Now that we woke up, it's crucial if we're supposed to be
2208  * frozen that we freeze now before running anything substantial.
2209  */
2210  try_to_freeze();
2211 
2212  spin_lock_irq(&sighand->siglock);
2213  /*
2214  * Every stopped thread goes here after wakeup. Check to see if
2215  * we should notify the parent, prepare_signal(SIGCONT) encodes
2216  * the CLD_ si_code into SIGNAL_CLD_MASK bits.
2217  */
2218  if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
2219  int why;
2220 
2221  if (signal->flags & SIGNAL_CLD_CONTINUED)
2222  why = CLD_CONTINUED;
2223  else
2224  why = CLD_STOPPED;
2225 
2226  signal->flags &= ~SIGNAL_CLD_MASK;
2227 
2228  spin_unlock_irq(&sighand->siglock);
2229 
2230  /*
2231  * Notify the parent that we're continuing. This event is
2232  * always per-process and doesn't make a whole lot of sense
2233  * for ptracers, who shouldn't consume the state via
2234  * wait(2) either, but, for backward compatibility, notify
2235  * the ptracer of the group leader too unless it would be
2236  * a duplicate.
2237  */
2238  read_lock(&tasklist_lock);
2239  do_notify_parent_cldstop(current, false, why);
2240 
2241  if (ptrace_reparented(current->group_leader))
2242  do_notify_parent_cldstop(current->group_leader,
2243  true, why);
2244  read_unlock(&tasklist_lock);
2245 
2246  goto relock;
2247  }
2248 
2249  for (;;) {
2250  struct k_sigaction *ka;
2251 
2252  if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
2253  do_signal_stop(0))
2254  goto relock;
2255 
2256  if (unlikely(current->jobctl & JOBCTL_TRAP_MASK)) {
2257  do_jobctl_trap();
2258  spin_unlock_irq(&sighand->siglock);
2259  goto relock;
2260  }
2261 
2262  signr = dequeue_signal(current, &current->blocked, info);
2263 
2264  if (!signr)
2265  break; /* will return 0 */
2266 
2267  if (unlikely(current->ptrace) && signr != SIGKILL) {
2268  signr = ptrace_signal(signr, info,
2269  regs, cookie);
2270  if (!signr)
2271  continue;
2272  }
2273 
2274  ka = &sighand->action[signr-1];
2275 
2276  /* Trace actually delivered signals. */
2277  trace_signal_deliver(signr, info, ka);
2278 
2279  if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */
2280  continue;
2281  if (ka->sa.sa_handler != SIG_DFL) {
2282  /* Run the handler. */
2283  *return_ka = *ka;
2284 
2285  if (ka->sa.sa_flags & SA_ONESHOT)
2286  ka->sa.sa_handler = SIG_DFL;
2287 
2288  break; /* will return non-zero "signr" value */
2289  }
2290 
2291  /*
2292  * Now we are doing the default action for this signal.
2293  */
2294  if (sig_kernel_ignore(signr)) /* Default is nothing. */
2295  continue;
2296 
2297  /*
2298  * Global init gets no signals it doesn't want.
2299  * Container-init gets no signals it doesn't want from same
2300  * container.
2301  *
2302  * Note that if global/container-init sees a sig_kernel_only()
2303  * signal here, the signal must have been generated internally
2304  * or must have come from an ancestor namespace. In either
2305  * case, the signal cannot be dropped.
2306  */
2307  if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
2308  !sig_kernel_only(signr))
2309  continue;
2310 
2311  if (sig_kernel_stop(signr)) {
2312  /*
2313  * The default action is to stop all threads in
2314  * the thread group. The job control signals
2315  * do nothing in an orphaned pgrp, but SIGSTOP
2316  * always works. Note that siglock needs to be
2317  * dropped during the call to is_orphaned_pgrp()
2318  * because of lock ordering with tasklist_lock.
2319  * This allows an intervening SIGCONT to be posted.
2320  * We need to check for that and bail out if necessary.
2321  */
2322  if (signr != SIGSTOP) {
2323  spin_unlock_irq(&sighand->siglock);
2324 
2325  /* signals can be posted during this window */
2326 
2327  if (is_current_pgrp_orphaned())
2328  goto relock;
2329 
2330  spin_lock_irq(&sighand->siglock);
2331  }
2332 
2333  if (likely(do_signal_stop(info->si_signo))) {
2334  /* It released the siglock. */
2335  goto relock;
2336  }
2337 
2338  /*
2339  * We didn't actually stop, due to a race
2340  * with SIGCONT or something like that.
2341  */
2342  continue;
2343  }
2344 
2345  spin_unlock_irq(&sighand->siglock);
2346 
2347  /*
2348  * Anything else is fatal, maybe with a core dump.
2349  */
2350  current->flags |= PF_SIGNALED;
2351 
2352  if (sig_kernel_coredump(signr)) {
2353  if (print_fatal_signals)
2354  print_fatal_signal(regs, info->si_signo);
2355  /*
2356  * If it was able to dump core, this kills all
2357  * other threads in the group and synchronizes with
2358  * their demise. If we lost the race with another
2359  * thread getting here, it set group_exit_code
2360  * first and our do_group_exit call below will use
2361  * that value and ignore the one we pass it.
2362  */
2363  do_coredump(info, regs);
2364  }
2365 
2366  /*
2367  * Death signals, no core dump.
2368  */
2369  do_group_exit(info->si_signo);
2370  /* NOTREACHED */
2371  }
2372  spin_unlock_irq(&sighand->siglock);
2373  return signr;
2374 }
2375 
2389 void signal_delivered(int sig, siginfo_t *info, struct k_sigaction *ka,
2390  struct pt_regs *regs, int stepping)
2391 {
2392  sigset_t blocked;
2393 
2394  /* A signal was successfully delivered, and the
2395  saved sigmask was stored on the signal frame,
2396  and will be restored by sigreturn. So we can
2397  simply clear the restore sigmask flag. */
2398  clear_restore_sigmask();
2399 
2400  sigorsets(&blocked, &current->blocked, &ka->sa.sa_mask);
2401  if (!(ka->sa.sa_flags & SA_NODEFER))
2402  sigaddset(&blocked, sig);
2403  set_current_blocked(&blocked);
2404  tracehook_signal_handler(sig, info, ka, regs, stepping);
2405 }
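
Because the delivered signal is added to ->blocked here unless SA_NODEFER was requested, a handler normally cannot be re-entered by its own signal. A small userspace illustration (not part of this file; sigprocmask() is async-signal-safe, so querying the mask from inside the handler is permissible):

#include <signal.h>
#include <string.h>
#include <unistd.h>

static void handler(int sig)
{
        sigset_t cur;

        (void)sig;
        /* Query the mask in effect while the handler runs. */
        sigprocmask(SIG_SETMASK, NULL, &cur);
        if (sigismember(&cur, SIGUSR1))
                write(1, "SIGUSR1 is blocked inside its own handler\n", 42);
}

int main(void)
{
        struct sigaction sa;

        memset(&sa, 0, sizeof(sa));
        sa.sa_handler = handler;
        sigemptyset(&sa.sa_mask);
        sa.sa_flags = 0;                        /* no SA_NODEFER */
        sigaction(SIGUSR1, &sa, NULL);

        raise(SIGUSR1);
        return 0;
}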
2406 
2407 /*
2408  * It could be that complete_signal() picked us to notify about the
2409  * group-wide signal. Other threads should be notified now to take
2410  * the shared signals in @which since we will not.
2411  */
2412 static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
2413 {
2414  sigset_t retarget;
2415  struct task_struct *t;
2416 
2417  sigandsets(&retarget, &tsk->signal->shared_pending.signal, which);
2418  if (sigisemptyset(&retarget))
2419  return;
2420 
2421  t = tsk;
2422  while_each_thread(tsk, t) {
2423  if (t->flags & PF_EXITING)
2424  continue;
2425 
2426  if (!has_pending_signals(&retarget, &t->blocked))
2427  continue;
2428  /* Remove the signals this thread can handle. */
2429  sigandsets(&retarget, &retarget, &t->blocked);
2430 
2431  if (!signal_pending(t))
2432  signal_wake_up(t, 0);
2433 
2434  if (sigisemptyset(&retarget))
2435  break;
2436  }
2437 }
2438 
2439 void exit_signals(struct task_struct *tsk)
2440 {
2441  int group_stop = 0;
2442  sigset_t unblocked;
2443 
2444  /*
2445  * @tsk is about to have PF_EXITING set - lock out users which
2446  * expect stable threadgroup.
2447  */
2448  threadgroup_change_begin(tsk);
2449 
2450  if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
2451  tsk->flags |= PF_EXITING;
2452  threadgroup_change_end(tsk);
2453  return;
2454  }
2455 
2456  spin_lock_irq(&tsk->sighand->siglock);
2457  /*
2458  * From now this task is not visible for group-wide signals,
2459  * see wants_signal(), do_signal_stop().
2460  */
2461  tsk->flags |= PF_EXITING;
2462 
2463  threadgroup_change_end(tsk);
2464 
2465  if (!signal_pending(tsk))
2466  goto out;
2467 
2468  unblocked = tsk->blocked;
2469  signotset(&unblocked);
2470  retarget_shared_pending(tsk, &unblocked);
2471 
2472  if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
2473  task_participate_group_stop(tsk))
2474  group_stop = CLD_STOPPED;
2475 out:
2476  spin_unlock_irq(&tsk->sighand->siglock);
2477 
2478  /*
2479  * If group stop has completed, deliver the notification. This
2480  * should always go to the real parent of the group leader.
2481  */
2482  if (unlikely(group_stop)) {
2483  read_lock(&tasklist_lock);
2484  do_notify_parent_cldstop(tsk, false, group_stop);
2485  read_unlock(&tasklist_lock);
2486  }
2487 }
2488 
2498 
2499 
2500 /*
2501  * System call entry points.
2502  */
2503 
2507 SYSCALL_DEFINE0(restart_syscall)
2508 {
2509  struct restart_block *restart = &current_thread_info()->restart_block;
2510  return restart->fn(restart);
2511 }
2512 
2513 long do_no_restart_syscall(struct restart_block *param)
2514 {
2515  return -EINTR;
2516 }
2517 
2518 static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
2519 {
2520  if (signal_pending(tsk) && !thread_group_empty(tsk)) {
2521  sigset_t newblocked;
2522  /* A set of now blocked but previously unblocked signals. */
2523  sigandnsets(&newblocked, newset, &current->blocked);
2524  retarget_shared_pending(tsk, &newblocked);
2525  }
2526  tsk->blocked = *newset;
2527  recalc_sigpending();
2528 }
2529 
2537 void set_current_blocked(sigset_t *newset)
2538 {
2539  struct task_struct *tsk = current;
2540  sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP));
2541  spin_lock_irq(&tsk->sighand->siglock);
2542  __set_task_blocked(tsk, newset);
2543  spin_unlock_irq(&tsk->sighand->siglock);
2544 }
2545 
2546 void __set_current_blocked(const sigset_t *newset)
2547 {
2548  struct task_struct *tsk = current;
2549 
2550  spin_lock_irq(&tsk->sighand->siglock);
2551  __set_task_blocked(tsk, newset);
2552  spin_unlock_irq(&tsk->sighand->siglock);
2553 }
2554 
2555 /*
2556  * This is also useful for kernel threads that want to temporarily
2557  * (or permanently) block certain signals.
2558  *
2559  * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
2560  * interface happily blocks "unblockable" signals like SIGKILL
2561  * and friends.
2562  */
2563 int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
2564 {
2565  struct task_struct *tsk = current;
2566  sigset_t newset;
2567 
2568  /* Lockless, only current can change ->blocked, never from irq */
2569  if (oldset)
2570  *oldset = tsk->blocked;
2571 
2572  switch (how) {
2573  case SIG_BLOCK:
2574  sigorsets(&newset, &tsk->blocked, set);
2575  break;
2576  case SIG_UNBLOCK:
2577  sigandnsets(&newset, &tsk->blocked, set);
2578  break;
2579  case SIG_SETMASK:
2580  newset = *set;
2581  break;
2582  default:
2583  return -EINVAL;
2584  }
2585 
2586  __set_current_blocked(&newset);
2587  return 0;
2588 }
2589 
2597 SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset,
2598  sigset_t __user *, oset, size_t, sigsetsize)
2599 {
2600  sigset_t old_set, new_set;
2601  int error;
2602 
2603  /* XXX: Don't preclude handling different sized sigset_t's. */
2604  if (sigsetsize != sizeof(sigset_t))
2605  return -EINVAL;
2606 
2607  old_set = current->blocked;
2608 
2609  if (nset) {
2610  if (copy_from_user(&new_set, nset, sizeof(sigset_t)))
2611  return -EFAULT;
2612  sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
2613 
2614  error = sigprocmask(how, &new_set, NULL);
2615  if (error)
2616  return error;
2617  }
2618 
2619  if (oset) {
2620  if (copy_to_user(oset, &old_set, sizeof(sigset_t)))
2621  return -EFAULT;
2622  }
2623 
2624  return 0;
2625 }
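
From userspace this is reached through the sigprocmask(2)/rt_sigprocmask(2) wrappers; note that the SIGKILL and SIGSTOP bits are silently dropped rather than rejected. A brief illustrative sketch (not part of this file):

#include <signal.h>
#include <stdio.h>

int main(void)
{
        sigset_t block, old;

        sigemptyset(&block);
        sigaddset(&block, SIGINT);
        sigaddset(&block, SIGKILL);             /* silently stripped by the kernel */

        if (sigprocmask(SIG_BLOCK, &block, &old) == 0)
                printf("SIGINT blocked; the SIGKILL request was ignored\n");

        /* ... critical section that must not be interrupted by SIGINT ... */

        sigprocmask(SIG_SETMASK, &old, NULL);   /* restore the previous mask */
        return 0;
}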
2626 
2627 long do_sigpending(void __user *set, unsigned long sigsetsize)
2628 {
2629  long error = -EINVAL;
2630  sigset_t pending;
2631 
2632  if (sigsetsize > sizeof(sigset_t))
2633  goto out;
2634 
2635  spin_lock_irq(&current->sighand->siglock);
2636  sigorsets(&pending, &current->pending.signal,
2637  &current->signal->shared_pending.signal);
2638  spin_unlock_irq(&current->sighand->siglock);
2639 
2640  /* Outside the lock because only this thread touches it. */
2641  sigandsets(&pending, &current->blocked, &pending);
2642 
2643  error = -EFAULT;
2644  if (!copy_to_user(set, &pending, sigsetsize))
2645  error = 0;
2646 
2647 out:
2648  return error;
2649 }
2650 
2657 SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, set, size_t, sigsetsize)
2658 {
2659  return do_sigpending(set, sigsetsize);
2660 }
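
do_sigpending() reports only signals that are both pending (private or shared) and currently blocked, which is exactly what sigpending(2) shows from userspace. An illustrative sketch (not part of this file):

#include <signal.h>
#include <stdio.h>

int main(void)
{
        sigset_t block, pending;

        sigemptyset(&block);
        sigaddset(&block, SIGTERM);
        sigprocmask(SIG_BLOCK, &block, NULL);

        raise(SIGTERM);                         /* stays pending, not delivered */

        sigpending(&pending);
        if (sigismember(&pending, SIGTERM))
                printf("SIGTERM is pending while blocked\n");

        /* Unblocking it here would deliver it and, by default, kill us. */
        return 0;
}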
2661 
2662 #ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER
2663 
2664 int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
2665 {
2666  int err;
2667 
2668  if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t)))
2669  return -EFAULT;
2670  if (from->si_code < 0)
2671  return __copy_to_user(to, from, sizeof(siginfo_t))
2672  ? -EFAULT : 0;
2673  /*
2674  * If you change siginfo_t structure, please be sure
2675  * this code is fixed accordingly.
2676  * Please remember to update the signalfd_copyinfo() function
2677  * inside fs/signalfd.c too, in case siginfo_t changes.
2678  * It should never copy any pad contained in the structure
2679  * to avoid security leaks, but must copy the generic
2680  * 3 ints plus the relevant union member.
2681  */
2682  err = __put_user(from->si_signo, &to->si_signo);
2683  err |= __put_user(from->si_errno, &to->si_errno);
2684  err |= __put_user((short)from->si_code, &to->si_code);
2685  switch (from->si_code & __SI_MASK) {
2686  case __SI_KILL:
2687  err |= __put_user(from->si_pid, &to->si_pid);
2688  err |= __put_user(from->si_uid, &to->si_uid);
2689  break;
2690  case __SI_TIMER:
2691  err |= __put_user(from->si_tid, &to->si_tid);
2692  err |= __put_user(from->si_overrun, &to->si_overrun);
2693  err |= __put_user(from->si_ptr, &to->si_ptr);
2694  break;
2695  case __SI_POLL:
2696  err |= __put_user(from->si_band, &to->si_band);
2697  err |= __put_user(from->si_fd, &to->si_fd);
2698  break;
2699  case __SI_FAULT:
2700  err |= __put_user(from->si_addr, &to->si_addr);
2701 #ifdef __ARCH_SI_TRAPNO
2702  err |= __put_user(from->si_trapno, &to->si_trapno);
2703 #endif
2704 #ifdef BUS_MCEERR_AO
2705  /*
2706  * Other callers might not initialize the si_lsb field,
2707  * so check explicitly for the right codes here.
2708  */
2709  if (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO)
2710  err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb);
2711 #endif
2712  break;
2713  case __SI_CHLD:
2714  err |= __put_user(from->si_pid, &to->si_pid);
2715  err |= __put_user(from->si_uid, &to->si_uid);
2716  err |= __put_user(from->si_status, &to->si_status);
2717  err |= __put_user(from->si_utime, &to->si_utime);
2718  err |= __put_user(from->si_stime, &to->si_stime);
2719  break;
2720  case __SI_RT: /* This is not generated by the kernel as of now. */
2721  case __SI_MESGQ: /* But this is */
2722  err |= __put_user(from->si_pid, &to->si_pid);
2723  err |= __put_user(from->si_uid, &to->si_uid);
2724  err |= __put_user(from->si_ptr, &to->si_ptr);
2725  break;
2726 #ifdef __ARCH_SIGSYS
2727  case __SI_SYS:
2728  err |= __put_user(from->si_call_addr, &to->si_call_addr);
2729  err |= __put_user(from->si_syscall, &to->si_syscall);
2730  err |= __put_user(from->si_arch, &to->si_arch);
2731  break;
2732 #endif
2733  default: /* this is just in case for now ... */
2734  err |= __put_user(from->si_pid, &to->si_pid);
2735  err |= __put_user(from->si_uid, &to->si_uid);
2736  break;
2737  }
2738  return err;
2739 }
2740 
2741 #endif
2742 
2749 int do_sigtimedwait(const sigset_t *which, siginfo_t *info,
2750  const struct timespec *ts)
2751 {
2752  struct task_struct *tsk = current;
2753  long timeout = MAX_SCHEDULE_TIMEOUT;
2754  sigset_t mask = *which;
2755  int sig;
2756 
2757  if (ts) {
2758  if (!timespec_valid(ts))
2759  return -EINVAL;
2760  timeout = timespec_to_jiffies(ts);
2761  /*
2762  * We can be close to the next tick, add another one
2763  * to ensure we will wait at least the time asked for.
2764  */
2765  if (ts->tv_sec || ts->tv_nsec)
2766  timeout++;
2767  }
2768 
2769  /*
2770  * Invert the set of allowed signals to get those we want to block.
2771  */
2772  sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
2773  signotset(&mask);
2774 
2775  spin_lock_irq(&tsk->sighand->siglock);
2776  sig = dequeue_signal(tsk, &mask, info);
2777  if (!sig && timeout) {
2778  /*
2779  * None ready, temporarily unblock those we're interested in
2780  * while we are sleeping so that we'll be awakened when
2781  * they arrive. Unblocking is always fine, we can avoid
2782  * set_current_blocked().
2783  */
2784  tsk->real_blocked = tsk->blocked;
2785  sigandsets(&tsk->blocked, &tsk->blocked, &mask);
2786  recalc_sigpending();
2787  spin_unlock_irq(&tsk->sighand->siglock);
2788 
2789  timeout = schedule_timeout_interruptible(timeout);
2790 
2791  spin_lock_irq(&tsk->sighand->siglock);
2792  __set_task_blocked(tsk, &tsk->real_blocked);
2793  siginitset(&tsk->real_blocked, 0);
2794  sig = dequeue_signal(tsk, &mask, info);
2795  }
2796  spin_unlock_irq(&tsk->sighand->siglock);
2797 
2798  if (sig)
2799  return sig;
2800  return timeout ? -EINTR : -EAGAIN;
2801 }
2802 
2811 SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
2812  siginfo_t __user *, uinfo, const struct timespec __user *, uts,
2813  size_t, sigsetsize)
2814 {
2815  sigset_t these;
2816  struct timespec ts;
2817  siginfo_t info;
2818  int ret;
2819 
2820  /* XXX: Don't preclude handling different sized sigset_t's. */
2821  if (sigsetsize != sizeof(sigset_t))
2822  return -EINVAL;
2823 
2824  if (copy_from_user(&these, uthese, sizeof(these)))
2825  return -EFAULT;
2826 
2827  if (uts) {
2828  if (copy_from_user(&ts, uts, sizeof(ts)))
2829  return -EFAULT;
2830  }
2831 
2832  ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
2833 
2834  if (ret > 0 && uinfo) {
2835  if (copy_siginfo_to_user(uinfo, &info))
2836  ret = -EFAULT;
2837  }
2838 
2839  return ret;
2840 }
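
Userspace reaches this through sigtimedwait(2)/sigwaitinfo(2); the signals being waited for should already be blocked so they are dequeued here instead of being delivered to a handler. An illustrative sketch (not part of this file; the two-second timeout is arbitrary):

#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
        sigset_t set;
        siginfo_t info;
        struct timespec ts = { .tv_sec = 2, .tv_nsec = 0 };
        int sig;

        sigemptyset(&set);
        sigaddset(&set, SIGUSR1);
        sigprocmask(SIG_BLOCK, &set, NULL);     /* must be blocked before waiting */

        sig = sigtimedwait(&set, &info, &ts);
        if (sig == SIGUSR1)
                printf("SIGUSR1 from pid %d\n", (int)info.si_pid);
        else if (sig < 0 && errno == EAGAIN)
                printf("timed out after ~2 seconds\n");
        return 0;
}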
2841 
2847 SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
2848 {
2849  struct siginfo info;
2850 
2851  info.si_signo = sig;
2852  info.si_errno = 0;
2853  info.si_code = SI_USER;
2854  info.si_pid = task_tgid_vnr(current);
2855  info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
2856 
2857  return kill_something_info(sig, &info, pid);
2858 }
2859 
2860 static int
2861 do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
2862 {
2863  struct task_struct *p;
2864  int error = -ESRCH;
2865 
2866  rcu_read_lock();
2867  p = find_task_by_vpid(pid);
2868  if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
2869  error = check_kill_permission(sig, info, p);
2870  /*
2871  * The null signal is a permissions and process existence
2872  * probe. No signal is actually delivered.
2873  */
2874  if (!error && sig) {
2875  error = do_send_sig_info(sig, info, p, false);
2876  /*
2877  * If lock_task_sighand() failed we pretend the task
2878  * dies after receiving the signal. The window is tiny,
2879  * and the signal is private anyway.
2880  */
2881  if (unlikely(error == -ESRCH))
2882  error = 0;
2883  }
2884  }
2885  rcu_read_unlock();
2886 
2887  return error;
2888 }
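
The null-signal probe described in the comment above is also how kill(2) with signal 0 behaves: permissions and existence are checked but nothing is delivered. An illustrative userspace sketch (not part of this file; it defaults to probing pid 1):

#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>

int main(int argc, char **argv)
{
        pid_t pid = (pid_t)atoi(argc > 1 ? argv[1] : "1");

        if (kill(pid, 0) == 0)
                printf("pid %d exists and we may signal it\n", (int)pid);
        else if (errno == EPERM)
                printf("pid %d exists but we lack permission\n", (int)pid);
        else if (errno == ESRCH)
                printf("no process with pid %d\n", (int)pid);
        return 0;
}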
2889 
2890 static int do_tkill(pid_t tgid, pid_t pid, int sig)
2891 {
2892  struct siginfo info;
2893 
2894  info.si_signo = sig;
2895  info.si_errno = 0;
2896  info.si_code = SI_TKILL;
2897  info.si_pid = task_tgid_vnr(current);
2898  info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
2899 
2900  return do_send_specific(tgid, pid, sig, &info);
2901 }
2902 
2913 SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
2914 {
2915  /* This is only valid for single tasks */
2916  if (pid <= 0 || tgid <= 0)
2917  return -EINVAL;
2918 
2919  return do_tkill(tgid, pid, sig);
2920 }
2921 
2929 SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
2930 {
2931  /* This is only valid for single tasks */
2932  if (pid <= 0)
2933  return -EINVAL;
2934 
2935  return do_tkill(0, pid, sig);
2936 }
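
A rough userspace sketch of directing a signal at one specific thread with tgkill (illustrative only, not part of this file; it uses raw syscall(2) because glibc does not wrap tgkill, and a crude sleep() to wait for the worker's tid):

#define _GNU_SOURCE
#include <pthread.h>
#include <signal.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>

static pid_t worker_tid;

static void handler(int sig)
{
        (void)sig;
        write(1, "worker thread got SIGUSR1\n", 26);
}

static void *worker(void *arg)
{
        (void)arg;
        worker_tid = syscall(SYS_gettid);       /* kernel tid, not pthread_t */
        pause();                                /* returns once the handler ran */
        return NULL;
}

int main(void)
{
        pthread_t t;

        signal(SIGUSR1, handler);
        pthread_create(&t, NULL, worker, NULL);
        sleep(1);                               /* crude: wait for the tid to be published */

        /* tgkill(tgid, tid, sig): the tid must belong to that thread group. */
        syscall(SYS_tgkill, (pid_t)getpid(), worker_tid, SIGUSR1);

        pthread_join(t, NULL);
        return 0;
}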
2937 
2944 SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
2945  siginfo_t __user *, uinfo)
2946 {
2947  siginfo_t info;
2948 
2949  if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
2950  return -EFAULT;
2951 
2952  /* Not even root can pretend to send signals from the kernel.
2953  * Nor can they impersonate a kill()/tgkill(), which adds source info.
2954  */
2955  if (info.si_code >= 0 || info.si_code == SI_TKILL) {
2956  /* We used to allow any < 0 si_code */
2957  WARN_ON_ONCE(info.si_code < 0);
2958  return -EPERM;
2959  }
2960  info.si_signo = sig;
2961 
2962  /* POSIX.1b doesn't mention process groups. */
2963  return kill_proc_info(sig, &info, pid);
2964 }
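
glibc's sigqueue(3) is built on rt_sigqueueinfo with si_code set to SI_QUEUE, a negative value, which is why the si_code >= 0 check above does not reject it. An illustrative userspace sketch (not part of this file; the payload value 42 is arbitrary):

#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static void handler(int sig, siginfo_t *info, void *ctx)
{
        (void)ctx;
        /* Illustration only: printf() is not async-signal-safe. */
        printf("sig %d, si_code %d (SI_QUEUE), value %d\n",
               sig, info->si_code, info->si_value.sival_int);
}

int main(void)
{
        struct sigaction sa;
        union sigval v = { .sival_int = 42 };

        memset(&sa, 0, sizeof(sa));
        sa.sa_sigaction = handler;
        sa.sa_flags = SA_SIGINFO;
        sigemptyset(&sa.sa_mask);
        sigaction(SIGUSR1, &sa, NULL);

        sigqueue(getpid(), SIGUSR1, v);         /* glibc wraps rt_sigqueueinfo */
        return 0;
}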
2965 
2966 long do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info)
2967 {
2968  /* This is only valid for single tasks */
2969  if (pid <= 0 || tgid <= 0)
2970  return -EINVAL;
2971 
2972  /* Not even root can pretend to send signals from the kernel.
2973  * Nor can they impersonate a kill()/tgkill(), which adds source info.
2974  */
2975  if (info->si_code >= 0 || info->si_code == SI_TKILL) {
2976  /* We used to allow any < 0 si_code */
2977  WARN_ON_ONCE(info->si_code < 0);
2978  return -EPERM;
2979  }
2980  info->si_signo = sig;
2981 
2982  return do_send_specific(tgid, pid, sig, info);
2983 }
2984 
2985 SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
2986  siginfo_t __user *, uinfo)
2987 {
2988  siginfo_t info;
2989 
2990  if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
2991  return -EFAULT;
2992 
2993  return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
2994 }
2995 
2996 int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
2997 {
2998  struct task_struct *t = current;
2999  struct k_sigaction *k;
3000  sigset_t mask;
3001 
3002  if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
3003  return -EINVAL;
3004 
3005  k = &t->sighand->action[sig-1];
3006 
3007  spin_lock_irq(&current->sighand->siglock);
3008  if (oact)
3009  *oact = *k;
3010 
3011  if (act) {
3012  sigdelsetmask(&act->sa.sa_mask,
3013  sigmask(SIGKILL) | sigmask(SIGSTOP));
3014  *k = *act;
3015  /*
3016  * POSIX 3.3.1.3:
3017  * "Setting a signal action to SIG_IGN for a signal that is
3018  * pending shall cause the pending signal to be discarded,
3019  * whether or not it is blocked."
3020  *
3021  * "Setting a signal action to SIG_DFL for a signal that is
3022  * pending and whose default action is to ignore the signal
3023  * (for example, SIGCHLD), shall cause the pending signal to
3024  * be discarded, whether or not it is blocked"
3025  */
3026  if (sig_handler_ignored(sig_handler(t, sig), sig)) {
3027  sigemptyset(&mask);
3028  sigaddset(&mask, sig);
3029  rm_from_queue_full(&mask, &t->signal->shared_pending);
3030  do {
3031  rm_from_queue_full(&mask, &t->pending);
3032  t = next_thread(t);
3033  } while (t != current);
3034  }
3035  }
3036 
3037  spin_unlock_irq(&current->sighand->siglock);
3038  return 0;
3039 }
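
The POSIX rule quoted above is easy to observe from userspace: installing SIG_IGN throws away a signal that is already pending, even while it is blocked. An illustrative sketch (not part of this file):

#include <signal.h>
#include <stdio.h>

int main(void)
{
        sigset_t block, pending;

        sigemptyset(&block);
        sigaddset(&block, SIGUSR1);
        sigprocmask(SIG_BLOCK, &block, NULL);
        raise(SIGUSR1);                         /* pending and blocked */

        signal(SIGUSR1, SIG_IGN);               /* discards the pending signal */

        sigpending(&pending);
        printf("SIGUSR1 still pending? %s\n",
               sigismember(&pending, SIGUSR1) ? "yes" : "no");   /* prints "no" */

        signal(SIGUSR1, SIG_DFL);
        sigprocmask(SIG_UNBLOCK, &block, NULL); /* safe: nothing left to deliver */
        return 0;
}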
3040 
3041 int
3042 do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
3043 {
3044  stack_t oss;
3045  int error;
3046 
3047  oss.ss_sp = (void __user *) current->sas_ss_sp;
3048  oss.ss_size = current->sas_ss_size;
3049  oss.ss_flags = sas_ss_flags(sp);
3050 
3051  if (uss) {
3052  void __user *ss_sp;
3053  size_t ss_size;
3054  int ss_flags;
3055 
3056  error = -EFAULT;
3057  if (!access_ok(VERIFY_READ, uss, sizeof(*uss)))
3058  goto out;
3059  error = __get_user(ss_sp, &uss->ss_sp) |
3060  __get_user(ss_flags, &uss->ss_flags) |
3061  __get_user(ss_size, &uss->ss_size);
3062  if (error)
3063  goto out;
3064 
3065  error = -EPERM;
3066  if (on_sig_stack(sp))
3067  goto out;
3068 
3069  error = -EINVAL;
3070  /*
3071  * Note - this code used to test ss_flags incorrectly:
3072  * old code may have been written using ss_flags==0
3073  * to mean ss_flags==SS_ONSTACK (as this was the only
3074  * way that worked) - this fix preserves that older
3075  * mechanism.
3076  */
3077  if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
3078  goto out;
3079 
3080  if (ss_flags == SS_DISABLE) {
3081  ss_size = 0;
3082  ss_sp = NULL;
3083  } else {
3084  error = -ENOMEM;
3085  if (ss_size < MINSIGSTKSZ)
3086  goto out;
3087  }
3088 
3089  current->sas_ss_sp = (unsigned long) ss_sp;
3090  current->sas_ss_size = ss_size;
3091  }
3092 
3093  error = 0;
3094  if (uoss) {
3095  error = -EFAULT;
3096  if (!access_ok(VERIFY_WRITE, uoss, sizeof(*uoss)))
3097  goto out;
3098  error = __put_user(oss.ss_sp, &uoss->ss_sp) |
3099  __put_user(oss.ss_size, &uoss->ss_size) |
3100  __put_user(oss.ss_flags, &uoss->ss_flags);
3101  }
3102 
3103 out:
3104  return error;
3105 }
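
Userspace reaches this through sigaltstack(2), usually paired with an SA_ONSTACK handler so that stack-overflow faults can still be reported. An illustrative sketch (not part of this file; raise(SIGSEGV) stands in for a real fault):

#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

static void on_segv(int sig)
{
        (void)sig;
        /* Runs on the alternate stack, so it still works after the
         * normal stack has been exhausted. */
        write(2, "caught SIGSEGV on the alternate stack\n", 38);
        _exit(1);
}

int main(void)
{
        stack_t ss;
        struct sigaction sa;

        ss.ss_sp = malloc(SIGSTKSZ);            /* must be >= MINSIGSTKSZ */
        ss.ss_size = SIGSTKSZ;
        ss.ss_flags = 0;
        if (sigaltstack(&ss, NULL) < 0) {
                perror("sigaltstack");
                return 1;
        }

        memset(&sa, 0, sizeof(sa));
        sa.sa_handler = on_segv;
        sa.sa_flags = SA_ONSTACK;               /* deliver on the alternate stack */
        sigemptyset(&sa.sa_mask);
        sigaction(SIGSEGV, &sa, NULL);

        raise(SIGSEGV);                         /* stand-in for a real fault */
        return 0;
}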
3106 
3107 #ifdef __ARCH_WANT_SYS_SIGPENDING
3108 
3113 SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, set)
3114 {
3115  return do_sigpending(set, sizeof(*set));
3116 }
3117 
3118 #endif
3119 
3120 #ifdef __ARCH_WANT_SYS_SIGPROCMASK
3121 
3131 SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
3132  old_sigset_t __user *, oset)
3133 {
3134  old_sigset_t old_set, new_set;
3135  sigset_t new_blocked;
3136 
3137  old_set = current->blocked.sig[0];
3138 
3139  if (nset) {
3140  if (copy_from_user(&new_set, nset, sizeof(*nset)))
3141  return -EFAULT;
3142  new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP));
3143 
3144  new_blocked = current->blocked;
3145 
3146  switch (how) {
3147  case SIG_BLOCK:
3148  sigaddsetmask(&new_blocked, new_set);
3149  break;
3150  case SIG_UNBLOCK:
3151  sigdelsetmask(&new_blocked, new_set);
3152  break;
3153  case SIG_SETMASK:
3154  new_blocked.sig[0] = new_set;
3155  break;
3156  default:
3157  return -EINVAL;
3158  }
3159 
3160  __set_current_blocked(&new_blocked);
3161  }
3162 
3163  if (oset) {
3164  if (copy_to_user(oset, &old_set, sizeof(*oset)))
3165  return -EFAULT;
3166  }
3167 
3168  return 0;
3169 }
3170 #endif /* __ARCH_WANT_SYS_SIGPROCMASK */
3171 
3172 #ifdef __ARCH_WANT_SYS_RT_SIGACTION
3173 
3180 SYSCALL_DEFINE4(rt_sigaction, int, sig,
3181  const struct sigaction __user *, act,
3182  struct sigaction __user *, oact,
3183  size_t, sigsetsize)
3184 {
3185  struct k_sigaction new_sa, old_sa;
3186  int ret = -EINVAL;
3187 
3188  /* XXX: Don't preclude handling different sized sigset_t's. */
3189  if (sigsetsize != sizeof(sigset_t))
3190  goto out;
3191 
3192  if (act) {
3193  if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
3194  return -EFAULT;
3195  }
3196 
3197  ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
3198 
3199  if (!ret && oact) {
3200  if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
3201  return -EFAULT;
3202  }
3203 out:
3204  return ret;
3205 }
3206 #endif /* __ARCH_WANT_SYS_RT_SIGACTION */
3207 
3208 #ifdef __ARCH_WANT_SYS_SGETMASK
3209 
3210 /*
3211  * For backwards compatibility. Functionality superseded by sigprocmask.
3212  */
3213 SYSCALL_DEFINE0(sgetmask)
3214 {
3215  /* SMP safe */
3216  return current->blocked.sig[0];
3217 }
3218 
3219 SYSCALL_DEFINE1(ssetmask, int, newmask)
3220 {
3221  int old = current->blocked.sig[0];
3222  sigset_t newset;
3223 
      siginitset(&newset, newmask);
3224  set_current_blocked(&newset);
3225 
3226  return old;
3227 }
3228 #endif /* __ARCH_WANT_SYS_SGETMASK */
3229 
3230 #ifdef __ARCH_WANT_SYS_SIGNAL
3231 /*
3232  * For backwards compatibility. Functionality superseded by sigaction.
3233  */
3234 SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
3235 {
3236  struct k_sigaction new_sa, old_sa;
3237  int ret;
3238 
3239  new_sa.sa.sa_handler = handler;
3240  new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
3241  sigemptyset(&new_sa.sa.sa_mask);
3242 
3243  ret = do_sigaction(sig, &new_sa, &old_sa);
3244 
3245  return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
3246 }
3247 #endif /* __ARCH_WANT_SYS_SIGNAL */
3248 
3249 #ifdef __ARCH_WANT_SYS_PAUSE
3250 
3251 SYSCALL_DEFINE0(pause)
3252 {
3253  while (!signal_pending(current)) {
3254  current->state = TASK_INTERRUPTIBLE;
3255  schedule();
3256  }
3257  return -ERESTARTNOHAND;
3258 }
3259 
3260 #endif
3261 
3262 int sigsuspend(sigset_t *set)
3263 {
3264  current->saved_sigmask = current->blocked;
3265  set_current_blocked(set);
3266 
3267  current->state = TASK_INTERRUPTIBLE;
3268  schedule();
3269  set_restore_sigmask();
3270  return -ERESTARTNOHAND;
3271 }
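
This is the kernel side of the classic race-free wait pattern: block the signal, test the condition, then let sigsuspend() atomically swap in the old mask and sleep. An illustrative userspace sketch (not part of this file; send it SIGUSR1 from another shell to wake it):

#include <signal.h>
#include <stdio.h>
#include <string.h>

static volatile sig_atomic_t got_usr1;

static void handler(int sig)
{
        (void)sig;
        got_usr1 = 1;
}

int main(void)
{
        struct sigaction sa;
        sigset_t block, orig;

        memset(&sa, 0, sizeof(sa));
        sa.sa_handler = handler;
        sigemptyset(&sa.sa_mask);
        sigaction(SIGUSR1, &sa, NULL);

        sigemptyset(&block);
        sigaddset(&block, SIGUSR1);
        sigprocmask(SIG_BLOCK, &block, &orig);  /* close the race window */

        while (!got_usr1)
                /*
                 * Atomically install the old mask and sleep; on return the
                 * blocking mask is put back, just as the kernel restores
                 * ->saved_sigmask via set_restore_sigmask() above.
                 */
                sigsuspend(&orig);

        sigprocmask(SIG_SETMASK, &orig, NULL);
        printf("got SIGUSR1\n");
        return 0;
}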
3272 
3273 #ifdef __ARCH_WANT_SYS_RT_SIGSUSPEND
3274 
3280 SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
3281 {
3282  sigset_t newset;
3283 
3284  /* XXX: Don't preclude handling different sized sigset_t's. */
3285  if (sigsetsize != sizeof(sigset_t))
3286  return -EINVAL;
3287 
3288  if (copy_from_user(&newset, unewset, sizeof(newset)))
3289  return -EFAULT;
3290  return sigsuspend(&newset);
3291 }
3292 #endif /* __ARCH_WANT_SYS_RT_SIGSUSPEND */
3293 
3294 __attribute__((weak)) const char *arch_vma_name(struct vm_area_struct *vma)
3295 {
3296  return NULL;
3297 }
3298 
3299 void __init signals_init(void)
3300 {
3301  sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
3302 }
3303 
3304 #ifdef CONFIG_KGDB_KDB
3305 #include <linux/kdb.h>
3306 /*
3307  * kdb_send_sig_info - Allows kdb to send signals without exposing
3308  * signal internals. This function checks if the required locks are
3309  * available before calling the main signal code, to avoid kdb
3310  * deadlocks.
3311  */
3312 void
3313 kdb_send_sig_info(struct task_struct *t, struct siginfo *info)
3314 {
3315  static struct task_struct *kdb_prev_t;
3316  int sig, new_t;
3317  if (!spin_trylock(&t->sighand->siglock)) {
3318  kdb_printf("Can't do kill command now.\n"
3319  "The sigmask lock is held somewhere else in "
3320  "kernel, try again later\n");
3321  return;
3322  }
3323  spin_unlock(&t->sighand->siglock);
3324  new_t = kdb_prev_t != t;
3325  kdb_prev_t = t;
3326  if (t->state != TASK_RUNNING && new_t) {
3327  kdb_printf("Process is not RUNNING, sending a signal from "
3328  "kdb risks deadlock\n"
3329  "on the run queue locks. "
3330  "The signal has _not_ been sent.\n"
3331  "Reissue the kill command if you want to risk "
3332  "the deadlock.\n");
3333  return;
3334  }
3335  sig = info->si_signo;
3336  if (send_sig_info(sig, info, t))
3337  kdb_printf("Fail to deliver Signal %d to process %d.\n",
3338  sig, t->pid);
3339  else
3340  kdb_printf("Signal %d is sent to process %d.\n", sig, t->pid);
3341 }
3342 #endif /* CONFIG_KGDB_KDB */