#include <linux/slab.h>
#include <linux/module.h>
#include <linux/capability.h>
#include <linux/personality.h>
#include <linux/tty.h>
#include <linux/acct.h>
#include <linux/binfmts.h>
#include <linux/ptrace.h>
#include <linux/mempolicy.h>
#include <linux/signal.h>
#include <linux/cn_proc.h>
#include <linux/futex.h>
#include <linux/audit.h>
#include <linux/resource.h>
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>
#include <linux/oom.h>
#include <linux/shm.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>
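
/*
 * __unhash_process() detaches the exiting thread from the pid hash; when
 * the last thread in the group dies, the whole process is also unlinked
 * from the global task list (the list_del_rcu() on p->tasks below).
 */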
static void __unhash_process(struct task_struct *p, bool group_dead)
        list_del_rcu(&p->tasks);
        bool group_dead = thread_group_leader(tsk);
                                        lockdep_tasklist_lock_is_held());
        if (unlikely(has_group_leader_pid(tsk)))
        sig->inblock += task_io_get_inblock(tsk);
        sig->oublock += task_io_get_oublock(tsk);
        task_io_accounting_add(&sig->ioac, &tsk->ioac);
        __unhash_process(tsk, group_dead);
        spin_unlock(&sighand->siglock);
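
/*
 * RCU callback: drop the final task_struct reference only after a grace
 * period, so lock-free walkers of the task lists never touch freed memory.
 */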
static void delayed_put_task_struct(struct rcu_head *rhp)
        trace_sched_process_free(tsk);
        put_task_struct(tsk);
        ptrace_release_task(p);
        sid = task_session(p);
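
/*
 * POSIX orphaned-pgrp test: a process group is orphaned when no member
 * has a parent in a different process group within the same session.
 * ignored_task is treated as if it had already exited.
 */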
static int will_become_orphaned_pgrp(struct pid *pgrp, struct task_struct *ignored_task)
                if ((p == ignored_task) ||
        retval = will_become_orphaned_pgrp(task_pgrp(current), NULL);
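
/*
 * Does any member of this pgrp sit in TASK_STOPPED?  An orphaned group
 * that still has stopped jobs must be sent SIGHUP and SIGCONT.
 */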
static bool has_stopped_jobs(struct pid *pgrp)
        struct pid *pgrp = task_pgrp(tsk);
        if (task_pgrp(parent) != pgrp &&
            task_session(parent) == task_session(tsk) &&
            will_become_orphaned_pgrp(pgrp, ignored_task) &&
            has_stopped_jobs(pgrp)) {
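
/*
 * Used on the daemonize() path: hang the calling task off kthreadd,
 * reset its nice level and credentials, and take over init's rlimits.
 */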
static void reparent_to_kthreadd(void)
        list_move_tail(&current->sibling, &current->real_parent->children);
               sizeof(current->signal->rlim));
        if (task_session(curr) != pid)
        if (task_pgrp(curr) != pid)
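
/* Move the caller's group leader into the session and process group @pid. */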
static void set_special_pids(struct pid *pid)
        if (!valid_signal(sig) || sig < 1)
        spin_lock_irq(&current->sighand->siglock);
        sigdelset(&current->blocked, sig);
        spin_unlock_irq(&current->sighand->siglock);
        if (!valid_signal(sig) || sig < 1)
        spin_lock_irq(&current->sighand->siglock);
        spin_unlock_irq(&current->sighand->siglock);
        sigfillset(&blocked);
        reparent_to_kthreadd();
#ifdef CONFIG_MM_OWNER
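/*
 * mm_update_next_owner(): the current mm->owner is exiting, so choose a
 * new owner, trying the children first, then the siblings, then any
 * other task using the mm; each match jumps to assign_new_owner below.
 */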
                        goto assign_new_owner;
                        goto assign_new_owner;
                        goto assign_new_owner;
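/*
 * In exit_mm(): if a core dump is in progress, atomically chain this
 * thread onto core_state->dumper with xchg() and wait for the dumper
 * to finish with the mm before the exit proceeds.
 */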
                self.next = xchg(&core_state->dumper.next, &self);
        mm_update_next_owner(mm);
                panic("Attempted to kill init! exitcode=0x%08x\n",
                        father->signal->group_exit_code ?:
                                father->exit_code);
        } else if (father->signal->has_child_subreaper) {
                        if (!reaper->signal->is_child_subreaper)
                        list_move_tail(&p->sibling, dead);
        kill_orphaned_pgrp(p, father);
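
/*
 * Hand every child of the dying task over to the reaper chosen by
 * find_new_reaper(): a live thread in the same group, a child
 * subreaper, or the pid namespace's init. Leaders that are already
 * dead are collected so they can be released afterwards.
 */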
static void forget_original_parent(struct task_struct *father)
        reaper = find_new_reaper(father);
                        if (t->parent == father) {
                reparent_leader(father, p, &dead_children);
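
/*
 * Notify the parent of our death (or self-reap when nobody will wait),
 * after handing our children off and, where needed, signalling a newly
 * orphaned process group that still contains stopped jobs.
 */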
static void exit_notify(struct task_struct *tsk, int group_dead)
        forget_original_parent(tsk);
                int sig = thread_group_leader(tsk) &&
                                thread_group_empty(tsk) &&
                                !ptrace_reparented(tsk) ?
                        tsk->exit_signal : SIGCHLD;
        } else if (thread_group_leader(tsk)) {
                autoreap = thread_group_empty(tsk) &&
#ifdef CONFIG_DEBUG_STACK_USAGE
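/*
 * Track the low-water mark of unused kernel stack observed at exit and
 * report each new record; a no-op without CONFIG_DEBUG_STACK_USAGE.
 */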
static void check_stack_usage(void)
        free = stack_not_used(current);
        if (free >= lowest_to_date)
        spin_lock(&low_water_lock);
        if (free < lowest_to_date) {
                lowest_to_date = free;
        spin_unlock(&low_water_lock);
static inline void check_stack_usage(void) {}
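
/*
 * do_exit(): the path every dying task takes. Exiting from interrupt
 * context or from the idle task is fatal, hence the panics below.
 */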
        WARN_ON(blk_needs_flush_plug(tsk));
                panic("Aiee, killing interrupt handler!");
                panic("Attempted to kill the idle task!");
        validate_creds_for_do_exit(tsk);
                        "Fixing recursive fault but reboot is needed!\n");
        acct_update_integrals(tsk);
                sync_mm_rss(tsk->mm);
                        setmax_mm_hiwater_rss(&tsk->signal->maxrss, tsk->mm);
        trace_sched_process_exit(tsk);
        ptrace_put_breakpoints(tsk);
        exit_notify(tsk, group_dead);
        mpol_put(tsk->mempolicy);
        tsk->mempolicy = NULL;
        validate_creds_for_do_exit(tsk);
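
/*
 * do_group_exit(): take the whole thread group down with the given exit
 * code. Core dumps never reach this path (the 0x80 core flag is set
 * elsewhere), hence the BUG_ON() below.
 */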
        BUG_ON(exit_code & 0x80);
        if (signal_group_exit(sig))
        else if (!thread_group_empty(current)) {
                spin_lock_irq(&sighand->siglock);
                if (signal_group_exit(sig))
                spin_unlock_irq(&sighand->siglock);
        if (!eligible_pid(wo, p))
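
/*
 * wait_noreap_copyout(): fill the waiter's siginfo for WNOWAIT-style
 * waits, reporting the child's state without reaping it.
 */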
                retval = put_user(pid, &infop->si_pid);
                retval = put_user(uid, &infop->si_uid);
                retval = put_user(status, &infop->si_status);
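
/*
 * wait_task_zombie(): reap an EXIT_ZOMBIE child, folding its rusage and
 * I/O accounting into the parent before the task is released.
 */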
        unsigned long state;
        pid_t pid = task_pid_vnr(p);
                if ((exit_code & 0x7f) == 0) {
                        status = exit_code >> 8;
                        status = exit_code & 0x7f;
                return wait_noreap_copyout(wo, p, pid, uid, why, status);
        traced = ptrace_reparented(p);
        if (likely(!traced) && thread_group_leader(p)) {
                        task_io_get_inblock(p) +
                        task_io_get_oublock(p) +
                task_io_accounting_add(&psig->ioac, &p->ioac);
                task_io_accounting_add(&psig->ioac, &sig->ioac);
                spin_unlock_irq(&p->real_parent->sighand->siglock);
        if (!retval && infop)
        if (!retval && infop)
        if (!retval && infop) {
                if ((status & 0x7f) == 0) {
                        retval = put_user(status, &infop->si_status);
        if (!retval && infop)
                retval = put_user(pid, &infop->si_pid);
        if (!retval && infop)
                retval = put_user(uid, &infop->si_uid);
        if (thread_group_leader(p) &&
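
/*
 * Which status word applies to a stopped child: the per-task exit_code
 * for a ptracer, the group stop code for the real parent; returns NULL
 * when the task is not in a matching stop state.
 */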
static int *task_stopped_code(struct task_struct *p, bool ptrace)
                        return &p->signal->group_exit_code;
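
/* Handle sys_wait4() work for one task in a job-control or ptrace stop. */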
static int wait_task_stopped(struct wait_opts *wo,
                                int ptrace, struct task_struct *p)
        int retval, exit_code, *p_code, why;
        if (!task_stopped_code(p, ptrace))
        spin_lock_irq(&p->sighand->siglock);
        p_code = task_stopped_code(p, ptrace);
        exit_code = *p_code;
        spin_unlock_irq(&p->sighand->siglock);
        pid = task_pid_vnr(p);
        return wait_noreap_copyout(wo, p, pid, uid, why, exit_code);
        if (!retval && infop)
        if (!retval && infop)
        if (!retval && infop)
        if (!retval && infop)
                retval = put_user(exit_code, &infop->si_status);
        if (!retval && infop)
                retval = put_user(pid, &infop->si_pid);
        if (!retval && infop)
                retval = put_user(uid, &infop->si_uid);
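
/*
 * wait_task_continued(): report a child resumed by SIGCONT when the
 * waiter asked for WCONTINUED; SIGNAL_STOP_CONTINUED is checked and
 * consumed under siglock.
 */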
        spin_lock_irq(&p->sighand->siglock);
                spin_unlock_irq(&p->sighand->siglock);
        spin_unlock_irq(&p->sighand->siglock);
        pid = task_pid_vnr(p);
                retval = wait_noreap_copyout(wo, p, pid, uid,
                                             CLD_CONTINUED, SIGCONT);
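
/*
 * Consider @p for a wait by @wo; @ptrace says whether we reached it via
 * the ptraced list. A nonzero return ends the search; zero means keep
 * scanning the remaining children.
 */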
static int wait_consider_task(struct wait_opts *wo, int ptrace,
                                struct task_struct *p)
        int ret = eligible_child(wo, p);
                return wait_task_zombie(wo, p);
        if (likely(!ptrace) && p->ptrace && !ptrace_reparented(p))
        ret = wait_task_stopped(wo, ptrace, p);
        return wait_task_continued(wo, p);
                int ret = wait_consider_task(wo, 0, p);
                int ret = wait_consider_task(wo, 1, p);
        if (!eligible_pid(wo, p))
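
/*
 * The heart of wait4()/waitid(): register on the child-exit waitqueue,
 * scan the children and ptrace children of every thread in our group,
 * and sleep until a child changes state or a signal arrives.
 */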
static long do_wait(struct wait_opts *wo)
        trace_sched_process_wait(wo->wo_pid);
        init_waitqueue_func_entry(&wo->child_wait, child_wait_callback);
                retval = do_wait_thread(wo, tsk);
                retval = ptrace_do_wait(wo, tsk);
        if (!signal_pending(current)) {
        struct pid *pid = NULL;
                ret = put_user(0, &infop->si_status);
        struct pid *pid = NULL;
        else if (upid < 0) {
        } else if (upid == 0) {
#ifdef __ARCH_WANT_SYS_WAITPID