11 #include <asm/param.h>
13 #include <linux/capability.h>
15 #include <linux/kernel.h>
16 #include <linux/types.h>
17 #include <linux/timex.h>
19 #include <linux/rbtree.h>
22 #include <linux/errno.h>
27 #include <asm/ptrace.h>
28 #include <asm/cputime.h>
31 #include <linux/sem.h>
32 #include <linux/signal.h>
33 #include <linux/compiler.h>
39 #include <linux/seccomp.h>
44 #include <linux/time.h>
46 #include <linux/resource.h>
48 #include <linux/hrtimer.h>
55 #include <asm/processor.h>
69 #define CLONE_KERNEL (CLONE_FS | CLONE_FILES | CLONE_SIGHAND)
85 #define FIXED_1 (1<<FSHIFT)
86 #define LOAD_FREQ (5*HZ+1)
91 #define CALC_LOAD(load,exp,n) \
93 load += n*(FIXED_1-exp); \
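/*
 * Editorial sketch, not part of the original header: CALC_LOAD() updates an
 * exponentially-decayed average kept in FSHIFT-bit fixed point, with FIXED_1
 * representing 1.0.  Every LOAD_FREQ ticks the loadavg code does, in effect:
 *
 *	active = nr_active_tasks * FIXED_1;
 *	CALC_LOAD(avenrun[0], EXP_1, active);	// EXP_1 is the 1-minute decay
 *						// factor defined near these
 *						// macros (not shown in this
 *						// excerpt)
 *
 * i.e. load = load*exp/FIXED_1 + active*(FIXED_1 - exp)/FIXED_1, with the
 * final ">> FSHIFT" normalisation on the macro line that follows in the full
 * header.  /proc/loadavg prints avenrun[] >> FSHIFT as the integer part and
 * the low FSHIFT bits as the fraction.
 */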
115 #ifdef CONFIG_SCHED_DEBUG
144 #define TASK_RUNNING 0
145 #define TASK_INTERRUPTIBLE 1
146 #define TASK_UNINTERRUPTIBLE 2
147 #define __TASK_STOPPED 4
148 #define __TASK_TRACED 8
150 #define EXIT_ZOMBIE 16
154 #define TASK_WAKEKILL 128
155 #define TASK_WAKING 256
156 #define TASK_STATE_MAX 512
158 #define TASK_STATE_TO_CHAR_STR "RSDTtZXxKW"
164 #define TASK_KILLABLE (TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
165 #define TASK_STOPPED (TASK_WAKEKILL | __TASK_STOPPED)
166 #define TASK_TRACED (TASK_WAKEKILL | __TASK_TRACED)
169 #define TASK_NORMAL (TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
170 #define TASK_ALL (TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED)
173 #define TASK_REPORT (TASK_RUNNING | TASK_INTERRUPTIBLE | \
174 TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
177 #define task_is_traced(task) ((task->state & __TASK_TRACED) != 0)
178 #define task_is_stopped(task) ((task->state & __TASK_STOPPED) != 0)
179 #define task_is_dead(task) ((task)->exit_state != 0)
180 #define task_is_stopped_or_traced(task) \
181 ((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
182 #define task_contributes_to_load(task) \
183 ((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
184 (task->flags & PF_FROZEN) == 0)
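/*
 * Editorial usage sketch (not in the original header): the task_is_*()
 * predicates above are how callers test another task's state, e.g. a
 * wait()/ptrace path deciding how to treat a child:
 *
 *	if (task_is_stopped_or_traced(child))
 *		;	// child is in a job-control or ptrace stop
 *	else if (task_is_dead(child))
 *		;	// exit_state is set; only reaping remains
 *
 * task_contributes_to_load() is what the loadavg code uses to count
 * uninterruptible-but-not-frozen sleepers as "active" tasks.
 */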
186 #define __set_task_state(tsk, state_value) \
187 do { (tsk)->state = (state_value); } while (0)
188 #define set_task_state(tsk, state_value) \
189 set_mb((tsk)->state, (state_value))
202 #define __set_current_state(state_value) \
203 do { current->state = (state_value); } while (0)
204 #define set_current_state(state_value) \
205 set_mb(current->state, (state_value))
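/*
 * Editorial example, not part of the original header: the canonical sleep
 * pattern built on set_current_state().  "my_condition" is hypothetical.
 * The memory barrier implied by set_mb() makes the state change visible
 * before the condition is re-tested, so a concurrent wake_up() cannot be
 * lost between the test and schedule():
 *
 *	set_current_state(TASK_INTERRUPTIBLE);
 *	while (!my_condition) {
 *		schedule();
 *		set_current_state(TASK_INTERRUPTIBLE);
 *	}
 *	__set_current_state(TASK_RUNNING);
 */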
208 #define TASK_COMM_LEN 16
223 #ifdef CONFIG_PROVE_RCU
224 extern int lockdep_tasklist_lock_is_held(void);
235 #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ)
236 extern void nohz_balance_enter_idle(int cpu);
237 extern void set_cpu_sd_state_idle(void);
238 extern int get_nohz_timer_target(void);
240 static inline void nohz_balance_enter_idle(int cpu) { }
241 static inline void set_cpu_sd_state_idle(void) { }
273 #ifdef CONFIG_LOCKUP_DETECTOR
279 size_t *lenp, loff_t *ppos);
297 #ifdef CONFIG_DETECT_HUNG_TASK
304 size_t *lenp, loff_t *ppos);
307 enum { sysctl_hung_task_timeout_secs = 0 };
311 #define __sched __attribute__((__section__(".sched.text")))
319 #define MAX_SCHEDULE_TIMEOUT LONG_MAX
343 #define MAPCOUNT_ELF_CORE_MARGIN (5)
344 #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
354 unsigned long, unsigned long);
357 unsigned long len, unsigned long pgoff,
358 unsigned long flags);
370 #define SUID_DUMPABLE_DISABLED 0
371 #define SUID_DUMPABLE_ENABLED 1
372 #define SUID_DUMPABLE_SAFE 2
376 #define MMF_DUMPABLE 0
377 #define MMF_DUMP_SECURELY 1
379 #define MMF_DUMPABLE_BITS 2
380 #define MMF_DUMPABLE_MASK ((1 << MMF_DUMPABLE_BITS) - 1)
383 #define MMF_DUMP_ANON_PRIVATE 2
384 #define MMF_DUMP_ANON_SHARED 3
385 #define MMF_DUMP_MAPPED_PRIVATE 4
386 #define MMF_DUMP_MAPPED_SHARED 5
387 #define MMF_DUMP_ELF_HEADERS 6
388 #define MMF_DUMP_HUGETLB_PRIVATE 7
389 #define MMF_DUMP_HUGETLB_SHARED 8
391 #define MMF_DUMP_FILTER_SHIFT MMF_DUMPABLE_BITS
392 #define MMF_DUMP_FILTER_BITS 7
393 #define MMF_DUMP_FILTER_MASK \
394 (((1 << MMF_DUMP_FILTER_BITS) - 1) << MMF_DUMP_FILTER_SHIFT)
395 #define MMF_DUMP_FILTER_DEFAULT \
396 ((1 << MMF_DUMP_ANON_PRIVATE) | (1 << MMF_DUMP_ANON_SHARED) |\
397 (1 << MMF_DUMP_HUGETLB_PRIVATE) | MMF_DUMP_MASK_DEFAULT_ELF)
399 #ifdef CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS
400 # define MMF_DUMP_MASK_DEFAULT_ELF (1 << MMF_DUMP_ELF_HEADERS)
402 # define MMF_DUMP_MASK_DEFAULT_ELF 0
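/*
 * Editorial worked example (not in the original header): the MMF_DUMP_*
 * bits start at MMF_DUMP_FILTER_SHIFT (= 2), and /proc/<pid>/coredump_filter
 * exposes them shifted down by that amount.  With the definitions above,
 *
 *	MMF_DUMP_FILTER_DEFAULT = (1<<2) | (1<<3) | (1<<7) | MMF_DUMP_MASK_DEFAULT_ELF
 *
 * so anonymous private, anonymous shared and hugetlb-private mappings are
 * dumped by default (plus ELF headers when
 * CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is set), which reads back from
 * /proc/<pid>/coredump_filter as 0x23, or 0x33 with the ELF-headers bit.
 */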
405 #define MMF_VM_MERGEABLE 16
406 #define MMF_VM_HUGEPAGE 17
407 #define MMF_EXE_FILE_CHANGED 18
409 #define MMF_HAS_UPROBES 19
410 #define MMF_RECALC_UPROBES 20
412 #define MMF_INIT_MASK (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK)
453 #define prof_exp stime
454 #define virt_exp utime
455 #define sched_exp sum_exec_runtime
457 #define INIT_CPUTIME \
458 (struct task_cputime) { \
461 .sum_exec_runtime = 0, \
471 #define INIT_PREEMPT_COUNT (1 + PREEMPT_ACTIVE)
571 #ifdef CONFIG_SCHED_AUTOGROUP
572 struct autogroup *autogroup;
583 #ifndef CONFIG_VIRT_CPU_ACCOUNTING
611 #ifdef CONFIG_BSD_PROCESS_ACCT
614 #ifdef CONFIG_TASKSTATS
621 #ifdef CONFIG_CGROUPS
646 #define SIGNAL_STOP_STOPPED 0x00000001
647 #define SIGNAL_STOP_CONTINUED 0x00000002
648 #define SIGNAL_GROUP_EXIT 0x00000004
652 #define SIGNAL_CLD_STOPPED 0x00000010
653 #define SIGNAL_CLD_CONTINUED 0x00000020
654 #define SIGNAL_CLD_MASK (SIGNAL_CLD_STOPPED|SIGNAL_CLD_CONTINUED)
656 #define SIGNAL_UNKILLABLE 0x00000040
673 #ifdef CONFIG_INOTIFY_USER
677 #ifdef CONFIG_FANOTIFY
683 #ifdef CONFIG_POSIX_MQUEUE
685 unsigned long mq_bytes;
690 struct key *uid_keyring;
691 struct key *session_keyring;
698 #ifdef CONFIG_PERF_EVENTS
708 #define INIT_USER (&root_user)
714 #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
717 unsigned long pcount;
718 unsigned long long run_delay;
721 unsigned long long last_arrival,
726 #ifdef CONFIG_TASK_DELAY_ACCT
727 struct task_delay_info {
746 struct timespec blkio_start, blkio_end;
754 struct timespec freepages_start, freepages_end;
760 static inline int sched_info_on(void)
762 #ifdef CONFIG_SCHEDSTATS
764 #elif defined(CONFIG_TASK_DELAY_ACCT)
765 extern int delayacct_on;
792 # define SCHED_LOAD_RESOLUTION 10
793 # define scale_load(w) ((w) << SCHED_LOAD_RESOLUTION)
794 # define scale_load_down(w) ((w) >> SCHED_LOAD_RESOLUTION)
796 # define SCHED_LOAD_RESOLUTION 0
797 # define scale_load(w) (w)
798 # define scale_load_down(w) (w)
801 #define SCHED_LOAD_SHIFT (10 + SCHED_LOAD_RESOLUTION)
802 #define SCHED_LOAD_SCALE (1L << SCHED_LOAD_SHIFT)
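/*
 * Editorial note (not in the original header): with SCHED_LOAD_RESOLUTION
 * at 10, load weights carry 10 extra bits of fixed-point resolution, so the
 * nice-0 weight of 1024 is stored as scale_load(1024) = 1024 << 10 = 1048576
 * and recovered with scale_load_down().  SCHED_LOAD_SCALE is then 1 << 20;
 * in the low-resolution configuration it is the familiar 1 << 10 = 1024.
 */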
807 #define SCHED_POWER_SHIFT 10
808 #define SCHED_POWER_SCALE (1L << SCHED_POWER_SHIFT)
814 #define SD_LOAD_BALANCE 0x0001
815 #define SD_BALANCE_NEWIDLE 0x0002
816 #define SD_BALANCE_EXEC 0x0004
817 #define SD_BALANCE_FORK 0x0008
818 #define SD_BALANCE_WAKE 0x0010
819 #define SD_WAKE_AFFINE 0x0020
820 #define SD_SHARE_CPUPOWER 0x0080
821 #define SD_SHARE_PKG_RESOURCES 0x0200
822 #define SD_SERIALIZE 0x0400
823 #define SD_ASYM_PACKING 0x0800
824 #define SD_PREFER_SIBLING 0x1000
825 #define SD_OVERLAP 0x2000
827 extern int __weak arch_sd_sibiling_asym_packing(void);
829 struct sched_group_power {
835 unsigned int power, power_orig;
836 unsigned long next_update;
846 struct sched_group *next;
849 unsigned int group_weight;
850 struct sched_group_power *sgp;
862 static inline struct cpumask *sched_group_cpus(struct sched_group *sg)
871 static inline struct cpumask *sched_group_mask(struct sched_group *sg)
880 static inline unsigned int group_first_cpu(struct sched_group *group)
882 return cpumask_first(sched_group_cpus(group));
885 struct sched_domain_attr {
886 int relax_domain_level;
889 #define SD_ATTR_INIT (struct sched_domain_attr) { \
890 .relax_domain_level = -1, \
893 extern int sched_domain_level_max;
895 struct sched_domain {
897 struct sched_domain *parent;
898 struct sched_domain *child;
899 struct sched_group *groups;
900 unsigned long min_interval;
901 unsigned long max_interval;
902 unsigned int busy_factor;
903 unsigned int imbalance_pct;
904 unsigned int cache_nice_tries;
905 unsigned int busy_idx;
906 unsigned int idle_idx;
907 unsigned int newidle_idx;
908 unsigned int wake_idx;
909 unsigned int forkexec_idx;
910 unsigned int smt_gain;
915 unsigned long last_balance;
916 unsigned int balance_interval;
917 unsigned int nr_balance_failed;
921 #ifdef CONFIG_SCHEDSTATS
933 unsigned int alb_count;
934 unsigned int alb_failed;
935 unsigned int alb_pushed;
938 unsigned int sbe_count;
939 unsigned int sbe_balanced;
940 unsigned int sbe_pushed;
943 unsigned int sbf_count;
944 unsigned int sbf_balanced;
945 unsigned int sbf_pushed;
948 unsigned int ttwu_wake_remote;
949 unsigned int ttwu_move_affine;
950 unsigned int ttwu_move_balance;
952 #ifdef CONFIG_SCHED_DEBUG
960 unsigned int span_weight;
968 unsigned long span[0];
971 static inline struct cpumask *sched_domain_span(struct sched_domain *sd)
976 extern void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
977 struct sched_domain_attr *dattr_new);
981 void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms);
984 static inline int test_sd_parent(struct sched_domain *sd, int flag)
986 if (sd->parent && (sd->parent->flags & flag))
992 unsigned long default_scale_freq_power(struct sched_domain *sd, int cpu);
993 unsigned long default_scale_smt_power(struct sched_domain *sd, int cpu);
995 bool cpus_share_cache(int this_cpu, int that_cpu);
999 struct sched_domain_attr;
1002 partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
1003 struct sched_domain_attr *dattr_new)
1007 static inline bool cpus_share_cache(int this_cpu, int that_cpu)
1018 #ifdef ARCH_HAS_PREFETCH_SWITCH_STACK
1021 static inline void prefetch_stack(struct task_struct *t) { }
1030 struct sched_domain;
1035 #define WF_SYNC 0x01
1036 #define WF_FORK 0x02
1037 #define WF_MIGRATED 0x04
1039 #define ENQUEUE_WAKEUP 1
1040 #define ENQUEUE_HEAD 2
1042 #define ENQUEUE_WAKING 4
1044 #define ENQUEUE_WAKING 0
1047 #define DEQUEUE_SLEEP 1
1071 const struct cpumask *newmask);
1089 #ifdef CONFIG_FAIR_GROUP_SCHED
1098 #ifdef CONFIG_SCHEDSTATS
1099 struct sched_statistics {
1109 s64 sum_sleep_runtime;
1116 u64 nr_migrations_cold;
1117 u64 nr_failed_migrations_affine;
1118 u64 nr_failed_migrations_running;
1119 u64 nr_failed_migrations_hot;
1120 u64 nr_forced_migrations;
1123 u64 nr_wakeups_sync;
1124 u64 nr_wakeups_migrate;
1125 u64 nr_wakeups_local;
1126 u64 nr_wakeups_remote;
1127 u64 nr_wakeups_affine;
1128 u64 nr_wakeups_affine_attempts;
1129 u64 nr_wakeups_passive;
1130 u64 nr_wakeups_idle;
1147 #ifdef CONFIG_SCHEDSTATS
1148 struct sched_statistics statistics;
1151 #ifdef CONFIG_FAIR_GROUP_SCHED
1166 #ifdef CONFIG_RT_GROUP_SCHED
1179 #define RR_TIMESLICE (100 * HZ / 1000)
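/*
 * Editorial arithmetic (not in the original header): RR_TIMESLICE is the
 * default SCHED_RR quantum of 100 ms expressed in jiffies, so it evaluates
 * to 100 with HZ=1000, 25 with HZ=250, and 10 with HZ=100.
 */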
1208 #ifdef CONFIG_CGROUP_SCHED
1209 struct task_group *sched_task_group;
1212 #ifdef CONFIG_PREEMPT_NOTIFIERS
1226 #ifdef CONFIG_BLK_DEV_IO_TRACE
1227 unsigned int btrace_seq;
1234 #ifdef CONFIG_PREEMPT_RCU
1235 int rcu_read_lock_nesting;
1236 char rcu_read_unlock_special;
1239 #ifdef CONFIG_TREE_PREEMPT_RCU
1242 #ifdef CONFIG_RCU_BOOST
1246 #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
1247 struct sched_info sched_info;
1256 #ifdef CONFIG_COMPAT_BRK
1257 unsigned brk_randomized:1;
1259 #if defined(SPLIT_RSS_COUNTING)
1260 struct task_rss_stat rss_stat;
1284 #ifdef CONFIG_CC_STACKPROTECTOR
1286 unsigned long stack_canary;
1320 #ifndef CONFIG_VIRT_CPU_ACCOUNTING
1343 #ifdef CONFIG_SYSVIPC
1347 #ifdef CONFIG_DETECT_HUNG_TASK
1349 unsigned long last_switch_count;
1375 #ifdef CONFIG_AUDITSYSCALL
1377 unsigned int sessionid;
1391 #ifdef CONFIG_RT_MUTEXES
1398 #ifdef CONFIG_DEBUG_MUTEXES
1402 #ifdef CONFIG_TRACE_IRQFLAGS
1403 unsigned int irq_events;
1404 unsigned long hardirq_enable_ip;
1405 unsigned long hardirq_disable_ip;
1406 unsigned int hardirq_enable_event;
1407 unsigned int hardirq_disable_event;
1408 int hardirqs_enabled;
1409 int hardirq_context;
1410 unsigned long softirq_disable_ip;
1411 unsigned long softirq_enable_ip;
1412 unsigned int softirq_disable_event;
1413 unsigned int softirq_enable_event;
1414 int softirqs_enabled;
1415 int softirq_context;
1417 #ifdef CONFIG_LOCKDEP
1418 # define MAX_LOCK_DEPTH 48UL
1421 unsigned int lockdep_recursion;
1423 gfp_t lockdep_reclaim_gfp;
1447 #if defined(CONFIG_TASK_XACCT)
1452 #ifdef CONFIG_CPUSETS
1455 int cpuset_mem_spread_rotor;
1456 int cpuset_slab_spread_rotor;
1458 #ifdef CONFIG_CGROUPS
1460 struct css_set __rcu *cgroups;
1466 #ifdef CONFIG_COMPAT
1467 struct compat_robust_list_head __user *compat_robust_list;
1472 #ifdef CONFIG_PERF_EVENTS
1474 struct mutex perf_event_mutex;
1480 short pref_node_fork;
1491 #ifdef CONFIG_TASK_DELAY_ACCT
1492 struct task_delay_info *delays;
1494 #ifdef CONFIG_FAULT_INJECTION
1505 #ifdef CONFIG_LATENCYTOP
1506 int latency_record_count;
1507 struct latency_record latency_record[LT_SAVECOUNT];
1516 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
1522 unsigned long long ftrace_timestamp;
1531 #ifdef CONFIG_TRACING
1533 unsigned long trace;
1535 unsigned long trace_recursion;
1538 struct memcg_batch_info {
1541 unsigned long nr_pages;
1542 unsigned long memsw_nr_pages;
1545 #ifdef CONFIG_HAVE_HW_BREAKPOINT
1548 #ifdef CONFIG_UPROBES
1549 struct uprobe_task *utask;
1554 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
1569 #define MAX_USER_RT_PRIO 100
1570 #define MAX_RT_PRIO MAX_USER_RT_PRIO
1572 #define MAX_PRIO (MAX_RT_PRIO + 40)
1573 #define DEFAULT_PRIO (MAX_RT_PRIO + 20)
1575 static inline int rt_prio(int prio)
1584 return rt_prio(p->prio);
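/*
 * Editorial worked example (not in the original header): with
 * MAX_USER_RT_PRIO = 100, the kernel priority scale runs 0..139.
 * Priorities 0..99 are real-time (rt_prio() returns true there), and
 * 100..139 carry the nice range -20..19, so DEFAULT_PRIO = 120 corresponds
 * to nice 0 and MAX_PRIO = 140 is one past the weakest nice 19 level.  The
 * NICE_TO_PRIO()/PRIO_TO_NICE() conversions live elsewhere in the scheduler
 * headers.
 */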
1656 return pid_vnr(task_tgid(tsk));
1708 static inline int is_global_init(struct task_struct *tsk)
1710 return tsk->pid == 1;
1722 #define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)
1726 static inline void put_task_struct(struct task_struct *t)
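/*
 * Editorial usage sketch (not in the original header): get_task_struct()/
 * put_task_struct() pin a task_struct across a region where it might
 * otherwise be released, e.g. after looking the task up under RCU:
 *
 *	rcu_read_lock();
 *	p = find_task_by_vpid(pid);	// hypothetical caller context
 *	if (p)
 *		get_task_struct(p);
 *	rcu_read_unlock();
 *	...use p...
 *	if (p)
 *		put_task_struct(p);	// may free the task_struct
 */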
1738 #define PF_EXITING 0x00000004
1739 #define PF_EXITPIDONE 0x00000008
1740 #define PF_VCPU 0x00000010
1741 #define PF_WQ_WORKER 0x00000020
1742 #define PF_FORKNOEXEC 0x00000040
1743 #define PF_MCE_PROCESS 0x00000080
1744 #define PF_SUPERPRIV 0x00000100
1745 #define PF_DUMPCORE 0x00000200
1746 #define PF_SIGNALED 0x00000400
1747 #define PF_MEMALLOC 0x00000800
1748 #define PF_NPROC_EXCEEDED 0x00001000
1749 #define PF_USED_MATH 0x00002000
1750 #define PF_NOFREEZE 0x00008000
1751 #define PF_FROZEN 0x00010000
1752 #define PF_FSTRANS 0x00020000
1753 #define PF_KSWAPD 0x00040000
1754 #define PF_LESS_THROTTLE 0x00100000
1755 #define PF_KTHREAD 0x00200000
1756 #define PF_RANDOMIZE 0x00400000
1757 #define PF_SWAPWRITE 0x00800000
1758 #define PF_SPREAD_PAGE 0x01000000
1759 #define PF_SPREAD_SLAB 0x02000000
1760 #define PF_THREAD_BOUND 0x04000000
1761 #define PF_MCE_EARLY 0x08000000
1762 #define PF_MEMPOLICY 0x10000000
1763 #define PF_MUTEX_TESTER 0x20000000
1764 #define PF_FREEZER_SKIP 0x40000000
1777 #define clear_stopped_child_used_math(child) do { (child)->flags &= ~PF_USED_MATH; } while (0)
1778 #define set_stopped_child_used_math(child) do { (child)->flags |= PF_USED_MATH; } while (0)
1779 #define clear_used_math() clear_stopped_child_used_math(current)
1780 #define set_used_math() set_stopped_child_used_math(current)
1781 #define conditional_stopped_child_used_math(condition, child) \
1782 do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0)
1783 #define conditional_used_math(condition) \
1784 conditional_stopped_child_used_math(condition, current)
1785 #define copy_to_stopped_child_used_math(child) \
1786 do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0)
1788 #define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
1789 #define used_math() tsk_used_math(current)
1794 #define JOBCTL_STOP_SIGMASK 0xffff
1796 #define JOBCTL_STOP_DEQUEUED_BIT 16
1797 #define JOBCTL_STOP_PENDING_BIT 17
1798 #define JOBCTL_STOP_CONSUME_BIT 18
1799 #define JOBCTL_TRAP_STOP_BIT 19
1800 #define JOBCTL_TRAP_NOTIFY_BIT 20
1801 #define JOBCTL_TRAPPING_BIT 21
1802 #define JOBCTL_LISTENING_BIT 22
1804 #define JOBCTL_STOP_DEQUEUED (1 << JOBCTL_STOP_DEQUEUED_BIT)
1805 #define JOBCTL_STOP_PENDING (1 << JOBCTL_STOP_PENDING_BIT)
1806 #define JOBCTL_STOP_CONSUME (1 << JOBCTL_STOP_CONSUME_BIT)
1807 #define JOBCTL_TRAP_STOP (1 << JOBCTL_TRAP_STOP_BIT)
1808 #define JOBCTL_TRAP_NOTIFY (1 << JOBCTL_TRAP_NOTIFY_BIT)
1809 #define JOBCTL_TRAPPING (1 << JOBCTL_TRAPPING_BIT)
1810 #define JOBCTL_LISTENING (1 << JOBCTL_LISTENING_BIT)
1812 #define JOBCTL_TRAP_MASK (JOBCTL_TRAP_STOP | JOBCTL_TRAP_NOTIFY)
1813 #define JOBCTL_PENDING_MASK (JOBCTL_STOP_PENDING | JOBCTL_TRAP_MASK)
1821 #ifdef CONFIG_PREEMPT_RCU
1823 #define RCU_READ_UNLOCK_BLOCKED (1 << 0)
1824 #define RCU_READ_UNLOCK_NEED_QS (1 << 1)
1826 static inline void rcu_copy_process(struct task_struct *p)
1828 p->rcu_read_lock_nesting = 0;
1829 p->rcu_read_unlock_special = 0;
1830 #ifdef CONFIG_TREE_PREEMPT_RCU
1831 p->rcu_blocked_node = NULL;
1833 #ifdef CONFIG_RCU_BOOST
1834 p->rcu_boost_mutex = NULL;
1836 INIT_LIST_HEAD(&p->rcu_node_entry);
1841 static inline void rcu_copy_process(struct task_struct *p)
1850 #ifdef CONFIG_RCU_USER_QS
1851 rcu_user_hooks_switch(prev, next);
1856 unsigned long orig_flags, unsigned long flags)
1858 task->flags &= ~flags;
1864 const struct cpumask *new_mask);
1867 const struct cpumask *new_mask);
1869 static inline void do_set_cpus_allowed(struct task_struct *p,
1870 const struct cpumask *new_mask)
1873 static inline int set_cpus_allowed_ptr(struct task_struct *p,
1874 const struct cpumask *new_mask)
1883 void calc_load_enter_idle(void);
1884 void calc_load_exit_idle(void);
1886 static inline void calc_load_enter_idle(void) { }
1887 static inline void calc_load_exit_idle(void) { }
1890 #ifndef CONFIG_CPUMASK_OFFSTACK
1893 return set_cpus_allowed_ptr(p, &new_mask);
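/*
 * Editorial usage sketch (not in the original header): affinity changes go
 * through set_cpus_allowed_ptr(); e.g. pinning a task to CPU 0 (error
 * handling omitted):
 *
 *	int err = set_cpus_allowed_ptr(p, cpumask_of(0));
 *
 * The non-_ptr wrapper above just passes the address of a cpumask_t taken
 * by value, which is why it is only provided when cpumasks are not
 * off-stack.
 */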
1916 #ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
1917 static inline void sched_clock_tick(void)
1921 static inline void sched_clock_idle_sleep_event(void)
1925 static inline void sched_clock_idle_wakeup_event(u64 delta_ns)
1935 extern int sched_clock_stable;
1937 extern void sched_clock_tick(void);
1938 extern void sched_clock_idle_sleep_event(void);
1939 extern void sched_clock_idle_wakeup_event(u64 delta_ns);
1942 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
1948 extern void enable_sched_clock_irqtime(void);
1949 extern void disable_sched_clock_irqtime(void);
1951 static inline void enable_sched_clock_irqtime(void) {}
1952 static inline void disable_sched_clock_irqtime(void) {}
1955 extern unsigned long long
1962 #define sched_exec() {}
1965 extern void sched_clock_idle_sleep_event(void);
1966 extern void sched_clock_idle_wakeup_event(u64 delta_ns);
1968 #ifdef CONFIG_HOTPLUG_CPU
1969 extern void idle_task_exit(void);
1971 static inline void idle_task_exit(void) {}
1974 #if defined(CONFIG_NO_HZ) && defined(CONFIG_SMP)
1975 extern void wake_up_idle_cpu(int cpu);
1977 static inline void wake_up_idle_cpu(int cpu) { }
1993 #ifdef CONFIG_SCHED_DEBUG
2004 #ifdef CONFIG_SCHED_DEBUG
2005 static inline unsigned int get_sysctl_timer_migration(void)
2010 static inline unsigned int get_sysctl_timer_migration(void)
2019 void __user *buffer, size_t *lenp,
2022 #ifdef CONFIG_SCHED_AUTOGROUP
2023 extern unsigned int sysctl_sched_autogroup_enabled;
2025 extern void sched_autogroup_create_attach(struct task_struct *p);
2026 extern void sched_autogroup_detach(struct task_struct *p);
2029 #ifdef CONFIG_PROC_FS
2031 extern int proc_sched_autogroup_set_nice(struct task_struct *p, int nice);
2034 static inline void sched_autogroup_create_attach(struct task_struct *p) { }
2035 static inline void sched_autogroup_detach(struct task_struct *p) { }
2040 #ifdef CONFIG_CFS_BANDWIDTH
2041 extern unsigned int sysctl_sched_cfs_bandwidth_slice;
2044 #ifdef CONFIG_RT_MUTEXES
2048 static inline bool tsk_is_pi_blocked(struct task_struct *tsk)
2050 return tsk->pi_blocked_on != NULL;
2057 # define rt_mutex_adjust_pi(p) do { } while (0)
2058 static inline bool tsk_is_pi_blocked(struct task_struct *tsk)
2080 static inline bool is_idle_task(const struct task_struct *p)
2099 #ifndef __HAVE_ARCH_KSTACK_END
2100 static inline int kstack_end(void *addr)
2105 return !(((unsigned long)addr+sizeof(void*)-1) & (THREAD_SIZE-sizeof(void*)));
2142 #include <asm/current.h>
2150 extern void kick_process(struct task_struct *tsk);
2152 static inline void kick_process(struct task_struct *tsk) { }
2166 unsigned long flags;
2171 spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
2201 static inline void restore_saved_sigmask(void)
2203 if (test_and_clear_restore_sigmask())
2207 static inline sigset_t *sigmask_to_save(void)
2210 if (unlikely(test_restore_sigmask()))
2211 res = &current->saved_sigmask;
2215 static inline int kill_cad_pid(int sig, int priv)
2217 return kill_pid(cad_pid, sig, priv);
2221 #define SEND_SIG_NOINFO ((struct siginfo *) 0)
2222 #define SEND_SIG_PRIV ((struct siginfo *) 1)
2223 #define SEND_SIG_FORCED ((struct siginfo *) 2)
2228 static inline int on_sig_stack(unsigned long sp)
2230 #ifdef CONFIG_STACK_GROWSUP
2231 return sp >= current->sas_ss_sp &&
2234 return sp > current->sas_ss_sp &&
2239 static inline int sas_ss_flags(unsigned long sp)
2252 static inline void mmdrop(struct mm_struct * mm)
2273 extern int copy_thread(unsigned long, unsigned long, unsigned long,
2286 extern void daemonize(const char *, ...);
2291 const char __user * const __user *,
2292 const char __user * const __user *, struct pt_regs *);
2293 extern long do_fork(unsigned long, unsigned long, struct pt_regs *, unsigned long, int __user *, int __user *);
2295 #ifdef CONFIG_GENERIC_KERNEL_THREAD
2303 void scheduler_ipi(void);
2306 static inline void scheduler_ipi(void) { }
2307 static inline unsigned long wait_task_inactive(struct task_struct *p,
2314 #define next_task(p) \
2315 list_entry_rcu((p)->tasks.next, struct task_struct, tasks)
2317 #define for_each_process(p) \
2318 for (p = &init_task ; (p = next_task(p)) != &init_task ; )
2326 #define do_each_thread(g, t) \
2327 for (g = t = &init_task ; (g = t = next_task(g)) != &init_task ; ) do
2329 #define while_each_thread(g, t) \
2330 while ((t = next_thread(t)) != g)
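/*
 * Editorial usage sketch (not in the original header): the iterators above
 * walk the task list and must run under rcu_read_lock() (or with
 * tasklist_lock read-held), e.g.:
 *
 *	struct task_struct *g, *t;
 *
 *	rcu_read_lock();
 *	do_each_thread(g, t) {
 *		// g is the group leader, t each thread in the group
 *	} while_each_thread(g, t);
 *	rcu_read_unlock();
 */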
2332 static inline int get_nr_threads(struct task_struct *tsk)
2334 return tsk->signal->nr_threads;
2337 static inline bool thread_group_leader(struct task_struct *p)
2348 static inline int has_group_leader_pid(struct task_struct *p)
2365 static inline int thread_group_empty(struct task_struct *p)
2370 #define delay_group_leader(p) \
2371 (thread_group_leader(p) && !thread_group_empty(p))
2383 static inline void task_lock(struct task_struct *p)
2388 static inline void task_unlock(struct task_struct *p)
2394 unsigned long *flags);
2397 unsigned long *flags)
2406 static inline void unlock_task_sighand(struct task_struct *tsk,
2407 unsigned long *flags)
2409 spin_unlock_irqrestore(&tsk->sighand->siglock, *flags);
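/*
 * Editorial usage sketch (not in the original header): lock_task_sighand(),
 * which pairs with unlock_task_sighand() above, takes ->sighand->siglock
 * only if the sighand is still present, so the usual pattern is:
 *
 *	unsigned long flags;
 *
 *	if (lock_task_sighand(tsk, &flags)) {
 *		// tsk->sighand is stable and siglock is held here
 *		unlock_task_sighand(tsk, &flags);
 *	}
 */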
2412 #ifdef CONFIG_CGROUPS
2413 static inline void threadgroup_change_begin(struct task_struct *tsk)
2417 static inline void threadgroup_change_end(struct task_struct *tsk)
2442 static inline void threadgroup_lock(struct task_struct *tsk)
2458 static inline void threadgroup_unlock(struct task_struct *tsk)
2464 static inline void threadgroup_change_begin(struct task_struct *tsk) {}
2465 static inline void threadgroup_change_end(struct task_struct *tsk) {}
2466 static inline void threadgroup_lock(struct task_struct *tsk) {}
2467 static inline void threadgroup_unlock(struct task_struct *tsk) {}
2470 #ifndef __HAVE_THREAD_FUNCTIONS
2472 #define task_thread_info(task) ((struct thread_info *)(task)->stack)
2473 #define task_stack_page(task) ((task)->stack)
2488 static inline int object_is_on_stack(void *obj)
2492 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
2497 #ifdef CONFIG_DEBUG_STACK_USAGE
2498 static inline unsigned long stack_not_used(struct task_struct *p)
2513 static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag)
2518 static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
2523 static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
2528 static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
2533 static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
2538 static inline void set_tsk_need_resched(struct task_struct *tsk)
2543 static inline void clear_tsk_need_resched(struct task_struct *tsk)
2548 static inline int test_tsk_need_resched(struct task_struct *tsk)
2553 static inline int restart_syscall(void)
2559 static inline int signal_pending(struct task_struct *p)
2564 static inline int __fatal_signal_pending(struct task_struct *p)
2569 static inline int fatal_signal_pending(struct task_struct *p)
2571 return signal_pending(p) && __fatal_signal_pending(p);
2578 if (!signal_pending(p))
2584 static inline int need_resched(void)
2598 #define cond_resched() ({ \
2599 __might_sleep(__FILE__, __LINE__, 0); \
2605 #ifdef CONFIG_PREEMPT_COUNT
2606 #define PREEMPT_LOCK_OFFSET PREEMPT_OFFSET
2608 #define PREEMPT_LOCK_OFFSET 0
2611 #define cond_resched_lock(lock) ({ \
2612 __might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET); \
2613 __cond_resched_lock(lock); \
2618 #define cond_resched_softirq() ({ \
2619 __might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET); \
2620 __cond_resched_softirq(); \
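/*
 * Editorial usage sketch (not in the original header): cond_resched() and
 * friends are dropped into long kernel loops so that, even without
 * CONFIG_PREEMPT, other tasks get a chance to run:
 *
 *	for (i = 0; i < nr_items; i++) {
 *		process_item(i);		// hypothetical work
 *		cond_resched();
 *	}
 *
 * cond_resched_lock(lock) additionally drops and re-takes the spinlock
 * around the reschedule point, and cond_resched_softirq() briefly
 * re-enables softirqs around it.
 */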
2628 static inline int spin_needbreak(spinlock_t *lock)
2630 #ifdef CONFIG_PREEMPT
2631 return spin_is_contended(lock);
2664 static inline unsigned int task_cpu(const struct task_struct *p)
2669 extern void set_task_cpu(struct task_struct *p, unsigned int cpu);
2673 static inline unsigned int task_cpu(const struct task_struct *p)
2678 static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
2689 #ifdef CONFIG_CGROUP_SCHED
2693 extern struct task_group *sched_create_group(struct task_group *parent);
2694 extern void sched_destroy_group(struct task_group *tg);
2695 extern void sched_move_task(struct task_struct *tsk);
2696 #ifdef CONFIG_FAIR_GROUP_SCHED
2697 extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);
2698 extern unsigned long sched_group_shares(struct task_group *tg);
2700 #ifdef CONFIG_RT_GROUP_SCHED
2701 extern int sched_group_set_rt_runtime(struct task_group *tg,
2702 long rt_runtime_us);
2703 extern long sched_group_rt_runtime(struct task_group *tg);
2704 extern int sched_group_set_rt_period(struct task_group *tg,
2706 extern long sched_group_rt_period(struct task_group *tg);
2707 extern int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk);
2714 #ifdef CONFIG_TASK_XACCT
2717 tsk->ioac.rchar += amt;
2722 tsk->ioac.wchar += amt;
2725 static inline void inc_syscr(struct task_struct *tsk)
2730 static inline void inc_syscw(struct task_struct *tsk)
2743 static inline void inc_syscr(struct task_struct *tsk)
2747 static inline void inc_syscw(struct task_struct *tsk)
2752 #ifndef TASK_SIZE_OF
2753 #define TASK_SIZE_OF(tsk) TASK_SIZE
2756 #ifdef CONFIG_MM_OWNER
2757 extern void mm_update_next_owner(struct mm_struct *mm);
2760 static inline void mm_update_next_owner(struct mm_struct *mm)
2769 static inline unsigned long task_rlimit(const struct task_struct *tsk,
2775 static inline unsigned long task_rlimit_max(const struct task_struct *tsk,
2781 static inline unsigned long rlimit(unsigned int limit)
2783 return task_rlimit(current, limit);
2786 static inline unsigned long rlimit_max(unsigned int limit)
2788 return task_rlimit_max(current, limit);