|
#define | CLONE_KERNEL (CLONE_FS | CLONE_FILES | CLONE_SIGHAND) |
|
#define | FSHIFT 11 /* nr of bits of precision */ |
|
#define | FIXED_1 (1<<FSHIFT) /* 1.0 as fixed-point */ |
|
#define | LOAD_FREQ (5*HZ+1) /* 5 sec intervals */ |
|
#define | EXP_1 1884 /* 1/exp(5sec/1min) as fixed-point */ |
|
#define | EXP_5 2014 /* 1/exp(5sec/5min) */ |
|
#define | EXP_15 2037 /* 1/exp(5sec/15min) */ |
|
#define | CALC_LOAD(load, exp, n) load *= exp; load += n*(FIXED_1-exp); load >>= FSHIFT;
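
The constants above implement the classic load-average smoothing: every LOAD_FREQ ticks the kernel folds the current count of active tasks into a fixed-point exponential moving average. A minimal, userspace-compilable sketch of that arithmetic (the constants are copied from the definitions above; the driver loop and printing are illustrative only):

    /* Standalone sketch of the fixed-point load-average math; mirrors the
     * kernel constants and CALC_LOAD macro defined above. */
    #include <stdio.h>

    #define FSHIFT   11
    #define FIXED_1  (1 << FSHIFT)
    #define EXP_5    2014            /* 1/exp(5sec/5min) in fixed-point */

    #define CALC_LOAD(load, exp, n) \
            load *= exp; \
            load += n * (FIXED_1 - exp); \
            load >>= FSHIFT;

    int main(void)
    {
            unsigned long avenrun = 0;           /* 5-minute average, fixed-point */
            unsigned long active = 3 * FIXED_1;  /* pretend 3 runnable tasks */
            int i;

            for (i = 0; i < 24; i++) {           /* 24 samples = 2 minutes of LOAD_FREQ */
                    CALC_LOAD(avenrun, EXP_5, active);
                    printf("%lu.%02lu\n", avenrun >> FSHIFT,
                           (avenrun & (FIXED_1 - 1)) * 100 / FIXED_1);
            }
            return 0;
    }

Each sample decays the old average by EXP_5/FIXED_1 and mixes in the new task count, so the printed value converges toward 3.00.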
|
#define | TASK_RUNNING 0 |
|
#define | TASK_INTERRUPTIBLE 1 |
|
#define | TASK_UNINTERRUPTIBLE 2 |
|
#define | __TASK_STOPPED 4 |
|
#define | __TASK_TRACED 8 |
|
#define | EXIT_ZOMBIE 16 |
|
#define | EXIT_DEAD 32 |
|
#define | TASK_DEAD 64 |
|
#define | TASK_WAKEKILL 128 |
|
#define | TASK_WAKING 256 |
|
#define | TASK_STATE_MAX 512 |
|
#define | TASK_STATE_TO_CHAR_STR "RSDTtZXxKW" |
|
#define | TASK_KILLABLE (TASK_WAKEKILL | TASK_UNINTERRUPTIBLE) |
|
#define | TASK_STOPPED (TASK_WAKEKILL | __TASK_STOPPED) |
|
#define | TASK_TRACED (TASK_WAKEKILL | __TASK_TRACED) |
|
#define | TASK_NORMAL (TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE) |
|
#define | TASK_ALL (TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED) |
|
#define | TASK_REPORT (TASK_RUNNING | TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE | __TASK_STOPPED | __TASK_TRACED)
|
#define | task_is_traced(task) ((task->state & __TASK_TRACED) != 0) |
|
#define | task_is_stopped(task) ((task->state & __TASK_STOPPED) != 0) |
|
#define | task_is_dead(task) ((task)->exit_state != 0) |
|
#define | task_is_stopped_or_traced(task) ((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0) |
|
#define | task_contributes_to_load(task) ((task->state & TASK_UNINTERRUPTIBLE) != 0 && (task->flags & PF_FROZEN) == 0)
|
#define | __set_task_state(tsk, state_value) do { (tsk)->state = (state_value); } while (0) |
|
#define | set_task_state(tsk, state_value) set_mb((tsk)->state, (state_value)) |
|
#define | __set_current_state(state_value) do { current->state = (state_value); } while (0) |
|
#define | set_current_state(state_value) set_mb(current->state, (state_value)) |
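
These state macros are the building blocks of the canonical kernel sleep loop: set the state (with a memory barrier, via set_mb()) before testing the wake-up condition, so a concurrent waker cannot be missed. A hedged sketch, assuming a kernel context where `condition` stands in for whatever the caller is actually waiting on:

    #include <linux/sched.h>
    #include <linux/errno.h>

    /* Sleep until *condition becomes nonzero or a signal arrives. */
    static int wait_for_condition(volatile int *condition)
    {
            for (;;) {
                    set_current_state(TASK_INTERRUPTIBLE); /* barrier before test */
                    if (*condition)
                            break;
                    if (signal_pending(current)) {
                            __set_current_state(TASK_RUNNING);
                            return -ERESTARTSYS;
                    }
                    schedule();                            /* sleep until woken */
            }
            __set_current_state(TASK_RUNNING);
            return 0;
    }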
|
#define | TASK_COMM_LEN 16 |
|
#define | __sched __attribute__((__section__(".sched.text"))) |
|
#define | MAX_SCHEDULE_TIMEOUT LONG_MAX |
|
#define | MAPCOUNT_ELF_CORE_MARGIN (5) |
|
#define | DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN) |
|
#define | SUID_DUMPABLE_DISABLED 0 |
|
#define | SUID_DUMPABLE_ENABLED 1 |
|
#define | SUID_DUMPABLE_SAFE 2 |
|
#define | MMF_DUMPABLE 0 /* core dump is permitted */ |
|
#define | MMF_DUMP_SECURELY 1 /* core file is readable only by root */ |
|
#define | MMF_DUMPABLE_BITS 2 |
|
#define | MMF_DUMPABLE_MASK ((1 << MMF_DUMPABLE_BITS) - 1) |
|
#define | MMF_DUMP_ANON_PRIVATE 2 |
|
#define | MMF_DUMP_ANON_SHARED 3 |
|
#define | MMF_DUMP_MAPPED_PRIVATE 4 |
|
#define | MMF_DUMP_MAPPED_SHARED 5 |
|
#define | MMF_DUMP_ELF_HEADERS 6 |
|
#define | MMF_DUMP_HUGETLB_PRIVATE 7 |
|
#define | MMF_DUMP_HUGETLB_SHARED 8 |
|
#define | MMF_DUMP_FILTER_SHIFT MMF_DUMPABLE_BITS |
|
#define | MMF_DUMP_FILTER_BITS 7 |
|
#define | MMF_DUMP_FILTER_MASK (((1 << MMF_DUMP_FILTER_BITS) - 1) << MMF_DUMP_FILTER_SHIFT) |
|
#define | MMF_DUMP_FILTER_DEFAULT ((1 << MMF_DUMP_ANON_PRIVATE) | (1 << MMF_DUMP_ANON_SHARED) | (1 << MMF_DUMP_HUGETLB_PRIVATE) | MMF_DUMP_MASK_DEFAULT_ELF)
|
#define | MMF_DUMP_MASK_DEFAULT_ELF 0 |
|
#define | MMF_VM_MERGEABLE 16 /* KSM may merge identical pages */ |
|
#define | MMF_VM_HUGEPAGE 17 /* set when VM_HUGEPAGE is set on vma */ |
|
#define | MMF_EXE_FILE_CHANGED 18 /* see prctl_set_mm_exe_file() */ |
|
#define | MMF_HAS_UPROBES 19 /* has uprobes */ |
|
#define | MMF_RECALC_UPROBES 20 /* MMF_HAS_UPROBES can be wrong */ |
|
#define | MMF_INIT_MASK (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK) |
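
The MMF_DUMP_* values are bit numbers within mm->flags: the low MMF_DUMPABLE_BITS hold the dumpable mode, and the following MMF_DUMP_FILTER_BITS select which mapping types are written to a core dump. A small sketch of testing one such bit (the helper name is hypothetical):

    #include <linux/sched.h>
    #include <linux/mm_types.h>
    #include <linux/bitops.h>

    /* Would shared anonymous mappings be included in this mm's core dump? */
    static int dump_anon_shared_allowed(struct mm_struct *mm)
    {
            return test_bit(MMF_DUMP_ANON_SHARED, &mm->flags);
    }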
|
#define | prof_exp stime |
|
#define | virt_exp utime |
|
#define | sched_exp sum_exec_runtime |
|
#define | INIT_CPUTIME (struct task_cputime) { .utime = 0, .stime = 0, .sum_exec_runtime = 0, }
|
#define | INIT_PREEMPT_COUNT (1 + PREEMPT_ACTIVE) |
|
#define | SIGNAL_STOP_STOPPED 0x00000001 /* job control stop in effect */ |
|
#define | SIGNAL_STOP_CONTINUED 0x00000002 /* SIGCONT since WCONTINUED reap */ |
|
#define | SIGNAL_GROUP_EXIT 0x00000004 /* group exit in progress */ |
|
#define | SIGNAL_CLD_STOPPED 0x00000010 |
|
#define | SIGNAL_CLD_CONTINUED 0x00000020 |
|
#define | SIGNAL_CLD_MASK (SIGNAL_CLD_STOPPED|SIGNAL_CLD_CONTINUED) |
|
#define | SIGNAL_UNKILLABLE 0x00000040 /* for init: ignore fatal signals */ |
|
#define | INIT_USER (&root_user) |
|
#define | SCHED_LOAD_RESOLUTION 0 |
|
#define | scale_load(w) (w) |
|
#define | scale_load_down(w) (w) |
|
#define | SCHED_LOAD_SHIFT (10 + SCHED_LOAD_RESOLUTION) |
|
#define | SCHED_LOAD_SCALE (1L << SCHED_LOAD_SHIFT) |
|
#define | SCHED_POWER_SHIFT 10 |
|
#define | SCHED_POWER_SCALE (1L << SCHED_POWER_SHIFT) |
|
#define | WF_SYNC 0x01 /* waker goes to sleep after wakeup */
|
#define | WF_FORK 0x02 /* child wakeup after fork */ |
|
#define | WF_MIGRATED 0x04 /* internal use, task got migrated */ |
|
#define | ENQUEUE_WAKEUP 1 |
|
#define | ENQUEUE_HEAD 2 |
|
#define | ENQUEUE_WAKING 0 |
|
#define | DEQUEUE_SLEEP 1 |
|
#define | RR_TIMESLICE (100 * HZ / 1000) |
|
#define | tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed) |
|
#define | MAX_USER_RT_PRIO 100 |
|
#define | MAX_RT_PRIO MAX_USER_RT_PRIO |
|
#define | MAX_PRIO (MAX_RT_PRIO + 40) |
|
#define | DEFAULT_PRIO (MAX_RT_PRIO + 20) |
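
Real-time priorities occupy [0, MAX_RT_PRIO), and the 40 nice levels map onto the slots above them, so DEFAULT_PRIO corresponds to nice 0. A sketch of that arithmetic (the kernel's NICE_TO_PRIO macro does the equivalent; the function name here is illustrative):

    /* Map a nice value (-20..19) onto the non-RT priority range; requires
     * MAX_RT_PRIO from the definitions above. */
    static inline int nice_to_prio(long nice)
    {
            return MAX_RT_PRIO + nice + 20;   /* nice -20..19 -> prio 100..139 */
    }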
|
#define | get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0) |
|
#define | PF_EXITING 0x00000004 /* getting shut down */ |
|
#define | PF_EXITPIDONE 0x00000008 /* pi exit done on shut down */ |
|
#define | PF_VCPU 0x00000010 /* I'm a virtual CPU */ |
|
#define | PF_WQ_WORKER 0x00000020 /* I'm a workqueue worker */ |
|
#define | PF_FORKNOEXEC 0x00000040 /* forked but didn't exec */ |
|
#define | PF_MCE_PROCESS 0x00000080 /* process policy on mce errors */ |
|
#define | PF_SUPERPRIV 0x00000100 /* used super-user privileges */ |
|
#define | PF_DUMPCORE 0x00000200 /* dumped core */ |
|
#define | PF_SIGNALED 0x00000400 /* killed by a signal */ |
|
#define | PF_MEMALLOC 0x00000800 /* Allocating memory */ |
|
#define | PF_NPROC_EXCEEDED 0x00001000 /* set_user noticed that RLIMIT_NPROC was exceeded */ |
|
#define | PF_USED_MATH 0x00002000 /* if unset the fpu must be initialized before use */ |
|
#define | PF_NOFREEZE 0x00008000 /* this thread should not be frozen */ |
|
#define | PF_FROZEN 0x00010000 /* frozen for system suspend */ |
|
#define | PF_FSTRANS 0x00020000 /* inside a filesystem transaction */ |
|
#define | PF_KSWAPD 0x00040000 /* I am kswapd */ |
|
#define | PF_LESS_THROTTLE 0x00100000 /* Throttle me less: I clean memory */ |
|
#define | PF_KTHREAD 0x00200000 /* I am a kernel thread */ |
|
#define | PF_RANDOMIZE 0x00400000 /* randomize virtual address space */ |
|
#define | PF_SWAPWRITE 0x00800000 /* Allowed to write to swap */ |
|
#define | PF_SPREAD_PAGE 0x01000000 /* Spread page cache over cpuset */ |
|
#define | PF_SPREAD_SLAB 0x02000000 /* Spread some slab caches over cpuset */ |
|
#define | PF_THREAD_BOUND 0x04000000 /* Thread bound to specific cpu */ |
|
#define | PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */ |
|
#define | PF_MEMPOLICY 0x10000000 /* Non-default NUMA mempolicy */ |
|
#define | PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */ |
|
#define | PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezable */ |
|
#define | clear_stopped_child_used_math(child) do { (child)->flags &= ~PF_USED_MATH; } while (0) |
|
#define | set_stopped_child_used_math(child) do { (child)->flags |= PF_USED_MATH; } while (0) |
|
#define | clear_used_math() clear_stopped_child_used_math(current) |
|
#define | set_used_math() set_stopped_child_used_math(current) |
|
#define | conditional_stopped_child_used_math(condition, child) do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0) |
|
#define | conditional_used_math(condition) conditional_stopped_child_used_math(condition, current) |
|
#define | copy_to_stopped_child_used_math(child) do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0) |
|
#define | tsk_used_math(p) ((p)->flags & PF_USED_MATH) |
|
#define | used_math() tsk_used_math(current) |
|
#define | JOBCTL_STOP_SIGMASK 0xffff /* signr of the last group stop */ |
|
#define | JOBCTL_STOP_DEQUEUED_BIT 16 /* stop signal dequeued */ |
|
#define | JOBCTL_STOP_PENDING_BIT 17 /* task should stop for group stop */ |
|
#define | JOBCTL_STOP_CONSUME_BIT 18 /* consume group stop count */ |
|
#define | JOBCTL_TRAP_STOP_BIT 19 /* trap for STOP */ |
|
#define | JOBCTL_TRAP_NOTIFY_BIT 20 /* trap for NOTIFY */ |
|
#define | JOBCTL_TRAPPING_BIT 21 /* switching to TRACED */ |
|
#define | JOBCTL_LISTENING_BIT 22 /* ptracer is listening for events */ |
|
#define | JOBCTL_STOP_DEQUEUED (1 << JOBCTL_STOP_DEQUEUED_BIT) |
|
#define | JOBCTL_STOP_PENDING (1 << JOBCTL_STOP_PENDING_BIT) |
|
#define | JOBCTL_STOP_CONSUME (1 << JOBCTL_STOP_CONSUME_BIT) |
|
#define | JOBCTL_TRAP_STOP (1 << JOBCTL_TRAP_STOP_BIT) |
|
#define | JOBCTL_TRAP_NOTIFY (1 << JOBCTL_TRAP_NOTIFY_BIT) |
|
#define | JOBCTL_TRAPPING (1 << JOBCTL_TRAPPING_BIT) |
|
#define | JOBCTL_LISTENING (1 << JOBCTL_LISTENING_BIT) |
|
#define | JOBCTL_TRAP_MASK (JOBCTL_TRAP_STOP | JOBCTL_TRAP_NOTIFY) |
|
#define | JOBCTL_PENDING_MASK (JOBCTL_STOP_PENDING | JOBCTL_TRAP_MASK) |
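
task->jobctl packs the number of the last group-stop signal into its low 16 bits (JOBCTL_STOP_SIGMASK) and keeps the flags above in bits 16 and up. A hedged sketch of reading the pending stop signal (the helper name is hypothetical; callers would normally hold the siglock):

    #include <linux/sched.h>

    /* Return the pending group-stop signal number, or 0 if none. */
    static inline int stop_signr(struct task_struct *task)
    {
            if (task->jobctl & JOBCTL_STOP_PENDING)
                    return task->jobctl & JOBCTL_STOP_SIGMASK;
            return 0;
    }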
|
#define | sched_exec() {} |
|
#define | rt_mutex_adjust_pi(p) do { } while (0) |
|
#define | SEND_SIG_NOINFO ((struct siginfo *) 0) |
|
#define | SEND_SIG_PRIV ((struct siginfo *) 1) |
|
#define | SEND_SIG_FORCED ((struct siginfo *) 2) |
|
#define | next_task(p) list_entry_rcu((p)->tasks.next, struct task_struct, tasks) |
|
#define | for_each_process(p) for (p = &init_task ; (p = next_task(p)) != &init_task ; ) |
|
#define | do_each_thread(g, t) for (g = t = &init_task ; (g = t = next_task(g)) != &init_task ; ) do |
|
#define | while_each_thread(g, t) while ((t = next_thread(t)) != g) |
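
A minimal sketch of the iterator macros above: count every thread in the system. The walk must be made under tasklist_lock (RCU is also used in places, with well-known caveats for while_each_thread):

    #include <linux/sched.h>

    static int count_threads(void)
    {
            struct task_struct *g, *t;
            int nr = 0;

            read_lock(&tasklist_lock);
            do_each_thread(g, t) {
                    nr++;
            } while_each_thread(g, t);
            read_unlock(&tasklist_lock);

            return nr;
    }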
|
#define | delay_group_leader(p) (thread_group_leader(p) && !thread_group_empty(p)) |
|
#define | task_thread_info(task) ((struct thread_info *)(task)->stack) |
|
#define | task_stack_page(task) ((task)->stack) |
|
#define | cond_resched() ({ __might_sleep(__FILE__, __LINE__, 0); _cond_resched(); })
|
#define | PREEMPT_LOCK_OFFSET 0 |
|
#define | cond_resched_lock(lock) ({ __might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET); __cond_resched_lock(lock); })
|
#define | cond_resched_softirq() ({ __might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET); __cond_resched_softirq(); })
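
cond_resched() and friends are voluntary preemption points: on a non-preemptible kernel they give the scheduler a chance to run a higher-priority task in the middle of long kernel-side loops. A sketch, where `struct item` and `handle_one()` are hypothetical stand-ins for per-iteration work:

    /* Process a large batch without hogging the CPU. */
    static void process_many(struct item *items, int n)
    {
            int i;

            for (i = 0; i < n; i++) {
                    handle_one(&items[i]);   /* hypothetical per-item work */
                    cond_resched();          /* reschedule if due */
            }
    }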
|
#define | TASK_SIZE_OF(tsk) TASK_SIZE |
|
|
void | get_avenrun (unsigned long *loads, unsigned long offset, int shift) |
|
| DECLARE_PER_CPU (unsigned long, process_counts) |
|
int | nr_processes (void) |
|
unsigned long | nr_running (void) |
|
unsigned long | nr_uninterruptible (void) |
|
unsigned long | nr_iowait (void) |
|
unsigned long | nr_iowait_cpu (int cpu) |
|
unsigned long | this_cpu_load (void) |
|
void | calc_global_load (unsigned long ticks) |
|
void | update_cpu_load_nohz (void) |
|
unsigned long | get_parent_ip (unsigned long addr) |
|
void | sched_init (void) |
|
void | sched_init_smp (void) |
|
asmlinkage void | schedule_tail (struct task_struct *prev) |
|
void | init_idle (struct task_struct *idle, int cpu) |
|
void | init_idle_bootup_task (struct task_struct *idle) |
|
int | runqueue_is_locked (int cpu) |
|
void | show_state_filter (unsigned long state_filter) |
|
void | show_regs (struct pt_regs *) |
|
void | show_stack (struct task_struct *task, unsigned long *sp) |
|
void | io_schedule (void) |
|
long | io_schedule_timeout (long timeout) |
|
void | cpu_init (void) |
|
void | trap_init (void) |
|
void | update_process_times (int user) |
|
void | scheduler_tick (void) |
|
void | sched_show_task (struct task_struct *p) |
|
int | in_sched_functions (unsigned long addr) |
|
signed long | schedule_timeout (signed long timeout) |
|
signed long | schedule_timeout_interruptible (signed long timeout) |
|
signed long | schedule_timeout_killable (signed long timeout) |
|
signed long | schedule_timeout_uninterruptible (signed long timeout) |
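
schedule_timeout() requires the caller to set the task state first; it returns the jiffies remaining if the task was woken early, and 0 if the full timeout elapsed. The _interruptible/_killable/_uninterruptible variants bundle the state setting. A minimal sketch:

    #include <linux/sched.h>

    /* Sleep for about one second, interruptibly. */
    static long snooze_one_second(void)
    {
            set_current_state(TASK_INTERRUPTIBLE);
            return schedule_timeout(HZ);  /* same as schedule_timeout_interruptible(HZ) */
    }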
|
asmlinkage void | schedule (void) |
|
void | schedule_preempt_disabled (void) |
|
int | mutex_spin_on_owner (struct mutex *lock, struct task_struct *owner) |
|
void | set_dumpable (struct mm_struct *mm, int value) |
|
int | get_dumpable (struct mm_struct *mm) |
|
int | uids_sysfs_init (void) |
|
struct user_struct * | find_user (kuid_t) |
|
pid_t | __task_pid_nr_ns (struct task_struct *task, enum pid_type type, struct pid_namespace *ns) |
|
pid_t | task_tgid_nr_ns (struct task_struct *tsk, struct pid_namespace *ns) |
|
int | is_container_init (struct task_struct *tsk) |
|
void | free_task (struct task_struct *tsk) |
|
void | __put_task_struct (struct task_struct *t) |
|
void | task_times (struct task_struct *p, cputime_t *ut, cputime_t *st) |
|
void | thread_group_times (struct task_struct *p, cputime_t *ut, cputime_t *st) |
|
bool | task_set_jobctl_pending (struct task_struct *task, unsigned int mask) |
|
void | task_clear_jobctl_trapping (struct task_struct *task) |
|
void | task_clear_jobctl_pending (struct task_struct *task, unsigned int mask) |
|
unsigned long long notrace | sched_clock (void) |
|
u64 | cpu_clock (int cpu) |
|
u64 | local_clock (void) |
|
u64 | sched_clock_cpu (int cpu) |
|
void | sched_clock_init (void) |
|
unsigned long long | task_sched_runtime (struct task_struct *task) |
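
sched_clock() is a fast nanosecond counter that may not be synchronized across CPUs, while cpu_clock()/local_clock() apply enough correction that values can be compared between CPUs. A sketch of timing a code section with it (the callback is a hypothetical stand-in):

    #include <linux/sched.h>
    #include <linux/smp.h>

    /* Return the elapsed nanoseconds spent in fn(). */
    static u64 time_section(void (*fn)(void))
    {
            u64 t0 = cpu_clock(raw_smp_processor_id());

            fn();   /* hypothetical work to measure */
            return cpu_clock(raw_smp_processor_id()) - t0;
    }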
|
int | sched_rt_handler (struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) |
|
bool | yield_to (struct task_struct *p, bool preempt) |
|
void | set_user_nice (struct task_struct *p, long nice) |
|
int | task_prio (const struct task_struct *p) |
|
int | task_nice (const struct task_struct *p) |
|
int | can_nice (const struct task_struct *p, const int nice) |
|
int | task_curr (const struct task_struct *p) |
|
int | idle_cpu (int cpu) |
|
int | sched_setscheduler (struct task_struct *, int, const struct sched_param *) |
|
int | sched_setscheduler_nocheck (struct task_struct *, int, const struct sched_param *) |
|
struct task_struct * | idle_task (int cpu) |
|
struct task_struct * | curr_task (int cpu) |
|
void | set_curr_task (int cpu, struct task_struct *p) |
|
void | yield (void) |
|
struct task_struct * | find_task_by_vpid (pid_t nr) |
|
struct task_struct * | find_task_by_pid_ns (pid_t nr, struct pid_namespace *ns) |
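
Pid-to-task lookup must happen under RCU (or tasklist_lock), and the result is only stable while the lock is held unless the caller pins it with get_task_struct(). A sketch of the usual pattern:

    #include <linux/sched.h>
    #include <linux/rcupdate.h>

    /* Look up a task by its virtual pid and take a reference on it;
     * the caller releases it with put_task_struct(). */
    static struct task_struct *grab_task(pid_t nr)
    {
            struct task_struct *p;

            rcu_read_lock();
            p = find_task_by_vpid(nr);
            if (p)
                    get_task_struct(p);   /* pin past the RCU section */
            rcu_read_unlock();

            return p;
    }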
|
void | __set_special_pids (struct pid *pid) |
|
struct user_struct * | alloc_uid (kuid_t) |
|
void | free_uid (struct user_struct *) |
|
void | xtime_update (unsigned long ticks) |
|
int | wake_up_state (struct task_struct *tsk, unsigned int state) |
|
int | wake_up_process (struct task_struct *tsk) |
|
void | wake_up_new_task (struct task_struct *tsk) |
|
void | sched_fork (struct task_struct *p) |
|
void | sched_dead (struct task_struct *p) |
|
void | proc_caches_init (void) |
|
void | flush_signals (struct task_struct *) |
|
void | __flush_signals (struct task_struct *) |
|
void | ignore_signals (struct task_struct *) |
|
void | flush_signal_handlers (struct task_struct *, int force_default) |
|
int | dequeue_signal (struct task_struct *tsk, sigset_t *mask, siginfo_t *info) |
|
void | block_all_signals (int(*notifier)(void *priv), void *priv, sigset_t *mask) |
|
void | unblock_all_signals (void) |
|
void | release_task (struct task_struct *p) |
|
int | send_sig_info (int, struct siginfo *, struct task_struct *) |
|
int | force_sigsegv (int, struct task_struct *) |
|
int | force_sig_info (int, struct siginfo *, struct task_struct *) |
|
int | __kill_pgrp_info (int sig, struct siginfo *info, struct pid *pgrp) |
|
int | kill_pid_info (int sig, struct siginfo *info, struct pid *pid) |
|
int | kill_pid_info_as_cred (int, struct siginfo *, struct pid *, const struct cred *, u32) |
|
int | kill_pgrp (struct pid *pid, int sig, int priv) |
|
int | kill_pid (struct pid *pid, int sig, int priv) |
|
int | kill_proc_info (int, struct siginfo *, pid_t) |
|
__must_check bool | do_notify_parent (struct task_struct *, int) |
|
void | __wake_up_parent (struct task_struct *p, struct task_struct *parent) |
|
void | force_sig (int, struct task_struct *) |
|
int | send_sig (int, struct task_struct *, int) |
|
int | zap_other_threads (struct task_struct *p) |
|
struct sigqueue * | sigqueue_alloc (void) |
|
void | sigqueue_free (struct sigqueue *) |
|
int | send_sigqueue (struct sigqueue *, struct task_struct *, int group) |
|
int | do_sigaction (int, struct k_sigaction *, struct k_sigaction *) |
|
int | do_sigaltstack (const stack_t __user *, stack_t __user *, unsigned long) |
|
struct mm_struct * | mm_alloc (void) |
|
void | __mmdrop (struct mm_struct *) |
|
void | mmput (struct mm_struct *) |
|
struct mm_struct * | get_task_mm (struct task_struct *task) |
|
struct mm_struct * | mm_access (struct task_struct *task, unsigned int mode) |
|
void | mm_release (struct task_struct *, struct mm_struct *) |
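
get_task_mm() takes a reference on the task's address space and returns NULL for kernel threads; every successful call must be paired with mmput(). A sketch (the statistic read is illustrative):

    #include <linux/sched.h>
    #include <linux/mm_types.h>

    /* Report how many pages a task has mapped, or 0 for kernel threads. */
    static unsigned long task_total_vm(struct task_struct *task)
    {
            struct mm_struct *mm = get_task_mm(task);
            unsigned long vm = 0;

            if (mm) {
                    vm = mm->total_vm;
                    mmput(mm);
            }
            return vm;
    }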
|
struct mm_struct * | dup_mm (struct task_struct *tsk) |
|
int | copy_thread (unsigned long, unsigned long, unsigned long, struct task_struct *, struct pt_regs *) |
|
void | flush_thread (void) |
|
void | exit_thread (void) |
|
void | exit_files (struct task_struct *) |
|
void | __cleanup_sighand (struct sighand_struct *) |
|
void | exit_itimers (struct signal_struct *) |
|
void | flush_itimer_signals (void) |
|
void | do_group_exit (int) |
|
void | daemonize (const char *,...) |
|
int | allow_signal (int) |
|
int | disallow_signal (int) |
|
int | do_execve (const char *, const char __user *const __user *, const char __user *const __user *, struct pt_regs *) |
|
long | do_fork (unsigned long, unsigned long, struct pt_regs *, unsigned long, int __user *, int __user *) |
|
struct task_struct * | fork_idle (int) |
|
void | set_task_comm (struct task_struct *tsk, char *from) |
|
char * | get_task_comm (char *to, struct task_struct *tsk) |
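
The buffer handed to get_task_comm() must be TASK_COMM_LEN bytes (see the definition above); the helper copies the name under the task's lock so the result is coherent. A minimal sketch:

    #include <linux/sched.h>
    #include <linux/kernel.h>

    static void show_comm(struct task_struct *tsk)
    {
            char comm[TASK_COMM_LEN];

            get_task_comm(comm, tsk);
            printk(KERN_INFO "task: %s\n", comm);
    }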
|
bool | current_is_single_threaded (void) |
|
struct sighand_struct * | __lock_task_sighand (struct task_struct *tsk, unsigned long *flags) |
|
void | thread_info_cache_init (void) |
|
int | _cond_resched (void) |
|
int | __cond_resched_lock (spinlock_t *lock) |
|
int | __cond_resched_softirq (void) |
|
void | thread_group_cputime (struct task_struct *tsk, struct task_cputime *times) |
|
void | thread_group_cputimer (struct task_struct *tsk, struct task_cputime *times) |
|
void | recalc_sigpending_and_wake (struct task_struct *t) |
|
void | recalc_sigpending (void) |
|
void | signal_wake_up (struct task_struct *t, int resume_stopped) |
|
long | sched_setaffinity (pid_t pid, const struct cpumask *new_mask) |
|
long | sched_getaffinity (pid_t pid, struct cpumask *mask) |
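
These are the in-kernel affinity helpers (pid 0 means the current task). A hedged sketch of pinning a task to CPU 0 with them:

    #include <linux/cpumask.h>
    #include <linux/gfp.h>
    #include <linux/errno.h>

    static long pin_to_cpu0(pid_t pid)
    {
            cpumask_var_t mask;
            long ret;

            if (!alloc_cpumask_var(&mask, GFP_KERNEL))
                    return -ENOMEM;

            cpumask_clear(mask);
            cpumask_set_cpu(0, mask);
            ret = sched_setaffinity(pid, mask);

            free_cpumask_var(mask);
            return ret;
    }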
|
void | normalize_rt_tasks (void) |
|
int | task_can_switch_user (struct user_struct *up, struct task_struct *tsk) |
|