void start_bandwidth_timer (struct hrtimer *period_timer, ktime_t period)
DEFINE_MUTEX (sched_domains_mutex)
DEFINE_PER_CPU_SHARED_ALIGNED (struct rq, runqueues)
void update_rq_clock (struct rq *rq)
void resched_task (struct task_struct *p)
void activate_task (struct rq *rq, struct task_struct *p, int flags)
void deactivate_task (struct rq *rq, struct task_struct *p, int flags)
void sched_set_stop_task (int cpu, struct task_struct *stop)
int task_curr (const struct task_struct *p)
void check_preempt_curr (struct rq *rq, struct task_struct *p, int flags)
int wake_up_process (struct task_struct *p)
EXPORT_SYMBOL (wake_up_process)
int wake_up_state (struct task_struct *p, unsigned int state)
void sched_fork (struct task_struct *p)
void wake_up_new_task (struct task_struct *p)
asmlinkage void schedule_tail (struct task_struct *prev) __releases(rq->lock)
unsigned long nr_running (void)
unsigned long nr_uninterruptible (void)
unsigned long long nr_context_switches (void)
unsigned long nr_iowait (void)
unsigned long nr_iowait_cpu (int cpu)
unsigned long this_cpu_load (void)
EXPORT_SYMBOL (avenrun)
void get_avenrun (unsigned long *loads, unsigned long offset, int shift)
void calc_global_load (unsigned long ticks)
DEFINE_PER_CPU (struct kernel_stat, kstat)
DEFINE_PER_CPU (struct kernel_cpustat, kernel_cpustat)
EXPORT_PER_CPU_SYMBOL (kstat)
EXPORT_PER_CPU_SYMBOL (kernel_cpustat)
unsigned long long task_delta_exec (struct task_struct *p)
unsigned long long task_sched_runtime (struct task_struct *p)
void scheduler_tick (void)
notrace unsigned long get_parent_ip (unsigned long addr)
asmlinkage void __sched schedule (void)
EXPORT_SYMBOL (schedule)
void __sched schedule_preempt_disabled (void)
int default_wake_function (wait_queue_t *curr, unsigned mode, int wake_flags, void *key)
EXPORT_SYMBOL (default_wake_function)
void __wake_up (wait_queue_head_t *q, unsigned int mode, int nr_exclusive, void *key)
EXPORT_SYMBOL (__wake_up)
void __wake_up_locked (wait_queue_head_t *q, unsigned int mode, int nr)
EXPORT_SYMBOL_GPL (__wake_up_locked)
void __wake_up_locked_key (wait_queue_head_t *q, unsigned int mode, void *key)
EXPORT_SYMBOL_GPL (__wake_up_locked_key)
void __wake_up_sync_key (wait_queue_head_t *q, unsigned int mode, int nr_exclusive, void *key)
EXPORT_SYMBOL_GPL (__wake_up_sync_key)
void __wake_up_sync (wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
EXPORT_SYMBOL_GPL (__wake_up_sync)
void complete (struct completion *x)
EXPORT_SYMBOL (complete)
void complete_all (struct completion *x)
EXPORT_SYMBOL (complete_all)
void __sched wait_for_completion (struct completion *x)
EXPORT_SYMBOL (wait_for_completion)
unsigned long __sched wait_for_completion_timeout (struct completion *x, unsigned long timeout)
EXPORT_SYMBOL (wait_for_completion_timeout)
int __sched wait_for_completion_interruptible (struct completion *x)
EXPORT_SYMBOL (wait_for_completion_interruptible)
long __sched wait_for_completion_interruptible_timeout (struct completion *x, unsigned long timeout)
EXPORT_SYMBOL (wait_for_completion_interruptible_timeout)
int __sched wait_for_completion_killable (struct completion *x)
EXPORT_SYMBOL (wait_for_completion_killable)
long __sched wait_for_completion_killable_timeout (struct completion *x, unsigned long timeout)
EXPORT_SYMBOL (wait_for_completion_killable_timeout)
bool try_wait_for_completion (struct completion *x)
EXPORT_SYMBOL (try_wait_for_completion)
bool completion_done (struct completion *x)
EXPORT_SYMBOL (completion_done)
void __sched interruptible_sleep_on (wait_queue_head_t *q)
EXPORT_SYMBOL (interruptible_sleep_on)
long __sched interruptible_sleep_on_timeout (wait_queue_head_t *q, long timeout)
EXPORT_SYMBOL (interruptible_sleep_on_timeout)
void __sched sleep_on (wait_queue_head_t *q)
EXPORT_SYMBOL (sleep_on)
long __sched sleep_on_timeout (wait_queue_head_t *q, long timeout)
EXPORT_SYMBOL (sleep_on_timeout)
void set_user_nice (struct task_struct *p, long nice)
EXPORT_SYMBOL (set_user_nice)
int can_nice (const struct task_struct *p, const int nice)
int task_prio (const struct task_struct *p)
int task_nice (const struct task_struct *p)
EXPORT_SYMBOL (task_nice)
int idle_cpu (int cpu)
struct task_struct * idle_task (int cpu)
int sched_setscheduler (struct task_struct *p, int policy, const struct sched_param *param)
EXPORT_SYMBOL_GPL (sched_setscheduler)
int sched_setscheduler_nocheck (struct task_struct *p, int policy, const struct sched_param *param)
SYSCALL_DEFINE3 (sched_setscheduler, pid_t, pid, int, policy, struct sched_param __user *, param)
SYSCALL_DEFINE2 (sched_setparam, pid_t, pid, struct sched_param __user *, param)
SYSCALL_DEFINE1 (sched_getscheduler, pid_t, pid)
SYSCALL_DEFINE2 (sched_getparam, pid_t, pid, struct sched_param __user *, param)
long sched_setaffinity (pid_t pid, const struct cpumask *in_mask)
SYSCALL_DEFINE3 (sched_setaffinity, pid_t, pid, unsigned int, len, unsigned long __user *, user_mask_ptr)
long sched_getaffinity (pid_t pid, struct cpumask *mask)
SYSCALL_DEFINE3 (sched_getaffinity, pid_t, pid, unsigned int, len, unsigned long __user *, user_mask_ptr)
SYSCALL_DEFINE0 (sched_yield)
int __sched _cond_resched (void)
EXPORT_SYMBOL (_cond_resched)
int __cond_resched_lock (spinlock_t *lock)
EXPORT_SYMBOL (__cond_resched_lock)
int __sched __cond_resched_softirq (void)
EXPORT_SYMBOL (__cond_resched_softirq)
void __sched yield (void)
EXPORT_SYMBOL (yield)
bool __sched yield_to (struct task_struct *p, bool preempt)
EXPORT_SYMBOL_GPL (yield_to)
void __sched io_schedule (void)
EXPORT_SYMBOL (io_schedule)
long __sched io_schedule_timeout (long timeout)
SYSCALL_DEFINE1 (sched_get_priority_max, int, policy)
SYSCALL_DEFINE1 (sched_get_priority_min, int, policy)
SYSCALL_DEFINE2 (sched_rr_get_interval, pid_t, pid, struct timespec __user *, interval)
void sched_show_task (struct task_struct *p)
void show_state_filter (unsigned long state_filter)
void __cpuinit init_idle_bootup_task (struct task_struct *idle)
void __cpuinit init_idle (struct task_struct *idle, int cpu)
void __init sched_init_smp (void)
int in_sched_functions (unsigned long addr)
DECLARE_PER_CPU (cpumask_var_t, load_balance_tmpmask)
void __init sched_init (void)
int sched_rt_handler (struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos)

|
__wake_up_sync_key - wake up threads blocked on a waitqueue.

Parameters:
q - the waitqueue
mode - which threads
nr_exclusive - how many wake-one or wake-many threads to wake up
key - opaque value to be passed to wakeup targets

The sync wakeup differs in that the waker knows that it will schedule away soon, so while the target thread will be woken up, it will not be migrated to another CPU - i.e. the two threads are 'synchronized' with each other. This can prevent needless bouncing between CPUs.
On UP it can prevent extra preemption.
It may be assumed that this function implies a write memory barrier before changing the task state if and only if any tasks are woken up.
Definition at line 3131 of file core.c.
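A minimal illustrative sketch of the pattern described above: a waker that knows it is about to block itself passes the sync hint so the woken task is not bounced to another CPU. The waitqueue, flag and function names are assumptions for illustration, not code from core.c (most callers reach this through the wake_up_interruptible_sync() wrapper).

#include <linux/wait.h>
#include <linux/sched.h>

static DECLARE_WAIT_QUEUE_HEAD(data_wq);
static int data_ready;

static void producer_hand_off(void)
{
	data_ready = 1;
	/* wake at most one TASK_NORMAL waiter; no key payload */
	__wake_up_sync_key(&data_wq, TASK_NORMAL, 1, NULL);
	/* ... the producer then blocks shortly afterwards, honouring the hint ... */
}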
complete - signals a single thread waiting on this completion

Parameters:
x - holds the state of this particular completion
This will wake up a single thread waiting on this completion. Threads will be awakened in the same order in which they were queued.
See also complete_all(), wait_for_completion() and related routines.
It may be assumed that this function implies a write memory barrier before changing the task state if and only if any tasks are woken up.
Definition at line 3170 of file core.c.
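For context, a minimal sketch of the usual pairing of complete() with wait_for_completion(); the completion object and the worker thread are illustrative names, not taken from core.c.

#include <linux/completion.h>
#include <linux/kthread.h>
#include <linux/err.h>

static DECLARE_COMPLETION(setup_done);

static int setup_worker(void *unused)
{
	/* ... perform one-time setup work ... */
	complete(&setup_done);		/* wakes exactly one waiter, in FIFO order */
	return 0;
}

static void wait_for_setup(void)
{
	if (!IS_ERR(kthread_run(setup_worker, NULL, "setup_worker")))
		wait_for_completion(&setup_done);	/* sleeps until complete() runs */
}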
sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace.

Parameters:
p - the task in question.
policy - new policy.
param - structure containing the new RT priority.
Just like sched_setscheduler, only don't bother checking if the current context has permission. For example, this is needed in stop_machine(): we create temporary high priority worker threads, but our caller might not have that capability.
Definition at line 3885 of file core.c.
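A hedged sketch of the kernel-space use case mentioned above: creating a worker kthread and raising it to an RT policy without a capability check. The thread name, priority value and helper are illustrative assumptions.

#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/err.h>

static void start_rt_worker(int (*fn)(void *), void *arg)
{
	struct sched_param sp = { .sched_priority = 50 };	/* arbitrary RT priority */
	struct task_struct *t = kthread_create(fn, arg, "rt_worker");

	if (IS_ERR(t))
		return;
	/* kernel-internal caller: skip the permission check sched_setscheduler() does */
	sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
	wake_up_process(t);
}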
try_wait_for_completion - try to decrement a completion without blocking

Parameters:
x - completion structure

Returns: 0 if a decrement cannot be done without blocking, 1 if a decrement succeeded.
If a completion is being used as a counting completion, attempt to decrement the counter without blocking. This enables us to avoid waiting if the resource the completion is protecting is not available.
Definition at line 3359 of file core.c.
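As a hedged illustration of the counting use described above, a caller might try to consume one "slot" without sleeping and fall back to other work if none is free; the completion and helper names are hypothetical.

#include <linux/completion.h>

static struct completion free_slots;	/* initialised elsewhere; complete()d once per released slot */

static bool try_take_slot(void)
{
	/* non-blocking: true only if a slot could be consumed right now */
	return try_wait_for_completion(&free_slots);
}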
wait_for_completion_interruptible_timeout - waits for completion (w/(to,intr))

Parameters:
x - holds the state of this particular completion
timeout - timeout value in jiffies
This waits for either a completion of a specific task to be signaled or for a specified timeout to expire. It is interruptible. The timeout is in jiffies.
The return value is -ERESTARTSYS if interrupted, 0 if timed out, positive (at least 1, or number of jiffies left till timeout) if completed.
Definition at line 3302 of file core.c.
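A small sketch of handling the three possible outcomes described above; the completion pointer, helper name and the 500 ms bound are illustrative.

#include <linux/completion.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

static int wait_for_io(struct completion *io_done)
{
	long ret = wait_for_completion_interruptible_timeout(io_done,
							     msecs_to_jiffies(500));

	if (ret < 0)
		return ret;		/* -ERESTARTSYS: interrupted by a signal */
	if (ret == 0)
		return -ETIMEDOUT;	/* 500 ms elapsed without completion */
	return 0;			/* completed; ret was the jiffies left */
}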
wait_for_completion_killable_timeout - waits for completion of a task (w/(to,killable))

Parameters:
x - holds the state of this particular completion
timeout - timeout value in jiffies
This waits for either a completion of a specific task to be signaled or for a specified timeout to expire. It can be interrupted by a kill signal. The timeout is in jiffies.
The return value is -ERESTARTSYS if interrupted, 0 if timed out, positive (at least 1, or number of jiffies left till timeout) if completed.
Definition at line 3340 of file core.c.
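The same three-way convention applies here, but only a fatal (kill) signal can interrupt the wait; a short illustrative sketch with hypothetical names:

#include <linux/completion.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

static int wait_teardown_done(struct completion *done)
{
	long ret = wait_for_completion_killable_timeout(done, 30 * HZ);

	if (ret < 0)
		return ret;		/* killed: -ERESTARTSYS */
	if (ret == 0)
		return -ETIMEDOUT;	/* 30 seconds elapsed */
	return 0;			/* completed in time */
}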
wait_for_completion_timeout - waits for completion of a task (w/timeout)

Parameters:
x - holds the state of this particular completion
timeout - timeout value in jiffies
This waits for either a completion of a specific task to be signaled or for a specified timeout to expire. The timeout is in jiffies. It is not interruptible.
The return value is 0 if timed out, and positive (at least 1, or number of jiffies left till timeout) if completed.
Definition at line 3266 of file core.c.
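By contrast, the uninterruptible variant has only two outcomes; a brief sketch with illustrative names:

#include <linux/completion.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

static int wait_fw_ready(struct completion *fw_ready)
{
	/* bounded, uninterruptible wait: give the hardware one second */
	unsigned long left = wait_for_completion_timeout(fw_ready, HZ);

	return left ? 0 : -ETIMEDOUT;	/* 0 means the timeout expired first */
}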
wake_up_process - Wake up a specific process
Parameters:
p - The process to be woken up.
Attempt to wake up the nominated process and move it to the set of runnable processes. Returns 1 if the process was woken up, 0 if it was already running.
It may be assumed that this function implies a write memory barrier before changing the task state if and only if any tasks are woken up.
Definition at line 1498 of file core.c.
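A common in-kernel use is waking a thread created with kthread_create(), which is left sleeping until explicitly woken; a hedged sketch with illustrative names:

#include <linux/kthread.h>
#include <linux/err.h>

static int my_thread_fn(void *unused)
{
	/* ... do the thread's work ... */
	return 0;
}

static void start_my_thread(void)
{
	struct task_struct *t = kthread_create(my_thread_fn, NULL, "my_thread");

	if (!IS_ERR(t))
		wake_up_process(t);	/* returns 1 here; 0 would mean already running */
}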
yield - yield the current processor to other threads.
Do not ever use this function, there's a 99% chance you're doing it wrong.
The scheduler is at all times free to pick the calling task as the most eligible task to run; if removing the yield() call from your code breaks it, it's already broken.
Typical broken usage is:
while (!event) yield();
where one assumes that yield() will let 'the other' process run that will make event true. If the current task is a SCHED_FIFO task that will never happen. Never use yield() as a progress guarantee!!
If you want to use yield() to wait for something, use wait_event(). If you want to use yield() to be 'nice' for others, use cond_resched(). If you still want to use yield(), do not!
Definition at line 4275 of file core.c.
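A minimal sketch of the wait_event() replacement recommended above for the broken polling loop; the waitqueue and flag names are illustrative.

#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(event_wq);
static int event;

static void consumer(void)
{
	/* instead of: while (!event) yield(); */
	wait_event(event_wq, event);	/* sleeps until the condition becomes true */
}

static void producer(void)
{
	event = 1;
	wake_up(&event_wq);		/* lets the sleeper re-check the condition */
}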
yield_to - yield the current processor to another thread in your thread group, or accelerate that thread toward the processor it's on.
Parameters:
p - target task
preempt - whether task preemption is allowed or not
It's the caller's job to ensure that the target task struct can't go away on us before we can do any checks.
Returns true if we indeed boosted the target task.
Definition at line 4294 of file core.c.
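A hedged sketch of how a caller might meet the "can't go away" requirement by pinning the target through its struct pid before the directed yield; the holder_pid argument is an illustrative assumption.

#include <linux/sched.h>
#include <linux/pid.h>

static void boost_lock_holder(struct pid *holder_pid)
{
	/* take a reference so the task_struct cannot vanish under us */
	struct task_struct *target = get_pid_task(holder_pid, PIDTYPE_PID);

	if (!target)
		return;
	yield_to(target, false);	/* returns true if the target was boosted */
	put_task_struct(target);
}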