Linux Kernel 3.7.1 — SUNRPC scheduling interface (include/linux/sunrpc/sched.h) reference
#include <linux/timer.h>
#include <linux/ktime.h>
#include <linux/sunrpc/types.h>
#include <linux/spinlock.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <linux/sunrpc/xdr.h>
Go to the source code of this file.
Data Structures | |
struct | rpc_message |
struct | rpc_wait |
struct | rpc_task |
struct | rpc_call_ops |
struct | rpc_task_setup |
struct | rpc_timer |
struct | rpc_wait_queue |
Macros | |
#define | tk_xprt tk_client->cl_xprt |
#define | task_for_each(task, pos, head) |
#define | task_for_first(task, head) |
#define | RPC_TASK_ASYNC 0x0001 /* is an async task */ |
#define | RPC_TASK_SWAPPER 0x0002 /* is swapping in/out */ |
#define | RPC_CALL_MAJORSEEN 0x0020 /* major timeout seen */ |
#define | RPC_TASK_ROOTCREDS 0x0040 /* force root creds */ |
#define | RPC_TASK_DYNAMIC 0x0080 /* task was kmalloc'ed */ |
#define | RPC_TASK_KILLED 0x0100 /* task was killed */ |
#define | RPC_TASK_SOFT 0x0200 /* Use soft timeouts */ |
#define | RPC_TASK_SOFTCONN 0x0400 /* Fail if can't connect */ |
#define | RPC_TASK_SENT 0x0800 /* message was sent */ |
#define | RPC_TASK_TIMEOUT 0x1000 /* fail with ETIMEDOUT on timeout */ |
#define | RPC_IS_ASYNC(t) ((t)->tk_flags & RPC_TASK_ASYNC) |
#define | RPC_IS_SWAPPER(t) ((t)->tk_flags & RPC_TASK_SWAPPER) |
#define | RPC_DO_ROOTOVERRIDE(t) ((t)->tk_flags & RPC_TASK_ROOTCREDS) |
#define | RPC_ASSASSINATED(t) ((t)->tk_flags & RPC_TASK_KILLED) |
#define | RPC_IS_SOFT(t) ((t)->tk_flags & (RPC_TASK_SOFT|RPC_TASK_TIMEOUT)) |
#define | RPC_IS_SOFTCONN(t) ((t)->tk_flags & RPC_TASK_SOFTCONN) |
#define | RPC_WAS_SENT(t) ((t)->tk_flags & RPC_TASK_SENT) |
#define | RPC_TASK_RUNNING 0 |
#define | RPC_TASK_QUEUED 1 |
#define | RPC_TASK_ACTIVE 2 |
#define | RPC_IS_RUNNING(t) test_bit(RPC_TASK_RUNNING, &(t)->tk_runstate) |
#define | rpc_set_running(t) set_bit(RPC_TASK_RUNNING, &(t)->tk_runstate) |
#define | rpc_test_and_set_running(t) test_and_set_bit(RPC_TASK_RUNNING, &(t)->tk_runstate) |
#define | rpc_clear_running(t) |
#define | RPC_IS_QUEUED(t) test_bit(RPC_TASK_QUEUED, &(t)->tk_runstate) |
#define | rpc_set_queued(t) set_bit(RPC_TASK_QUEUED, &(t)->tk_runstate) |
#define | rpc_clear_queued(t) |
#define | RPC_IS_ACTIVATED(t) test_bit(RPC_TASK_ACTIVE, &(t)->tk_runstate) |
#define | RPC_PRIORITY_LOW (-1) |
#define | RPC_PRIORITY_NORMAL (0) |
#define | RPC_PRIORITY_HIGH (1) |
#define | RPC_PRIORITY_PRIVILEGED (2) |
#define | RPC_NR_PRIORITY (1 + RPC_PRIORITY_PRIVILEGED - RPC_PRIORITY_LOW) |
#define | RPC_BATCH_COUNT 16 |
#define | RPC_IS_PRIORITY(q) ((q)->maxpriority > 0) |
Typedefs | |
typedef void(* | rpc_action )(struct rpc_task *) |
Variables | |
struct workqueue_struct * | rpciod_workqueue |
#define RPC_ASSASSINATED | ( | t | ) | ((t)->tk_flags & RPC_TASK_KILLED) |
#define rpc_clear_queued | ( | t | ) |
#define rpc_clear_running | ( | t | ) |
#define RPC_DO_ROOTOVERRIDE | ( | t | ) | ((t)->tk_flags & RPC_TASK_ROOTCREDS) |
#define RPC_IS_ACTIVATED | ( | t | ) | test_bit(RPC_TASK_ACTIVE, &(t)->tk_runstate) |
#define RPC_IS_ASYNC | ( | t | ) | ((t)->tk_flags & RPC_TASK_ASYNC) |
#define RPC_IS_QUEUED | ( | t | ) | test_bit(RPC_TASK_QUEUED, &(t)->tk_runstate) |
#define RPC_IS_RUNNING | ( | t | ) | test_bit(RPC_TASK_RUNNING, &(t)->tk_runstate) |
#define RPC_IS_SOFT | ( | t | ) | ((t)->tk_flags & (RPC_TASK_SOFT|RPC_TASK_TIMEOUT)) |
#define RPC_IS_SOFTCONN | ( | t | ) | ((t)->tk_flags & RPC_TASK_SOFTCONN) |
#define RPC_IS_SWAPPER | ( | t | ) | ((t)->tk_flags & RPC_TASK_SWAPPER) |
#define RPC_NR_PRIORITY (1 + RPC_PRIORITY_PRIVILEGED - RPC_PRIORITY_LOW) |
#define rpc_set_queued | ( | t | ) | set_bit(RPC_TASK_QUEUED, &(t)->tk_runstate) |
#define rpc_set_running | ( | t | ) | set_bit(RPC_TASK_RUNNING, &(t)->tk_runstate) |
#define RPC_TASK_SOFTCONN 0x0400 /* Fail if can't connect */ |
#define RPC_TASK_TIMEOUT 0x1000 /* fail with ETIMEDOUT on timeout */ |
#define rpc_test_and_set_running | ( | t | ) | test_and_set_bit(RPC_TASK_RUNNING, &(t)->tk_runstate) |
#define RPC_WAS_SENT | ( | t | ) | ((t)->tk_flags & RPC_TASK_SENT) |
void rpc_destroy_wait_queue | ( | struct rpc_wait_queue * | ) |
void rpc_init_priority_wait_queue | ( | struct rpc_wait_queue * | , |
const char * | |||
) |
void rpc_init_wait_queue | ( | struct rpc_wait_queue * | , |
const char * | |||
) |
rpc_malloc - allocate an RPC buffer
@task: RPC task that will use this buffer
@size: requested byte size
To prevent rpciod from hanging, this allocator never sleeps, returning NULL if the request cannot be serviced immediately. The caller can arrange to sleep in a way that is safe for rpciod.
Most requests are 'small' (under 2KiB) and can be serviced from a mempool, ensuring that NFS reads and writes can always proceed, and that there is good locality of reference for these buffers.
In order to avoid memory starvation triggering more writebacks of NFS requests, we avoid using GFP_KERNEL.
|
read |
int rpc_queue_empty | ( | struct rpc_wait_queue * | ) |
void rpc_release_calldata | ( | const struct rpc_call_ops * | , |
void * | |||
) |
|
read |
void rpc_sleep_on | ( | struct rpc_wait_queue * | , |
struct rpc_task * | , | ||
rpc_action | action | ||
) |
void rpc_sleep_on_priority | ( | struct rpc_wait_queue * | , |
struct rpc_task * | , | ||
rpc_action | action, | ||
int | priority | ||
) |
void rpc_wake_up | ( | struct rpc_wait_queue * | queue | ) |
rpc_wake_up - wake up all rpc_tasks
@queue: rpc_wait_queue on which the tasks are sleeping
Grabs queue->lock
|
read |
void rpc_wake_up_queued_task | ( | struct rpc_wait_queue * | , |
struct rpc_task * | |||
) |
void rpc_wake_up_status | ( | struct rpc_wait_queue * | queue, |
int | status | ||
) |
rpc_wake_up_status - wake up all rpc_tasks and set their status value.
@queue: rpc_wait_queue on which the tasks are sleeping
@status: status value to set
Grabs queue->lock
struct workqueue_struct* rpciod_workqueue |