Linux Kernel  3.7.1
Macros | Functions | Variables
blk-core.c File Reference
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/fault-inject.h>
#include <linux/list_sort.h>
#include <linux/delay.h>
#include <linux/ratelimit.h>
#include <trace/events/block.h>
#include "blk.h"
#include "blk-cgroup.h"


Macros

#define CREATE_TRACE_POINTS
 
#define PLUG_MAGIC   0x91827364
 

Functions

 EXPORT_TRACEPOINT_SYMBOL_GPL (block_bio_remap)
 
 EXPORT_TRACEPOINT_SYMBOL_GPL (block_rq_remap)
 
 EXPORT_TRACEPOINT_SYMBOL_GPL (block_bio_complete)
 
 DEFINE_IDA (blk_queue_ida)
 
void blk_queue_congestion_threshold (struct request_queue *q)
 
struct backing_dev_info * blk_get_backing_dev_info (struct block_device *bdev)
 
 EXPORT_SYMBOL (blk_get_backing_dev_info)
 
void blk_rq_init (struct request_queue *q, struct request *rq)
 
 EXPORT_SYMBOL (blk_rq_init)
 
void blk_dump_rq_flags (struct request *rq, char *msg)
 
 EXPORT_SYMBOL (blk_dump_rq_flags)
 
void blk_delay_queue (struct request_queue *q, unsigned long msecs)
 
 EXPORT_SYMBOL (blk_delay_queue)
 
void blk_start_queue (struct request_queue *q)
 
 EXPORT_SYMBOL (blk_start_queue)
 
void blk_stop_queue (struct request_queue *q)
 
 EXPORT_SYMBOL (blk_stop_queue)
 
void blk_sync_queue (struct request_queue *q)
 
 EXPORT_SYMBOL (blk_sync_queue)
 
void __blk_run_queue (struct request_queue *q)
 
 EXPORT_SYMBOL (__blk_run_queue)
 
void blk_run_queue_async (struct request_queue *q)
 
 EXPORT_SYMBOL (blk_run_queue_async)
 
void blk_run_queue (struct request_queue *q)
 
 EXPORT_SYMBOL (blk_run_queue)
 
void blk_put_queue (struct request_queue *q)
 
 EXPORT_SYMBOL (blk_put_queue)
 
void blk_drain_queue (struct request_queue *q, bool drain_all)
 
void blk_queue_bypass_start (struct request_queue *q)
 
 EXPORT_SYMBOL_GPL (blk_queue_bypass_start)
 
void blk_queue_bypass_end (struct request_queue *q)
 
 EXPORT_SYMBOL_GPL (blk_queue_bypass_end)
 
void blk_cleanup_queue (struct request_queue *q)
 
 EXPORT_SYMBOL (blk_cleanup_queue)
 
int blk_init_rl (struct request_list *rl, struct request_queue *q, gfp_t gfp_mask)
 
void blk_exit_rl (struct request_list *rl)
 
struct request_queue * blk_alloc_queue (gfp_t gfp_mask)
 
 EXPORT_SYMBOL (blk_alloc_queue)
 
struct request_queue * blk_alloc_queue_node (gfp_t gfp_mask, int node_id)
 
 EXPORT_SYMBOL (blk_alloc_queue_node)
 
struct request_queue * blk_init_queue (request_fn_proc *rfn, spinlock_t *lock)
 
 EXPORT_SYMBOL (blk_init_queue)
 
struct request_queue * blk_init_queue_node (request_fn_proc *rfn, spinlock_t *lock, int node_id)
 
 EXPORT_SYMBOL (blk_init_queue_node)
 
struct request_queue * blk_init_allocated_queue (struct request_queue *q, request_fn_proc *rfn, spinlock_t *lock)
 
 EXPORT_SYMBOL (blk_init_allocated_queue)
 
bool blk_get_queue (struct request_queue *q)
 
 EXPORT_SYMBOL (blk_get_queue)
 
struct request * blk_get_request (struct request_queue *q, int rw, gfp_t gfp_mask)
 
 EXPORT_SYMBOL (blk_get_request)
 
struct request * blk_make_request (struct request_queue *q, struct bio *bio, gfp_t gfp_mask)
 
 EXPORT_SYMBOL (blk_make_request)
 
void blk_requeue_request (struct request_queue *q, struct request *rq)
 
 EXPORT_SYMBOL (blk_requeue_request)
 
void part_round_stats (int cpu, struct hd_struct *part)
 
 EXPORT_SYMBOL_GPL (part_round_stats)
 
void __blk_put_request (struct request_queue *q, struct request *req)
 
 EXPORT_SYMBOL_GPL (__blk_put_request)
 
void blk_put_request (struct request *req)
 
 EXPORT_SYMBOL (blk_put_request)
 
void blk_add_request_payload (struct request *rq, struct page *page, unsigned int len)
 
 EXPORT_SYMBOL_GPL (blk_add_request_payload)
 
void init_request_from_bio (struct request *req, struct bio *bio)
 
void blk_queue_bio (struct request_queue *q, struct bio *bio)
 
 EXPORT_SYMBOL_GPL (blk_queue_bio)
 
void generic_make_request (struct bio *bio)
 
 EXPORT_SYMBOL (generic_make_request)
 
void submit_bio (int rw, struct bio *bio)
 
 EXPORT_SYMBOL (submit_bio)
 
int blk_rq_check_limits (struct request_queue *q, struct request *rq)
 
 EXPORT_SYMBOL_GPL (blk_rq_check_limits)
 
int blk_insert_cloned_request (struct request_queue *q, struct request *rq)
 
 EXPORT_SYMBOL_GPL (blk_insert_cloned_request)
 
unsigned int blk_rq_err_bytes (const struct request *rq)
 
 EXPORT_SYMBOL_GPL (blk_rq_err_bytes)
 
struct request * blk_peek_request (struct request_queue *q)
 
 EXPORT_SYMBOL (blk_peek_request)
 
void blk_dequeue_request (struct request *rq)
 
void blk_start_request (struct request *req)
 
 EXPORT_SYMBOL (blk_start_request)
 
struct request * blk_fetch_request (struct request_queue *q)
 
 EXPORT_SYMBOL (blk_fetch_request)
 
bool blk_update_request (struct request *req, int error, unsigned int nr_bytes)
 
 EXPORT_SYMBOL_GPL (blk_update_request)
 
void blk_unprep_request (struct request *req)
 
 EXPORT_SYMBOL_GPL (blk_unprep_request)
 
bool __blk_end_bidi_request (struct request *rq, int error, unsigned int nr_bytes, unsigned int bidi_bytes)
 
bool blk_end_request (struct request *rq, int error, unsigned int nr_bytes)
 
 EXPORT_SYMBOL (blk_end_request)
 
void blk_end_request_all (struct request *rq, int error)
 
 EXPORT_SYMBOL (blk_end_request_all)
 
bool blk_end_request_cur (struct request *rq, int error)
 
 EXPORT_SYMBOL (blk_end_request_cur)
 
bool blk_end_request_err (struct request *rq, int error)
 
 EXPORT_SYMBOL_GPL (blk_end_request_err)
 
bool __blk_end_request (struct request *rq, int error, unsigned int nr_bytes)
 
 EXPORT_SYMBOL (__blk_end_request)
 
void __blk_end_request_all (struct request *rq, int error)
 
 EXPORT_SYMBOL (__blk_end_request_all)
 
bool __blk_end_request_cur (struct request *rq, int error)
 
 EXPORT_SYMBOL (__blk_end_request_cur)
 
bool __blk_end_request_err (struct request *rq, int error)
 
 EXPORT_SYMBOL_GPL (__blk_end_request_err)
 
void blk_rq_bio_prep (struct request_queue *q, struct request *rq, struct bio *bio)
 
int blk_lld_busy (struct request_queue *q)
 
 EXPORT_SYMBOL_GPL (blk_lld_busy)
 
void blk_rq_unprep_clone (struct request *rq)
 
 EXPORT_SYMBOL_GPL (blk_rq_unprep_clone)
 
int blk_rq_prep_clone (struct request *rq, struct request *rq_src, struct bio_set *bs, gfp_t gfp_mask, int(*bio_ctr)(struct bio *, struct bio *, void *), void *data)
 
 EXPORT_SYMBOL_GPL (blk_rq_prep_clone)
 
int kblockd_schedule_work (struct request_queue *q, struct work_struct *work)
 
 EXPORT_SYMBOL (kblockd_schedule_work)
 
int kblockd_schedule_delayed_work (struct request_queue *q, struct delayed_work *dwork, unsigned long delay)
 
 EXPORT_SYMBOL (kblockd_schedule_delayed_work)
 
void blk_start_plug (struct blk_plug *plug)
 
 EXPORT_SYMBOL (blk_start_plug)
 
struct blk_plug_cb * blk_check_plugged (blk_plug_cb_fn unplug, void *data, int size)
 
 EXPORT_SYMBOL (blk_check_plugged)
 
void blk_flush_plug_list (struct blk_plug *plug, bool from_schedule)
 
void blk_finish_plug (struct blk_plug *plug)
 
 EXPORT_SYMBOL (blk_finish_plug)
 
int __init blk_dev_init (void)
 

Variables

struct kmem_cache * blk_requestq_cachep
 

Macro Definition Documentation

#define CREATE_TRACE_POINTS

Definition at line 34 of file blk-core.c.

#define PLUG_MAGIC   0x91827364

Definition at line 2827 of file blk-core.c.

Function Documentation

bool __blk_end_bidi_request ( struct request *  rq,
int  error,
unsigned int  nr_bytes,
unsigned int  bidi_bytes 
)

__blk_end_bidi_request - Complete a bidi request with queue lock held
rq: the request to complete
error: %0 for success, < %0 for error
nr_bytes: number of bytes to complete rq
bidi_bytes: number of bytes to complete rq->next_rq

Description: Identical to blk_end_bidi_request() except that queue lock is assumed to be locked on entry and remains so on return.

Return: false - we are done with this request true - still buffers pending for this request

Definition at line 2488 of file blk-core.c.

bool __blk_end_request ( struct request *  rq,
int  error,
unsigned int  nr_bytes 
)

__blk_end_request - Helper function for drivers to complete the request.
rq: the request being processed
error: %0 for success, < %0 for error
nr_bytes: number of bytes to complete

Description: Must be called with queue lock held unlike blk_end_request().

Return: false - we are done with this request true - still buffers pending for this request

Definition at line 2590 of file blk-core.c.

void __blk_end_request_all ( struct request *  rq,
int  error 
)

__blk_end_request_all - Helper function for drivers to finish the request.
rq: the request to finish
error: %0 for success, < %0 for error

Description: Completely finish rq. Must be called with queue lock held.

Definition at line 2604 of file blk-core.c.

bool __blk_end_request_cur ( struct request *  rq,
int  error 
)

__blk_end_request_cur - Helper function to finish the current request chunk.
rq: the request to finish the current chunk for
error: %0 for success, < %0 for error

Description: Complete the current consecutively mapped chunk from rq. Must be called with queue lock held.

Return: false - we are done with this request true - still buffers pending for this request

Definition at line 2630 of file blk-core.c.

bool __blk_end_request_err ( struct request *  rq,
int  error 
)

__blk_end_request_err - Finish a request till the next failure boundary.
rq: the request to finish till the next failure boundary for
error: must be negative errno

Description: Complete rq till the next failure boundary. Must be called with queue lock held.

Return: false - we are done with this request true - still buffers pending for this request

Definition at line 2649 of file blk-core.c.

void __blk_put_request ( struct request_queue *  q,
struct request *  req 
)

Definition at line 1229 of file blk-core.c.

void __blk_run_queue ( struct request_queue *  q)

__blk_run_queue - run a single device queue
q: The queue to run

Description: See blk_run_queue(). This variant must be called with the queue lock held and interrupts disabled.

Definition at line 303 of file blk-core.c.

void blk_add_request_payload ( struct request *  rq,
struct page *  page,
unsigned int  len 
)

Definition at line 1283 of file blk-core.c.

struct request_queue* blk_alloc_queue ( gfp_t  gfp_mask)

Definition at line 550 of file blk-core.c.

struct request_queue* blk_alloc_queue_node ( gfp_t  gfp_mask,
int  node_id 
)

Definition at line 556 of file blk-core.c.

struct blk_plug_cb* blk_check_plugged ( blk_plug_cb_fn  unplug,
void *  data,
int  size 
)

Definition at line 2927 of file blk-core.c.

void blk_cleanup_queue ( struct request_queue *  q)

blk_cleanup_queue - shutdown a request queue
q: request queue to shutdown

Mark q DEAD, drain all pending requests, destroy and put it. All future requests will be failed immediately with -ENODEV.

Definition at line 479 of file blk-core.c.

void blk_delay_queue ( struct request_queue *  q,
unsigned long  msecs 
)

blk_delay_queue - restart queueing after defined interval
q: The &struct request_queue in question
msecs: Delay in msecs

Description: Sometimes queueing needs to be postponed for a little while, to allow resources to come back. This function will make sure that queueing is restarted around the specified time.

Definition at line 224 of file blk-core.c.
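
For illustration, a minimal sketch of how a driver's request handler might use blk_delay_queue() when a device resource is temporarily exhausted; the my_* helpers and the 3 ms interval are assumptions of this sketch, not part of blk-core.c.

#include <linux/blkdev.h>

/* Hypothetical driver helper: returns non-zero while the hardware can accept work. */
extern int my_dev_has_tags(void *driver_data);

static void my_request_fn(struct request_queue *q)
{
	if (!my_dev_has_tags(q->queuedata)) {
		/* Out of hardware resources: ask the block layer to invoke
		 * us again in roughly 3 ms instead of busy-polling. */
		blk_delay_queue(q, 3);
		return;
	}
	/* ... fetch and dispatch requests as usual ... */
}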

void blk_dequeue_request ( struct request *  rq)

Definition at line 2120 of file blk-core.c.

int __init blk_dev_init ( void  )

Definition at line 3033 of file blk-core.c.

void blk_drain_queue ( struct request_queue *  q,
bool  drain_all 
)

blk_drain_queue - drain requests from request_queue
q: queue to drain
drain_all: whether to drain all requests or only the ones w/ ELVPRIV

Drain requests from q. If drain_all is set, all requests are drained. If not, only ELVPRIV requests are drained. The caller is responsible for ensuring that no new requests which need to be drained are queued.

Definition at line 360 of file blk-core.c.

void blk_dump_rq_flags ( struct request *  rq,
char *  msg 
)

Definition at line 181 of file blk-core.c.

bool blk_end_request ( struct request *  rq,
int  error,
unsigned int  nr_bytes 
)

blk_end_request - Helper function for drivers to complete the request.
rq: the request being processed
error: %0 for success, < %0 for error
nr_bytes: number of bytes to complete

Description: Ends I/O on a number of bytes attached to rq. If rq has leftover, sets it up for the next range of segments.

Return: false - we are done with this request true - still buffers pending for this request

Definition at line 2513 of file blk-core.c.
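
As a hedged sketch of the return-value convention, a driver completing one 512-byte chunk at a time from a context that does not hold the queue lock might do the following; struct my_dev and the per-device bookkeeping are illustrative assumptions.

#include <linux/blkdev.h>

struct my_dev {
	struct request *current_rq;	/* request the hardware is working on */
};

static void my_chunk_done(struct my_dev *dev, int error)
{
	/* Account 512 bytes; blk_end_request() returns false once the whole
	 * request is finished, after which it must not be touched again. */
	if (!blk_end_request(dev->current_rq, error, 512))
		dev->current_rq = NULL;
}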

void blk_end_request_all ( struct request *  rq,
int  error 
)

blk_end_request_all - Helper function for drivers to finish the request.
rq: the request to finish
error: %0 for success, < %0 for error

Description: Completely finish rq.

Definition at line 2527 of file blk-core.c.

bool blk_end_request_cur ( struct request *  rq,
int  error 
)

blk_end_request_cur - Helper function to finish the current request chunk.
rq: the request to finish the current chunk for
error: %0 for success, < %0 for error

Description: Complete the current consecutively mapped chunk from rq.

Return: false - we are done with this request true - still buffers pending for this request

Definition at line 2552 of file blk-core.c.

bool blk_end_request_err ( struct request *  rq,
int  error 
)

blk_end_request_err - Finish a request till the next failure boundary.
rq: the request to finish till the next failure boundary for
error: must be negative errno

Description: Complete rq till the next failure boundary.

Return: false - we are done with this request true - still buffers pending for this request

Definition at line 2570 of file blk-core.c.

void blk_exit_rl ( struct request_list *  rl)

Definition at line 544 of file blk-core.c.

struct request* blk_fetch_request ( struct request_queue *  q)

blk_fetch_request - fetch a request from a request queue
q: request queue to fetch a request from

Description: Return the request at the top of q. The request is started on return and the LLD can start processing it immediately.

Return: Pointer to the request at the top of q if available. NULL otherwise.

Context: queue_lock must be held.

Definition at line 2185 of file blk-core.c.
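
A minimal request_fn sketch for a simple memory-backed device, showing the blk_fetch_request() loop together with the __blk_end_request_cur()/__blk_end_request_all() helpers documented above; my_ram_xfer() and the use of rq->buffer for the current chunk are assumptions of this sketch.

#include <linux/blkdev.h>

/* Hypothetical copy helper: moves 'bytes' between the device and 'buf'. */
extern void my_ram_xfer(void *dev, sector_t sector, char *buf,
			unsigned int bytes, int write);

static void my_ram_request_fn(struct request_queue *q)
{
	struct request *rq;

	/* Called with the queue lock held, so the __blk_* variants apply. */
	while ((rq = blk_fetch_request(q)) != NULL) {
		if (rq->cmd_type != REQ_TYPE_FS) {
			__blk_end_request_all(rq, -EIO);
			continue;
		}
		/* Transfer and complete one contiguous chunk at a time;
		 * __blk_end_request_cur() returns true while chunks remain. */
		do {
			my_ram_xfer(q->queuedata, blk_rq_pos(rq), rq->buffer,
				    blk_rq_cur_bytes(rq), rq_data_dir(rq));
		} while (__blk_end_request_cur(rq, 0));
	}
}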

void blk_finish_plug ( struct blk_plug *  plug)

Definition at line 3024 of file blk-core.c.

void blk_flush_plug_list ( struct blk_plug *  plug,
bool  from_schedule 
)

Definition at line 2952 of file blk-core.c.

struct backing_dev_info* blk_get_backing_dev_info ( struct block_device *  bdev)

blk_get_backing_dev_info - get the address of a queue's backing_dev_info
bdev: device

Locates the passed device's request queue and returns the address of its backing_dev_info

Will return NULL if the request queue cannot be located.

Definition at line 121 of file blk-core.c.

bool blk_get_queue ( struct request_queue *  q)

Definition at line 717 of file blk-core.c.

struct request* blk_get_request ( struct request_queue *  q,
int  rw,
gfp_t  gfp_mask 
)

Definition at line 1080 of file blk-core.c.
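
blk_get_request() is the usual way to obtain a request for a driver-private or passthrough command. A hedged sketch of the common allocate/map/execute/put sequence follows; it additionally relies on blk_rq_map_kern() and blk_execute_rq() from elsewhere in the block layer, and the timeout value and elided command setup are placeholders.

#include <linux/blkdev.h>

static int my_send_passthrough(struct request_queue *q, struct gendisk *disk,
			       void *buf, unsigned int len)
{
	struct request *rq;
	int err;

	rq = blk_get_request(q, READ, GFP_KERNEL);
	if (!rq)
		return -ENOMEM;

	rq->cmd_type = REQ_TYPE_BLOCK_PC;
	rq->timeout = 60 * HZ;
	/* fill in rq->cmd / rq->cmd_len for the device's command set here */

	err = blk_rq_map_kern(q, rq, buf, len, GFP_KERNEL);
	if (!err)
		err = blk_execute_rq(q, disk, rq, 0);	/* waits for completion */

	blk_put_request(rq);
	return err;
}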

struct request_queue* blk_init_allocated_queue ( struct request_queue *  q,
request_fn_proc *  rfn,
spinlock_t *  lock 
)

Definition at line 685 of file blk-core.c.

struct request_queue* blk_init_queue ( request_fn_proc *  rfn,
spinlock_t *  lock 
)

blk_init_queue - prepare a request queue for use with a block device
rfn: The function to be called to process requests that have been placed on the queue.
lock: Request queue spin lock

Description: If a block device wishes to use the standard request handling procedures, which sort requests and coalesce adjacent requests, then it must call blk_init_queue(). The function rfn will be called when there are requests on the queue that need to be processed. If the device supports plugging, then rfn may not be called immediately when requests are available on the queue, but may be called at some time later instead. Plugged queues are generally unplugged when a buffer belonging to one of the requests on the queue is needed, or due to memory pressure.

rfn is not required, or even expected, to remove all requests off the queue, but only as many as it can handle at a time. If it does leave requests on the queue, it is responsible for arranging that the requests get dealt with eventually.

The queue spin lock must be held while manipulating the requests on the request queue; this lock will be taken also from interrupt context, so irq disabling is needed for it.

Function returns a pointer to the initialized request queue, or NULL if it didn't succeed.

Note: blk_init_queue() must be paired with a blk_cleanup_queue() call when the block device is deactivated (such as at module unload).

Definition at line 661 of file blk-core.c.
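
A minimal sketch of the pairing described in the note above, assuming the hypothetical my_request_fn() from the earlier blk_delay_queue() sketch and a driver-owned spinlock; the limit values passed to the blk_queue_* setters are arbitrary.

#include <linux/blkdev.h>
#include <linux/spinlock.h>

extern void my_request_fn(struct request_queue *q);	/* hypothetical */

static DEFINE_SPINLOCK(my_queue_lock);
static struct request_queue *my_queue;

static int my_create_queue(void)
{
	my_queue = blk_init_queue(my_request_fn, &my_queue_lock);
	if (!my_queue)
		return -ENOMEM;

	/* Describe device limits before submitting any I/O. */
	blk_queue_logical_block_size(my_queue, 512);
	blk_queue_max_hw_sectors(my_queue, 256);
	return 0;
}

static void my_destroy_queue(void)
{
	/* Required counterpart at module unload / device removal. */
	blk_cleanup_queue(my_queue);
}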

struct request_queue* blk_init_queue_node ( request_fn_proc *  rfn,
spinlock_t *  lock,
int  node_id 
)

Definition at line 668 of file blk-core.c.

int blk_init_rl ( struct request_list *  rl,
struct request_queue *  q,
gfp_t  gfp_mask 
)

Definition at line 523 of file blk-core.c.

int blk_insert_cloned_request ( struct request_queue *  q,
struct request *  rq 
)

blk_insert_cloned_request - Helper for stacking drivers to submit a request
q: the queue to submit the request
rq: the request being queued

Definition at line 1900 of file blk-core.c.

int blk_lld_busy ( struct request_queue *  q)

blk_lld_busy - Check if underlying low-level drivers of a device are busy
q: the queue of the device being checked

Description: Check if underlying low-level drivers of a device are busy. If the drivers want to export their busy state, they must set own exporting function using blk_queue_lld_busy() first.

Basically, this function is used only by request stacking drivers to stop dispatching requests to underlying devices when underlying devices are busy. This behavior helps more I/O merging on the queue of the request stacking driver and prevents I/O throughput regression on burst I/O load.

Return: 0 - Not busy (The request stacking driver should dispatch request) 1 - Busy (The request stacking driver should stop dispatching request)

Definition at line 2711 of file blk-core.c.
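
For a low-level driver to participate in this scheme it registers a busy callback with blk_queue_lld_busy() (declared in blkdev.h and implemented in blk-settings.c); a hedged sketch, with my_dev_is_full() standing in for the driver's real bookkeeping:

#include <linux/blkdev.h>

extern int my_dev_is_full(void *driver_data);	/* hypothetical */

static int my_lld_busy(struct request_queue *q)
{
	/* 1 = busy, 0 = able to accept more requests (see Return above). */
	return my_dev_is_full(q->queuedata);
}

static void my_register_busy_fn(struct request_queue *q)
{
	blk_queue_lld_busy(q, my_lld_busy);	/* done once at queue setup */
}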

struct request* blk_make_request ( struct request_queue *  q,
struct bio *  bio,
gfp_t  gfp_mask 
)

blk_make_request - given a bio, allocate a corresponding struct request.
q: target request queue
bio: The bio describing the memory mappings that will be submitted for IO. It may be a chained-bio properly constructed by block/bio layer.
gfp_mask: gfp flags to be used for memory allocation

blk_make_request is the parallel of generic_make_request for BLOCK_PC type commands, where the struct request needs to be further initialized by the caller. It is passed a &struct bio, which describes the memory info of the I/O transfer.

The caller of blk_make_request must make sure that bi_io_vec is set to describe the memory buffers, and that bio_data_dir() will return the needed direction of the request (and that all bios in the passed bio-chain are properly set accordingly).

If called under non-sleepable conditions, mapped bio buffers must not need bouncing; use the appropriately masked or flagged allocator, suitable for the target device. Otherwise the call to blk_queue_bounce will BUG.

WARNING: When allocating/cloning a bio-chain, careful consideration should be given to how you allocate bios. In particular, you cannot use __GFP_WAIT for anything but the first bio in the chain. Otherwise you risk waiting for IO completion of a bio that hasn't been submitted yet, thus resulting in a deadlock. Alternatively bios should be allocated using bio_kmalloc() instead of bio_alloc(), as that avoids the mempool deadlock. If possible a big IO should be split into smaller parts when allocation fails. Partial allocation should not be an error, or you risk a live-lock.

Definition at line 1130 of file blk-core.c.
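
A hedged sketch of the pattern this helper is meant for: the caller has already built a bio (or bio chain) describing its buffers and wants a BLOCK_PC request wrapped around it; blk_execute_rq() and the timeout are assumptions of the sketch, and the actual command bytes are left as a placeholder.

#include <linux/blkdev.h>

static int my_issue_pc_bio(struct request_queue *q, struct gendisk *disk,
			   struct bio *bio)
{
	struct request *rq;
	int err;

	rq = blk_make_request(q, bio, GFP_KERNEL);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	rq->cmd_type = REQ_TYPE_BLOCK_PC;
	rq->timeout = 30 * HZ;
	/* fill in rq->cmd / rq->cmd_len for the device's protocol here */

	err = blk_execute_rq(q, disk, rq, 0);	/* synchronous execution */
	blk_put_request(rq);
	return err;
}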

struct request* blk_peek_request ( struct request_queue *  q)

blk_peek_request - peek at the top of a request queue
q: request queue to peek at

Description: Return the request at the top of q. The returned request should be started using blk_start_request() before the LLD starts processing it.

Return: Pointer to the request at the top of q if available. NULL otherwise.

Context: queue_lock must be held.

Definition at line 2036 of file blk-core.c.

void blk_put_queue ( struct request_queue *  q)

Definition at line 345 of file blk-core.c.

void blk_put_request ( struct request *  req)

Definition at line 1259 of file blk-core.c.

void blk_queue_bio ( struct request_queue *  q,
struct bio *  bio 
)

Definition at line 1421 of file blk-core.c.

void blk_queue_bypass_end ( struct request_queue *  q)

blk_queue_bypass_end - leave queue bypass mode
q: queue of interest

Leave bypass mode and restore the normal queueing behavior.

Definition at line 462 of file blk-core.c.

void blk_queue_bypass_start ( struct request_queue *  q)

blk_queue_bypass_start - enter queue bypass mode
q: queue of interest

In bypass mode, only the dispatch FIFO queue of q is used. This function makes q enter bypass mode and drains all requests which were throttled or issued before. On return, it's guaranteed that no request is being throttled or has ELVPRIV set and that blk_queue_bypass() returns true inside the queue or RCU read lock.

Definition at line 439 of file blk-core.c.

void blk_queue_congestion_threshold ( struct request_queue *  q)

Definition at line 97 of file blk-core.c.

void blk_requeue_request ( struct request_queue *  q,
struct request *  rq 
)

blk_requeue_request - put a request back on queue
q: request queue where request should be inserted
rq: request to be inserted

Description: Drivers often keep queueing requests until the hardware cannot accept more, when that condition happens we need to put the request back on the queue. Must be called with queue lock held.

Definition at line 1164 of file blk-core.c.
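
A sketch of the situation the description refers to: the request was already dequeued, the hardware then refused it, so it is put back and the queue is stopped until the device signals room again (see blk_stop_queue()/blk_start_queue() below). my_hw_queue_command() is a hypothetical driver function; the queue lock is assumed held, as in a request_fn.

#include <linux/blkdev.h>

extern int my_hw_queue_command(void *driver_data, struct request *rq);

static void my_try_dispatch(struct request_queue *q, struct request *rq)
{
	if (my_hw_queue_command(q->queuedata, rq) == -EBUSY) {
		/* Hardware is full: give the request back to the queue and
		 * stop request_fn calls until the completion IRQ restarts us. */
		blk_requeue_request(q, rq);
		blk_stop_queue(q);
	}
}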

void blk_rq_bio_prep ( struct request_queue *  q,
struct request *  rq,
struct bio *  bio 
)

Definition at line 2656 of file blk-core.c.

int blk_rq_check_limits ( struct request_queue *  q,
struct request *  rq 
)

blk_rq_check_limits - Helper function to check a request for the queue limit
q: the queue
rq: the request being checked

Description: rq may have been made based on weaker limitations of upper-level queues in request stacking drivers, and it may violate the limitation of q. Since the block layer and the underlying device driver trust rq after it is inserted to q, it should be checked against q before the insertion using this generic function.

This function should also be useful for request stacking drivers in some cases below, so export this function. Request stacking drivers like request-based dm may change the queue limits while requests are in the queue (e.g. dm's table swapping). Such request stacking drivers should check those requests against the new queue limits again when they dispatch those requests, although such checks are also done against the old queue limits when submitting requests.

Definition at line 1869 of file blk-core.c.

unsigned int blk_rq_err_bytes ( const struct request *  rq)

blk_rq_err_bytes - determine number of bytes till the next failure boundary
rq: request to examine

Description: A request could be a merge of IOs which require different failure handling. This function determines the number of bytes which can be failed from the beginning of the request without crossing into an area which needs to be retried further.

Return: The number of bytes to fail.

Context: queue_lock must be held.

Definition at line 1952 of file blk-core.c.

void blk_rq_init ( struct request_queue *  q,
struct request *  rq 
)

Definition at line 132 of file blk-core.c.

int blk_rq_prep_clone ( struct request *  rq,
struct request *  rq_src,
struct bio_set *  bs,
gfp_t  gfp_mask,
int(*)(struct bio *, struct bio *, void *)  bio_ctr,
void *  data 
)

blk_rq_prep_clone - Helper function to setup clone request
rq: the request to be setup
rq_src: original request to be cloned
bs: bio_set that bios for clone are allocated from
gfp_mask: memory allocation mask for bio
bio_ctr: setup function to be called for each clone bio. Returns %0 for success, non %0 for failure.
data: private data to be passed to bio_ctr

Description: Clones bios in rq_src to rq, and copies attributes of rq_src to rq. The actual data parts of rq_src (e.g. ->cmd, ->buffer, ->sense) are not copied, and copying such parts is the caller's responsibility. Also, pages which the original bios point to are not copied and the cloned bios just point to the same pages. So cloned bios must be completed before the original bios, which means the caller must complete rq before rq_src.

Definition at line 2774 of file blk-core.c.
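
A hedged sketch of the request-stacking pattern (in the spirit of request-based dm): the clone is a driver-owned struct request, initialized with blk_rq_init(), populated from the original with blk_rq_prep_clone(), and then handed to the lower-level queue via blk_insert_cloned_request(). The my_* names, the bio_set and the completion wiring are assumptions, not the dm implementation.

#include <linux/blkdev.h>

/* Per-clone-bio constructor: remember the original request on each clone. */
static int my_clone_bio_ctr(struct bio *clone, struct bio *orig_bio, void *data)
{
	clone->bi_private = data;
	return 0;
}

static int my_stack_dispatch(struct request *orig, struct request *clone,
			     struct request_queue *lower_q, struct bio_set *bs)
{
	int err;

	blk_rq_init(NULL, clone);		/* clone comes from a driver mempool */
	err = blk_rq_prep_clone(clone, orig, bs, GFP_ATOMIC,
				my_clone_bio_ctr, orig);
	if (err)
		return err;

	/* The clone's completion path is expected to finish 'orig', e.g. via
	 * blk_update_request()/blk_end_request_all() (not shown here). */
	return blk_insert_cloned_request(lower_q, clone);
}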

void blk_rq_unprep_clone ( struct request *  rq)

blk_rq_unprep_clone - Helper function to free all bios in a cloned request
rq: the clone request to be cleaned up

Description: Free all bios in rq for a cloned request.

Definition at line 2727 of file blk-core.c.

void blk_run_queue ( struct request_queue *  q)

blk_run_queue - run a single device queue
q: The queue to run

Description: Invoke request handling on this queue, if it has pending work to do. May be used to restart queueing when a request has completed.

Definition at line 335 of file blk-core.c.

void blk_run_queue_async ( struct request_queue *  q)

blk_run_queue_async - run a single device queue in workqueue context
q: The queue to run

Description: Tells kblockd to perform the equivalent of blk_run_queue() on our behalf.

Definition at line 320 of file blk-core.c.

void blk_start_plug ( struct blk_plug *  plug)

blk_start_plug - initialize blk_plug and track it inside the task_struct
plug: The &struct blk_plug that needs to be initialized

Description: Tracking blk_plug inside the task_struct will help with auto-flushing the pending I/O should the task end up blocking between blk_start_plug() and blk_finish_plug(). This is important from a performance perspective, but also ensures that we don't deadlock. For instance, if the task is blocking for a memory allocation, memory reclaim could end up wanting to free a page belonging to that request that is currently residing in our private plug. By flushing the pending I/O when the process goes to sleep, we avoid this kind of deadlock.

Definition at line 2843 of file blk-core.c.
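
A minimal usage sketch of the plug API around a batch of submissions; my_make_bio() is a hypothetical helper that returns a fully initialized bio for slot i.

#include <linux/blkdev.h>
#include <linux/bio.h>

extern struct bio *my_make_bio(struct block_device *bdev, int i);

static void my_submit_batch(struct block_device *bdev, int nr)
{
	struct blk_plug plug;
	int i;

	blk_start_plug(&plug);
	for (i = 0; i < nr; i++)
		submit_bio(WRITE, my_make_bio(bdev, i));
	blk_finish_plug(&plug);	/* flushes everything held in the plug */
}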

void blk_start_queue ( struct request_queue *  q)

blk_start_queue - restart a previously stopped queue
q: The &struct request_queue in question

Description: blk_start_queue() will clear the stop flag on the queue, and call the request_fn for the queue if it was in a stopped state when entered. Also see blk_stop_queue(). Queue lock must be held.

Definition at line 240 of file blk-core.c.

void blk_start_request ( struct request *  req)

blk_start_request - start request processing on the driver
req: request to dequeue

Description: Dequeue req and start the timeout timer on it. This hands off the request to the driver.

Block internal functions which don't want to start timer should call blk_dequeue_request().

Context: queue_lock must be held.

Definition at line 2154 of file blk-core.c.

void blk_stop_queue ( struct request_queue *  q)

blk_stop_queue - stop a queue
q: The &struct request_queue in question

Description: The Linux block layer assumes that a block driver will consume all entries on the request queue when the request_fn strategy is called. Often this will not happen, because of hardware limitations (queue depth settings). If a device driver gets a 'queue full' response, or if it simply chooses not to queue more I/O at one point, it can call this function to prevent the request_fn from being called until the driver has signalled it's ready to go again. This happens by calling blk_start_queue() to restart queue operations. Queue lock must be held.

Definition at line 263 of file blk-core.c.
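
The counterpart of the queue-full protocol above is usually the completion interrupt restarting the queue; a sketch, assuming a hypothetical struct my_dev that records its request_queue and that the device has just freed a slot:

#include <linux/blkdev.h>
#include <linux/interrupt.h>

struct my_dev {
	struct request_queue *queue;
};

static irqreturn_t my_done_irq(int irq, void *data)
{
	struct my_dev *dev = data;
	unsigned long flags;

	/* blk_start_queue() requires the queue lock; it clears the stopped
	 * flag and reruns the request_fn. */
	spin_lock_irqsave(dev->queue->queue_lock, flags);
	blk_start_queue(dev->queue);
	spin_unlock_irqrestore(dev->queue->queue_lock, flags);
	return IRQ_HANDLED;
}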

void blk_sync_queue ( struct request_queue *  q)

blk_sync_queue - cancel any pending callbacks on a queue
q: the queue

Description: The block layer may perform asynchronous callback activity on a queue, such as calling the unplug function after a timeout. A block device may call blk_sync_queue to ensure that any such activity is cancelled, thus allowing it to release resources that the callbacks might use. The caller must already have made sure that its ->make_request_fn will not re-add plugging prior to calling this function.

This function does not cancel any asynchronous activity arising out of elevator or throttling code. That would require elevator_exit() and blkcg_exit_queue() to be called with the queue lock initialized.

Definition at line 288 of file blk-core.c.

void blk_unprep_request ( struct request *  req)

blk_unprep_request - unprepare a request
req: the request

This function makes a request ready for complete resubmission (or completion). It happens only after all error handling is complete, so represents the appropriate moment to deallocate any resources that were allocated to the request in the prep_rq_fn. The queue lock is held when calling this.

Definition at line 2399 of file blk-core.c.

bool blk_update_request ( struct request *  req,
int  error,
unsigned int  nr_bytes 
)

blk_update_request - Special helper function for request stacking drivers
req: the request being processed
error: %0 for success, < %0 for error
nr_bytes: number of bytes to complete

Description: Ends I/O on a number of bytes attached to req, but doesn't complete the request structure even if req doesn't have leftover. If req has leftover, sets it up for the next range of segments.

This special helper function is only for request stacking drivers (e.g. request-based dm) so that they can handle partial completion. Actual device drivers should use blk_end_request instead.

Passing the result of blk_rq_bytes() as nr_bytes guarantees a false return from this function.

Return: false - this request doesn't have any more data true - this request has more data

Definition at line 2218 of file blk-core.c.
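
A hedged sketch of the partial-completion pattern this helper exists for: a request-stacking driver accounts each completed portion against the original request as it arrives, and only finishes the request once its own bookkeeping says everything is done; the 'pending' counter and its locking are left to the caller and are assumptions of the sketch.

#include <linux/blkdev.h>

static void my_stack_portion_done(struct request *orig, unsigned int nr_bytes,
				  int error, unsigned int *pending)
{
	/* Advances orig's sector/byte bookkeeping but never frees it. */
	blk_update_request(orig, error, nr_bytes);

	if (--(*pending) == 0)
		blk_end_request_all(orig, error);	/* now really finish it */
}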

DEFINE_IDA ( blk_queue_ida  )
EXPORT_SYMBOL ( blk_get_backing_dev_info  )
EXPORT_SYMBOL ( blk_rq_init  )
EXPORT_SYMBOL ( blk_dump_rq_flags  )
EXPORT_SYMBOL ( blk_delay_queue  )
EXPORT_SYMBOL ( blk_start_queue  )
EXPORT_SYMBOL ( blk_stop_queue  )
EXPORT_SYMBOL ( blk_sync_queue  )
EXPORT_SYMBOL ( __blk_run_queue  )
EXPORT_SYMBOL ( blk_run_queue_async  )
EXPORT_SYMBOL ( blk_run_queue  )
EXPORT_SYMBOL ( blk_put_queue  )
EXPORT_SYMBOL ( blk_cleanup_queue  )
EXPORT_SYMBOL ( blk_alloc_queue  )
EXPORT_SYMBOL ( blk_alloc_queue_node  )
EXPORT_SYMBOL ( blk_init_queue  )
EXPORT_SYMBOL ( blk_init_queue_node  )
EXPORT_SYMBOL ( blk_init_allocated_queue  )
EXPORT_SYMBOL ( blk_get_queue  )
EXPORT_SYMBOL ( blk_get_request  )
EXPORT_SYMBOL ( blk_make_request  )
EXPORT_SYMBOL ( blk_requeue_request  )
EXPORT_SYMBOL ( blk_put_request  )
EXPORT_SYMBOL ( generic_make_request  )
EXPORT_SYMBOL ( submit_bio  )
EXPORT_SYMBOL ( blk_peek_request  )
EXPORT_SYMBOL ( blk_start_request  )
EXPORT_SYMBOL ( blk_fetch_request  )
EXPORT_SYMBOL ( blk_end_request  )
EXPORT_SYMBOL ( blk_end_request_all  )
EXPORT_SYMBOL ( blk_end_request_cur  )
EXPORT_SYMBOL ( __blk_end_request  )
EXPORT_SYMBOL ( __blk_end_request_all  )
EXPORT_SYMBOL ( __blk_end_request_cur  )
EXPORT_SYMBOL ( kblockd_schedule_work  )
EXPORT_SYMBOL ( kblockd_schedule_delayed_work  )
EXPORT_SYMBOL ( blk_start_plug  )
EXPORT_SYMBOL ( blk_check_plugged  )
EXPORT_SYMBOL ( blk_finish_plug  )
EXPORT_SYMBOL_GPL ( blk_queue_bypass_start  )
EXPORT_SYMBOL_GPL ( blk_queue_bypass_end  )
EXPORT_SYMBOL_GPL ( part_round_stats  )
EXPORT_SYMBOL_GPL ( __blk_put_request  )
EXPORT_SYMBOL_GPL ( blk_add_request_payload  )
EXPORT_SYMBOL_GPL ( blk_queue_bio  )
EXPORT_SYMBOL_GPL ( blk_rq_check_limits  )
EXPORT_SYMBOL_GPL ( blk_insert_cloned_request  )
EXPORT_SYMBOL_GPL ( blk_rq_err_bytes  )
EXPORT_SYMBOL_GPL ( blk_update_request  )
EXPORT_SYMBOL_GPL ( blk_unprep_request  )
EXPORT_SYMBOL_GPL ( blk_end_request_err  )
EXPORT_SYMBOL_GPL ( __blk_end_request_err  )
EXPORT_SYMBOL_GPL ( blk_lld_busy  )
EXPORT_SYMBOL_GPL ( blk_rq_unprep_clone  )
EXPORT_SYMBOL_GPL ( blk_rq_prep_clone  )
EXPORT_TRACEPOINT_SYMBOL_GPL ( block_bio_remap  )
EXPORT_TRACEPOINT_SYMBOL_GPL ( block_rq_remap  )
EXPORT_TRACEPOINT_SYMBOL_GPL ( block_bio_complete  )
void generic_make_request ( struct bio *  bio)

generic_make_request - hand a buffer to its device driver for I/O
bio: The bio describing the location in memory and on the device.

generic_make_request() is used to make I/O requests of block devices. It is passed a &struct bio, which describes the I/O that needs to be done.

generic_make_request() does not return any status. The success/failure status of the request, along with notification of completion, is delivered asynchronously through the bio->bi_end_io function described (one day) elsewhere.

The caller of generic_make_request must make sure that bi_io_vec is set to describe the memory buffer, that bi_bdev and bi_sector are set to describe the device address, and that bi_end_io and optionally bi_private are set to describe how completion notification should be signaled.

generic_make_request and the drivers it calls may use bi_next if this bio happens to be merged with someone else, and may resubmit the bio to a lower device by calling into generic_make_request recursively, which means the bio should NOT be touched after the call to ->make_request_fn.

Definition at line 1750 of file blk-core.c.
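
For a bio-based driver (one without a request_fn), the ->make_request_fn mentioned above is installed with blk_queue_make_request() on a queue from blk_alloc_queue(); a minimal sketch, with my_handle_bio() standing in for the driver's real work:

#include <linux/blkdev.h>
#include <linux/bio.h>

extern void my_handle_bio(void *driver_data, struct bio *bio);	/* hypothetical */

static void my_make_request(struct request_queue *q, struct bio *bio)
{
	my_handle_bio(q->queuedata, bio);
	bio_endio(bio, 0);	/* signal completion to the submitter */
}

static struct request_queue *my_alloc_bio_queue(void *driver_data)
{
	struct request_queue *q = blk_alloc_queue(GFP_KERNEL);

	if (!q)
		return NULL;
	q->queuedata = driver_data;
	blk_queue_make_request(q, my_make_request);
	return q;
}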

void init_request_from_bio ( struct request *  req,
struct bio *  bio 
)

Definition at line 1407 of file blk-core.c.

int kblockd_schedule_delayed_work ( struct request_queue *  q,
struct delayed_work *  dwork,
unsigned long  delay 
)

Definition at line 2820 of file blk-core.c.

int kblockd_schedule_work ( struct request_queue *  q,
struct work_struct *  work 
)

Definition at line 2814 of file blk-core.c.

void part_round_stats ( int  cpu,
struct hd_struct *  part 
)

part_round_stats() - Round off the performance stats on a struct disk_stats.
cpu: cpu number for stats access
part: target partition

The average IO queue length and utilisation statistics are maintained by observing the current state of the queue length and the amount of time it has been in this state for.

Normally, that accounting is done on IO completion, but that can result in more than a second's worth of IO being accounted for within any one second, leading to >100% utilisation. To deal with that, we call this function to do a round-off before returning the results when reading /proc/diskstats. This accounts immediately for all queue usage up to the current jiffies and restarts the counters again.

Definition at line 1216 of file blk-core.c.

void submit_bio ( int  rw,
struct bio *  bio 
)

submit_bio - submit a bio to the block device layer for I/O
rw: whether to READ or WRITE, or maybe to READA (read ahead)
bio: The &struct bio which describes the I/O

submit_bio() is very similar in purpose to generic_make_request(), and uses that function to do most of the work. Both are fairly rough interfaces; the bio must be set up and ready for I/O.

Definition at line 1810 of file blk-core.c.
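
A minimal sketch of setting up a bio as described for generic_make_request() and handing it in through submit_bio(), waiting for completion via bi_end_io; the single-page read and the choice of sector are illustrative only.

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/completion.h>

static void my_read_endio(struct bio *bio, int error)
{
	complete((struct completion *)bio->bi_private);
}

static int my_read_one_page(struct block_device *bdev, sector_t sector,
			    struct page *page)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct bio *bio = bio_alloc(GFP_NOIO, 1);
	int err;

	if (!bio)
		return -ENOMEM;

	bio->bi_bdev = bdev;		/* device address ... */
	bio->bi_sector = sector;	/* ... and starting sector */
	bio_add_page(bio, page, PAGE_SIZE, 0);
	bio->bi_end_io = my_read_endio;
	bio->bi_private = &done;

	submit_bio(READ, bio);
	wait_for_completion(&done);

	err = test_bit(BIO_UPTODATE, &bio->bi_flags) ? 0 : -EIO;
	bio_put(bio);
	return err;
}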

Variable Documentation

struct kmem_cache* blk_requestq_cachep

Definition at line 54 of file blk-core.c.