/*
 * NOTE(review): the leading "7"/"10" tokens on these lines look like
 * original-source line numbers left behind by a file extraction, not
 * valid C -- confirm and strip against the original source (this file
 * resembles the Linux kernel's block/blk.h).
 */
/* Batch window for request completion -- HZ/50, presumably 20ms in jiffies
 * (HZ = timer ticks per second in the kernel) -- TODO confirm. */
7 #define BLK_BATCH_TIME (HZ/50UL)
/* Upper bound on the number of requests processed in one batch. */
10 #define BLK_BATCH_REQ 32
/*
 * NOTE(review): trailing parameters of a function prototype whose opening
 * (original lines before 34) is missing from this fragment. Given the
 * nr_bytes/bidi_bytes pair, presumably a bidirectional request-completion
 * helper -- confirm against the full source.
 */
34 unsigned int nr_bytes,
unsigned int bidi_bytes);
/*
 * blk_mark_rq_complete -- signature only; the body (original lines after
 * 51) is missing from this fragment. NOTE(review): from the name and the
 * paired blk_clear_rq_complete(), presumably sets a per-request
 * "complete" flag and returns its previous state -- confirm against the
 * full source before relying on this.
 */
51 static inline int blk_mark_rq_complete(
struct request *
rq)
/*
 * blk_clear_rq_complete -- signature only; the body (original lines after
 * 56) is missing from this fragment. NOTE(review): presumably clears the
 * per-request "complete" flag set by blk_mark_rq_complete() -- confirm
 * against the full source.
 */
56 static inline void blk_clear_rq_complete(
struct request *
rq)
/* Nonzero iff @rq is currently linked on a hash list via its ->hash node
 * (i.e. hlist_unhashed() is false). Used to test elevator hash membership. */
64 #define ELV_ON_HASH(rq) (!hlist_unhashed(&(rq)->hash))
/*
 * NOTE(review): non-contiguous interior of a request-fetch/dispatch
 * routine -- the original line numbers jump 75 -> 94 and 96 -> 100, so the
 * enclosing function header and most of its body are missing. What is
 * visible: take the first request off q->queue_head if non-empty; defer
 * when a flush is pending (flush_pending_idx != flush_running_idx) and the
 * queue cannot queue flushes, by setting flush_queue_delayed; otherwise
 * call the elevator's dispatch hook. Do not edit without the full source.
 */
74 if (!list_empty(&q->queue_head)) {
75 rq = list_entry_rq(q->queue_head.next);
94 if (q->flush_pending_idx != q->flush_running_idx &&
95 !queue_flush_queueable(q)) {
96 q->flush_queue_delayed = 1;
100 !q->elevator->type->ops.elevator_dispatch_fn(q, 0))
/*
 * NOTE(review): body fragment of an elevator activate-request helper --
 * the function header (original lines before 107) is missing. Visible
 * behavior: fetch the queue's elevator and invoke its activate_req hook
 * on @rq, but only if the scheduler registered one.
 */
107 struct elevator_queue *
e = q->elevator;
109 if (e->type->ops.elevator_activate_req_fn)
110 e->type->ops.elevator_activate_req_fn(q, rq);
/*
 * NOTE(review): body fragment of an elevator deactivate-request helper --
 * the function header is missing from this fragment. Visible behavior:
 * invoke the scheduler's deactivate_req hook on @rq if one is registered
 * (mirrors the activate fragment above).
 */
115 struct elevator_queue *e = q->elevator;
117 if (e->type->ops.elevator_deactivate_req_fn)
118 e->type->ops.elevator_deactivate_req_fn(q, rq);
/*
 * Timeout fault-injection support, compiled only when
 * CONFIG_FAIL_IO_TIMEOUT is enabled. NOTE(review): only a prototype tail
 * (const char *, size_t -- presumably a sysfs store-style setter) and the
 * blk_should_fake_timeout() signature survive in this fragment; the
 * bodies and the matching #else/#endif are missing. Confirm against the
 * full source.
 */
121 #ifdef CONFIG_FAIL_IO_TIMEOUT
125 const char *,
size_t);
127 static inline int blk_should_fake_timeout(
struct request_queue *q)
/*
 * queue_congestion_on_threshold -- returns the request count at which the
 * queue is marked congested (q->nr_congestion_on). NOTE(review): the
 * brace lines (original 157/159) are missing from this fragment; the
 * function is otherwise a one-line accessor.
 */
156 static inline int queue_congestion_on_threshold(
struct request_queue *q)
158 return q->nr_congestion_on;
/*
 * queue_congestion_off_threshold -- returns the request count below which
 * the queue is no longer considered congested (q->nr_congestion_off).
 * NOTE(review): brace lines (original 165/167) are missing from this
 * fragment; otherwise a one-line accessor.
 */
164 static inline int queue_congestion_off_threshold(
struct request_queue *q)
166 return q->nr_congestion_off;
/*
 * blk_do_io_stat -- whether I/O accounting should be performed for @rq:
 * requires an associated disk (rq->rq_disk) and a filesystem request
 * (REQ_TYPE_FS). NOTE(review): original line 179 is missing from this
 * fragment -- presumably an additional condition in the conjunction (and
 * the braces are dropped too); confirm against the full source.
 */
176 static inline int blk_do_io_stat(
struct request *rq)
178 return rq->rq_disk &&
180 (rq->cmd_type == REQ_TYPE_FS);
217 #ifdef CONFIG_BLK_DEV_THROTTLING