#ifndef _LINUX_BLKDEV_H
#define _LINUX_BLKDEV_H

#include <linux/sched.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/bsg.h>

#include <asm/scatterlist.h>

struct scsi_ioctl_command;

struct elevator_queue;
struct request_pm_state;

#define BLKDEV_MIN_RQ	4
#define BLKDEV_MAX_RQ	128

#define BLKCG_MAX_POLS	2
#define BLK_RL_SYNCFULL		(1U << 0)	/* read queue has been filled */
#define BLK_RL_ASYNCFULL	(1U << 1)	/* write queue has been filled */

#ifdef CONFIG_BLK_CGROUP
	/* ... */
#endif
enum rq_cmd_type_bits {
	REQ_TYPE_FS		= 1,	/* fs request */
	/* ... */
	REQ_TYPE_ATA_TASKFILE,
	/* ... */
};

#define BLK_MAX_CDB	16
	unsigned int cmd_flags;
	enum rq_cmd_type_bits cmd_type;
	unsigned long atomic_flags;

	unsigned int __data_len;	/* total data len */

	void *completion_data;

	rq_end_io_fn *saved_end_io;

	struct gendisk *rq_disk;
	struct hd_struct *part;
#ifdef CONFIG_BLK_CGROUP
	struct request_list *rl;		/* rl this rq is alloced from */
	unsigned long long start_time_ns;
	unsigned long long io_start_time_ns;	/* when passed to hardware */
#endif
	/* Number of scatter-gather DMA addr+len pairs after
	 * physical address coalescing is performed.
	 */
	unsigned short nr_phys_segments;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
	unsigned short nr_integrity_segments;
#endif
	unsigned short ioprio;

	unsigned char __cmd[BLK_MAX_CDB];

	unsigned int extra_len;		/* length of alignment and padding */
	unsigned int resid_len;		/* residual count */

	unsigned long deadline;

	rq_end_io_fn *end_io;
static inline unsigned short req_get_ioprio(struct request *req)
{
	return req->ioprio;
}
struct request_pm_state
{
	int	pm_step;	/* PM state machine step */
	u32	pm_state;	/* requested suspend/resume state */
	void	*data;		/* for driver use */
};
struct bvec_merge_data {
	struct block_device *bi_bdev;
	sector_t bi_sector;
	unsigned bi_size;
	unsigned long bi_rw;
};
typedef int (merge_bvec_fn) (struct request_queue *, struct bvec_merge_data *,
			     struct bio_vec *);
typedef int (dma_drain_needed_fn)(struct request *);

enum blk_eh_timer_return {
	BLK_EH_NOT_HANDLED,
	BLK_EH_HANDLED,
	BLK_EH_RESET_TIMER,
};

typedef enum blk_eh_timer_return (rq_timed_out_fn)(struct request *);
enum blk_queue_state {
	Queue_down,
	Queue_up,
};

struct blk_queue_tag {
	struct request **tag_index;	/* map of busy tags */
	unsigned long *tag_map;		/* bit map of free/busy tags */
	/* ... */
};

#define BLK_SCSI_MAX_CMDS	(256)
#define BLK_SCSI_CMD_PER_LONG	(BLK_SCSI_MAX_CMDS / (sizeof(long) * 8))
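
/*
 * Worked example (illustrative only): BLK_SCSI_CMD_PER_LONG evaluates to
 * 256 / 64 = 4 on a 64-bit build and 256 / 32 = 8 on a 32-bit build, i.e. the
 * number of unsigned longs needed to keep one filter bit for each of the 256
 * possible SCSI opcodes.
 */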
struct queue_limits {
	unsigned long		bounce_pfn;
	unsigned long		seg_boundary_mask;

	unsigned int		max_hw_sectors;
	unsigned int		max_sectors;
	unsigned int		max_segment_size;
	unsigned int		physical_block_size;
	unsigned int		alignment_offset;
	unsigned int		io_min;
	unsigned int		io_opt;
	unsigned int		max_discard_sectors;
	unsigned int		max_write_same_sectors;
	unsigned int		discard_granularity;
	unsigned int		discard_alignment;

	unsigned short		logical_block_size;
	unsigned short		max_segments;
	unsigned short		max_integrity_segments;

	unsigned char		misaligned;
	unsigned char		discard_misaligned;
	unsigned char		cluster;
	unsigned char		discard_zeroes_data;
};
	struct elevator_queue	*elevator;

	struct request_list	root_rl;

	request_fn_proc		*request_fn;
	make_request_fn		*make_request_fn;
	prep_rq_fn		*prep_rq_fn;
	unprep_rq_fn		*unprep_rq_fn;
	merge_bvec_fn		*merge_bvec_fn;
	softirq_done_fn		*softirq_done_fn;
	rq_timed_out_fn		*rq_timed_out_fn;
	dma_drain_needed_fn	*dma_drain_needed;
	lld_busy_fn		*lld_busy_fn;

	/*
	 * various queue flags, see QUEUE_* below
	 */
	unsigned long		queue_flags;

	/*
	 * queue settings
	 */
	unsigned long		nr_requests;	/* Max # of requests */
	unsigned int		nr_congestion_on;
	unsigned int		nr_congestion_off;
	unsigned int		nr_batching;

	unsigned int		dma_drain_size;
	void			*dma_drain_buffer;
	unsigned int		dma_pad_mask;
	unsigned int		dma_alignment;

	struct blk_queue_tag	*queue_tags;

	unsigned int		nr_sorted;
	unsigned int		in_flight[2];

	unsigned int		rq_timeout;

#ifdef CONFIG_BLK_CGROUP
	/* ... */
#endif

	struct queue_limits	limits;

	/*
	 * sg stuff
	 */
	unsigned int		sg_timeout;
	unsigned int		sg_reserved_size;

#ifdef CONFIG_BLK_DEV_IO_TRACE
	struct blk_trace	*blk_trace;
#endif

	/*
	 * for flush operations
	 */
	unsigned int		flush_flags;
	unsigned int		flush_not_queueable:1;
	unsigned int		flush_queue_delayed:1;
	unsigned int		flush_pending_idx:1;
	unsigned int		flush_running_idx:1;
	unsigned long		flush_pending_since;

	struct mutex		sysfs_lock;

#if defined(CONFIG_BLK_DEV_BSG)
	bsg_job_fn		*bsg_job_fn;
	struct bsg_class_device bsg_dev;
#endif

#ifdef CONFIG_BLK_CGROUP
	/* ... */
#endif
#ifdef CONFIG_BLK_DEV_THROTTLING
	/* ... */
#endif
#define QUEUE_FLAG_QUEUED	1	/* uses generic tag queueing */
#define QUEUE_FLAG_STOPPED	2	/* queue is stopped */
#define QUEUE_FLAG_SYNCFULL	3	/* read queue has been filled */
#define QUEUE_FLAG_ASYNCFULL	4	/* write queue has been filled */
#define QUEUE_FLAG_DEAD		5	/* queue being torn down */
#define QUEUE_FLAG_BYPASS	6	/* act as dumb FIFO queue */
#define QUEUE_FLAG_BIDI		7	/* queue supports bidi requests */
#define QUEUE_FLAG_NOMERGES	8	/* disable merge attempts */
#define QUEUE_FLAG_SAME_COMP	9	/* complete on same CPU-group */
#define QUEUE_FLAG_FAIL_IO	10	/* fake timeout */
#define QUEUE_FLAG_STACKABLE	11	/* supports request stacking */
#define QUEUE_FLAG_NONROT	12	/* non-rotational device (SSD) */
#define QUEUE_FLAG_VIRT		QUEUE_FLAG_NONROT /* paravirt device */
#define QUEUE_FLAG_IO_STAT	13	/* do IO stats */
#define QUEUE_FLAG_DISCARD	14	/* supports DISCARD */
#define QUEUE_FLAG_NOXMERGES	15	/* No extended merges */
#define QUEUE_FLAG_ADD_RANDOM	16	/* Contributes to random pool */
#define QUEUE_FLAG_SECDISCARD	17	/* supports SECDISCARD */
#define QUEUE_FLAG_SAME_FORCE	18	/* force complete on same CPU */
#define QUEUE_FLAG_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
				 (1 << QUEUE_FLAG_STACKABLE) |		\
				 (1 << QUEUE_FLAG_SAME_COMP) |		\
				 (1 << QUEUE_FLAG_ADD_RANDOM))
static inline void queue_lockdep_assert_held(struct request_queue *q)
{
	if (q->queue_lock)
		lockdep_assert_held(q->queue_lock);
}

static inline void queue_flag_set_unlocked(unsigned int flag,
					   struct request_queue *q)
{
	__set_bit(flag, &q->queue_flags);
}

static inline int queue_flag_test_and_clear(unsigned int flag,
					    struct request_queue *q)
{
	queue_lockdep_assert_held(q);
	if (test_bit(flag, &q->queue_flags)) {
		__clear_bit(flag, &q->queue_flags);
		return 1;
	}
	return 0;
}

static inline int queue_flag_test_and_set(unsigned int flag,
					  struct request_queue *q)
{
	queue_lockdep_assert_held(q);
	if (!test_bit(flag, &q->queue_flags)) {
		__set_bit(flag, &q->queue_flags);
		return 1;
	}
	return 0;
}

static inline void queue_flag_set(unsigned int flag, struct request_queue *q)
{
	queue_lockdep_assert_held(q);
	__set_bit(flag, &q->queue_flags);
}

static inline void queue_flag_clear_unlocked(unsigned int flag,
					     struct request_queue *q)
{
	__clear_bit(flag, &q->queue_flags);
}

static inline int queue_in_flight(struct request_queue *q)
{
	return q->in_flight[0] + q->in_flight[1];
}

static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
{
	queue_lockdep_assert_held(q);
	__clear_bit(flag, &q->queue_flags);
}
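
/*
 * Example (illustrative only): the locked helpers above expect q->queue_lock
 * to be held, so a driver marking its device non-rotational after the queue
 * is live would do something like:
 *
 *	spin_lock_irq(q->queue_lock);
 *	queue_flag_set(QUEUE_FLAG_NONROT, q);
 *	spin_unlock_irq(q->queue_lock);
 *
 * queue_flag_set_unlocked() is the variant used before the queue is visible
 * to other contexts, e.g. during initialization.
 */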
#define blk_queue_tagged(q)	test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
#define blk_queue_stopped(q)	test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
#define blk_queue_dead(q)	test_bit(QUEUE_FLAG_DEAD, &(q)->queue_flags)
#define blk_queue_bypass(q)	test_bit(QUEUE_FLAG_BYPASS, &(q)->queue_flags)
#define blk_queue_nomerges(q)	test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
#define blk_queue_noxmerges(q)	\
	test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
#define blk_queue_nonrot(q)	test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
#define blk_queue_io_stat(q)	test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags)
#define blk_queue_add_random(q)	test_bit(QUEUE_FLAG_ADD_RANDOM, &(q)->queue_flags)
#define blk_queue_stackable(q)	\
	test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags)
#define blk_queue_discard(q)	test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags)
#define blk_queue_secdiscard(q)	(blk_queue_discard(q) && \
	test_bit(QUEUE_FLAG_SECDISCARD, &(q)->queue_flags))
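
/*
 * Example (illustrative only): discard submitters first check that the queue
 * advertises the capability via the flag helpers above:
 *
 *	struct request_queue *q = bdev_get_queue(bdev);
 *
 *	if (!q || !blk_queue_discard(q))
 *		return -EOPNOTSUPP;
 */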
#define blk_noretry_request(rq) \
	((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \
			     REQ_FAILFAST_DRIVER))

#define blk_account_rq(rq) \
	(((rq)->cmd_flags & REQ_STARTED) && \
	 ((rq)->cmd_type == REQ_TYPE_FS))

#define blk_pm_request(rq) \
	((rq)->cmd_type == REQ_TYPE_PM_SUSPEND || \
	 (rq)->cmd_type == REQ_TYPE_PM_RESUME)

#define blk_rq_cpu_valid(rq)	((rq)->cpu != -1)
#define blk_bidi_rq(rq)		((rq)->next_rq != NULL)
#define blk_queued_rq(rq)	(!list_empty(&(rq)->queuelist))

#define list_entry_rq(ptr)	list_entry((ptr), struct request, queuelist)

#define rq_data_dir(rq)		((rq)->cmd_flags & 1)
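
/*
 * Example (illustrative only): rq_data_dir() evaluates to 0 for a read and 1
 * for a write, which is why per-direction bookkeeping is usually indexed with
 * it directly (my_stats is a hypothetical two-element array):
 *
 *	my_stats[rq_data_dir(rq)].sectors += blk_rq_sectors(rq);
 */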
static inline unsigned int blk_queue_cluster(struct request_queue *q)
{
	return q->limits.cluster;
}

/*
 * We regard a request as sync, if either a read or a sync write
 */
static inline bool rw_is_sync(unsigned int rw_flags)
{
	return !(rw_flags & REQ_WRITE) || (rw_flags & REQ_SYNC);
}

static inline bool rq_is_sync(struct request *rq)
{
	return rw_is_sync(rq->cmd_flags);
}
static inline bool blk_rl_full(struct request_list *rl, bool sync)
{
	unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;

	return rl->flags & flag;
}

static inline void blk_set_rl_full(struct request_list *rl, bool sync)
{
	unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;

	rl->flags |= flag;
}

static inline void blk_clear_rl_full(struct request_list *rl, bool sync)
{
	unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;

	rl->flags &= ~flag;
}
static inline bool rq_mergeable(struct request *rq)
{
	if (rq->cmd_type != REQ_TYPE_FS)
		return false;

	if (rq->cmd_flags & REQ_NOMERGE_FLAGS)
		return false;

	return true;
}

static inline bool blk_check_merge_flags(unsigned int flags1,
					 unsigned int flags2)
{
	if ((flags1 & REQ_DISCARD) != (flags2 & REQ_DISCARD))
		return false;

	if ((flags1 & REQ_SECURE) != (flags2 & REQ_SECURE))
		return false;

	if ((flags1 & REQ_WRITE_SAME) != (flags2 & REQ_WRITE_SAME))
		return false;

	return true;
}

static inline bool blk_write_same_mergeable(struct bio *a, struct bio *b)
{
	if (bio_data(a) == bio_data(b))
		return true;

	return false;
}
#define BLKPREP_KILL		1	/* fatal error, kill the request */
#define BLKPREP_DEFER		2	/* leave request on queue */

#if BITS_PER_LONG == 32
#define BLK_BOUNCE_HIGH		((u64)blk_max_low_pfn << PAGE_SHIFT)
#else
#define BLK_BOUNCE_HIGH		-1ULL
#endif
#define BLK_BOUNCE_ANY		(-1ULL)
#define BLK_BOUNCE_ISA		(DMA_BIT_MASK(24))

#define BLK_DEFAULT_SG_TIMEOUT	(60 * HZ)
#define BLK_MIN_SG_TIMEOUT	(7 * HZ)
struct req_iterator {
	int i;
	struct bio *bio;
};

/* This should not be used directly - use rq_for_each_segment */
#define for_each_bio(_bio)		\
	for (; _bio; _bio = _bio->bi_next)
#define __rq_for_each_bio(_bio, rq)	\
	if ((rq->bio))			\
		for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next)

#define rq_for_each_segment(bvl, _rq, _iter)			\
	__rq_for_each_bio(_iter.bio, _rq)			\
		bio_for_each_segment(bvl, _iter.bio, _iter.i)

#define rq_iter_last(rq, _iter)					\
	(_iter.bio->bi_next == NULL && _iter.i == _iter.bio->bi_vcnt-1)
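
/*
 * Example (illustrative only): walking every data segment of a request, e.g.
 * to total up the bytes it carries (in this kernel generation the iteration
 * variable is a struct bio_vec pointer):
 *
 *	struct req_iterator iter;
 *	struct bio_vec *bvec;
 *	unsigned int bytes = 0;
 *
 *	rq_for_each_segment(bvec, rq, iter)
 *		bytes += bvec->bv_len;
 */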
#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
# error	"You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform"
#endif
#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
extern void rq_flush_dcache_pages(struct request *rq);
#else
static inline void rq_flush_dcache_pages(struct request *rq)
{
}
#endif
extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
			     struct bio_set *bs, gfp_t gfp_mask,
			     int (*bio_ctr)(struct bio *, struct bio *, void *),
			     void *data);

extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t,
			  unsigned int, void __user *);
extern int scsi_cmd_blk_ioctl(struct block_device *, fmode_t,
			      unsigned int, void __user *);
extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *,
			 struct scsi_ioctl_command __user *);

extern int blk_rq_map_user(struct request_queue *, struct request *,
			   struct rq_map_data *, void __user *, unsigned long,
			   gfp_t);
extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
			       struct rq_map_data *, struct sg_iovec *, int,
			       unsigned int, gfp_t);
extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
				  struct request *, int, rq_end_io_fn *);
static inline unsigned int blk_rq_bytes(const struct request *rq)
{
	return rq->__data_len;
}

static inline int blk_rq_cur_bytes(const struct request *rq)
{
	return rq->bio ? bio_cur_bytes(rq->bio) : 0;
}

static inline unsigned int blk_rq_sectors(const struct request *rq)
{
	return blk_rq_bytes(rq) >> 9;
}

static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
{
	return blk_rq_cur_bytes(rq) >> 9;
}
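
/*
 * Worked example (illustrative only): the blk_rq_*_sectors() helpers always
 * speak in 512-byte units regardless of the device's logical block size, so a
 * 4 KiB request reports blk_rq_bytes(rq) == 4096 and blk_rq_sectors(rq) == 8.
 */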
static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q,
						     unsigned int cmd_flags)
{
	if (unlikely(cmd_flags & REQ_DISCARD))
		return q->limits.max_discard_sectors;

	if (unlikely(cmd_flags & REQ_WRITE_SAME))
		return q->limits.max_write_same_sectors;

	return q->limits.max_sectors;
}

static inline unsigned int blk_rq_get_max_sectors(struct request *rq)
{
	struct request_queue *q = rq->q;

	if (unlikely(rq->cmd_type == REQ_TYPE_BLOCK_PC))
		return q->limits.max_hw_sectors;

	return blk_queue_get_max_sectors(q, rq->cmd_flags);
}
extern bool blk_update_request(struct request *rq, int error,
			       unsigned int nr_bytes);
extern bool blk_end_request(struct request *rq, int error,
			    unsigned int nr_bytes);
extern bool __blk_end_request(struct request *rq, int error,
			      unsigned int nr_bytes);

extern void blk_queue_max_discard_sectors(struct request_queue *q,
					  unsigned int max_discard_sectors);
extern void blk_queue_max_write_same_sectors(struct request_queue *q,
					     unsigned int max_write_same_sectors);
extern int blk_queue_dma_drain(struct request_queue *q,
			       dma_drain_needed_fn *dma_drain_needed,
			       void *buf, unsigned int size);
	unsigned int should_sort;	/* list to be sorted before flushing? */

#define BLK_MAX_REQUEST_COUNT 16

typedef void (*blk_plug_cb_fn)(struct blk_plug_cb *, bool);

static inline void blk_flush_plug(struct task_struct *tsk)
{
	struct blk_plug *plug = tsk->plug;
	if (plug)
		blk_flush_plug_list(plug, false);
}

static inline void blk_schedule_flush_plug(struct task_struct *tsk)
{
	struct blk_plug *plug = tsk->plug;
	if (plug)
		blk_flush_plug_list(plug, true);
}

static inline bool blk_needs_flush_plug(struct task_struct *tsk)
{
	struct blk_plug *plug = tsk->plug;

	return plug && (!list_empty(&plug->list) || !list_empty(&plug->cb_list));
}
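
/*
 * Example (illustrative only): submitters batch I/O inside a plug so requests
 * can be merged and sorted before the queue sees them:
 *
 *	struct blk_plug plug;
 *
 *	blk_start_plug(&plug);
 *	... submit a batch of bios here ...
 *	blk_finish_plug(&plug);
 *
 * blk_flush_plug() and blk_schedule_flush_plug() above are how the batch gets
 * drained when the submitting task blocks.
 */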
#define blk_rq_tagged(rq)		((rq)->cmd_flags & REQ_QUEUED)

static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt,
						     int tag)
{
	if (unlikely(bqt == NULL || tag >= bqt->real_max_depth))
		return NULL;
	return bqt->tag_index[tag];
}
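
/*
 * Example (illustrative only): a tag-aware driver completing hardware tag
 * "tag" can translate it back into the owning request:
 *
 *	struct request *rq = blk_map_queue_find_tag(q->queue_tags, tag);
 *
 *	if (rq)
 *		blk_end_request_all(rq, error);
 */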
#define BLKDEV_DISCARD_SECURE	0x01	/* secure discard */

enum blk_default_limits {
	BLK_MAX_SEGMENTS	= 128,
	BLK_SAFE_MAX_SECTORS	= 255,
	BLK_DEF_MAX_SECTORS	= 1024,
	BLK_MAX_SEGMENT_SIZE	= 65536,
	BLK_SEG_BOUNDARY_MASK	= 0xFFFFFFFFUL,
};

#define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist)
static inline unsigned long queue_bounce_pfn(struct request_queue *q)
{
	return q->limits.bounce_pfn;
}

static inline unsigned long queue_segment_boundary(struct request_queue *q)
{
	return q->limits.seg_boundary_mask;
}

static inline unsigned int queue_max_sectors(struct request_queue *q)
{
	return q->limits.max_sectors;
}

static inline unsigned int queue_max_hw_sectors(struct request_queue *q)
{
	return q->limits.max_hw_sectors;
}

static inline unsigned short queue_max_segments(struct request_queue *q)
{
	return q->limits.max_segments;
}

static inline unsigned int queue_max_segment_size(struct request_queue *q)
{
	return q->limits.max_segment_size;
}
static inline unsigned short queue_logical_block_size(struct request_queue *q)
{
	int retval = 512;

	if (q && q->limits.logical_block_size)
		retval = q->limits.logical_block_size;

	return retval;
}

static inline unsigned short bdev_logical_block_size(struct block_device *bdev)
{
	return queue_logical_block_size(bdev_get_queue(bdev));
}
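
/*
 * Example (illustrative only): callers use the bdev_* wrapper rather than
 * reaching into the queue, e.g. to reject a buffer that is not a whole number
 * of logical blocks (bdev and buf_len are whatever the caller has at hand):
 *
 *	unsigned int lbs = bdev_logical_block_size(bdev);
 *
 *	if (buf_len & (lbs - 1))
 *		return -EINVAL;
 */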
static inline unsigned int queue_physical_block_size(struct request_queue *q)
{
	return q->limits.physical_block_size;
}

static inline unsigned int bdev_physical_block_size(struct block_device *bdev)
{
	return queue_physical_block_size(bdev_get_queue(bdev));
}

static inline unsigned int queue_io_min(struct request_queue *q)
{
	return q->limits.io_min;
}

static inline int bdev_io_min(struct block_device *bdev)
{
	return queue_io_min(bdev_get_queue(bdev));
}

static inline unsigned int queue_io_opt(struct request_queue *q)
{
	return q->limits.io_opt;
}

static inline int bdev_io_opt(struct block_device *bdev)
{
	return queue_io_opt(bdev_get_queue(bdev));
}

static inline int queue_alignment_offset(struct request_queue *q)
{
	if (q->limits.misaligned)
		return -1;

	return q->limits.alignment_offset;
}
static inline int queue_limit_alignment_offset(struct queue_limits *lim,
					       sector_t sector)
{
	unsigned int granularity = max(lim->physical_block_size, lim->io_min);
	unsigned int alignment = (sector << 9) & (granularity - 1);

	return (granularity + lim->alignment_offset - alignment)
		& (granularity - 1);
}
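
/*
 * Worked example (illustrative only): with io_min = 4096, alignment_offset = 0
 * and sector = 7 (byte offset 3584), granularity is 4096 and alignment is
 * 3584, so the helper returns (4096 + 0 - 3584) & 4095 = 512: the next
 * properly aligned boundary starts 512 bytes into that sector's I/O.
 */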
static inline int bdev_alignment_offset(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (q->limits.misaligned)
		return -1;

	if (bdev != bdev->bd_contains)
		return bdev->bd_part->alignment_offset;

	return q->limits.alignment_offset;
}

static inline int queue_discard_alignment(struct request_queue *q)
{
	if (q->limits.discard_misaligned)
		return -1;

	return q->limits.discard_alignment;
}

static inline int queue_limit_discard_alignment(struct queue_limits *lim,
						sector_t sector)
{
	unsigned int alignment = (sector << 9) & (lim->discard_granularity - 1);

	if (!lim->max_discard_sectors)
		return 0;

	return (lim->discard_granularity + lim->discard_alignment - alignment)
		& (lim->discard_granularity - 1);
}
static inline int bdev_discard_alignment(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (bdev != bdev->bd_contains)
		return bdev->bd_part->discard_alignment;

	return q->limits.discard_alignment;
}

static inline unsigned int queue_discard_zeroes_data(struct request_queue *q)
{
	if (q->limits.max_discard_sectors && q->limits.discard_zeroes_data == 1)
		return 1;

	return 0;
}

static inline unsigned int bdev_discard_zeroes_data(struct block_device *bdev)
{
	return queue_discard_zeroes_data(bdev_get_queue(bdev));
}
static inline unsigned int bdev_write_same(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (q)
		return q->limits.max_write_same_sectors;
	return 0;
}

static inline int queue_dma_alignment(struct request_queue *q)
{
	return q ? q->dma_alignment : 511;
}

static inline bool blk_rq_aligned(struct request_queue *q, unsigned long addr,
				  unsigned int len)
{
	unsigned int alignment = queue_dma_alignment(q) | q->dma_pad_mask;
	return !(addr & alignment) && !(len & alignment);
}
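
/*
 * Example (illustrative only): this is roughly how blk_rq_map_user() decides
 * between mapping the caller's pages directly and copying through an aligned
 * bounce buffer:
 *
 *	if (!blk_rq_aligned(q, (unsigned long)ubuf, len))
 *		... copy the data via an aligned kernel buffer ...
 *	else
 *		... pin and map the user pages directly ...
 */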
static inline unsigned int blksize_bits(unsigned int size)
{
	unsigned int bits = 8;
	do {
		bits++;
		size >>= 1;
	} while (size > 256);
	return bits;
}
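
/*
 * Worked example (illustrative only): blksize_bits() returns log2 of a block
 * size, e.g. blksize_bits(512) == 9 and blksize_bits(4096) == 12, matching
 * the "bytes >> 9" sector conversions used elsewhere in this header.
 */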
static inline bool queue_flush_queueable(struct request_queue *q)
{
	return !q->flush_not_queueable;
}

typedef struct {struct page *v;} Sector;

static inline void put_dev_sector(Sector p)
{
	page_cache_release(p.v);
}
#ifdef CONFIG_BLK_CGROUP
static inline void set_start_time_ns(struct request *req)
{
	preempt_disable();
	req->start_time_ns = sched_clock();
	preempt_enable();
}

static inline void set_io_start_time_ns(struct request *req)
{
	preempt_disable();
	req->io_start_time_ns = sched_clock();
	preempt_enable();
}

static inline uint64_t rq_start_time_ns(struct request *req)
{
	return req->start_time_ns;
}

static inline uint64_t rq_io_start_time_ns(struct request *req)
{
	return req->io_start_time_ns;
}
#else
static inline void set_start_time_ns(struct request *req) {}
static inline void set_io_start_time_ns(struct request *req) {}
#endif
#define MODULE_ALIAS_BLOCKDEV(major,minor) \
	MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor))
#define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \
	MODULE_ALIAS("block-major-" __stringify(major) "-*")
#if defined(CONFIG_BLK_DEV_INTEGRITY)

#define INTEGRITY_FLAG_READ	2	/* verify data integrity on read */
#define INTEGRITY_FLAG_WRITE	4	/* generate data integrity on write */

struct blk_integrity_exchg {
	void			*data_buf;
	void			*prot_buf;
	sector_t		sector;
	unsigned int		data_size;
	unsigned short		sector_size;
	const char		*disk_name;
};

typedef void (integrity_gen_fn) (struct blk_integrity_exchg *);
typedef int (integrity_vrfy_fn) (struct blk_integrity_exchg *);
typedef void (integrity_set_tag_fn) (void *, void *, unsigned int);
typedef void (integrity_get_tag_fn) (void *, void *, unsigned int);

struct blk_integrity {
	integrity_gen_fn	*generate_fn;
	integrity_vrfy_fn	*verify_fn;
	integrity_set_tag_fn	*set_tag_fn;
	integrity_get_tag_fn	*get_tag_fn;

	unsigned short		flags;
	unsigned short		tuple_size;
	/* ... */
};
static inline
struct blk_integrity *bdev_get_integrity(struct block_device *bdev)
{
	return bdev->bd_disk->integrity;
}

static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
{
	return disk->integrity;
}

static inline int blk_integrity_rq(struct request *rq)
{
	if (rq->bio == NULL)
		return 0;

	return bio_integrity(rq->bio);
}

static inline void blk_queue_max_integrity_segments(struct request_queue *q,
						    unsigned int segs)
{
	q->limits.max_integrity_segments = segs;
}

static inline unsigned short
queue_max_integrity_segments(struct request_queue *q)
{
	return q->limits.max_integrity_segments;
}
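
/*
 * Example (illustrative only): a driver with protection information support
 * checks whether a request actually carries an integrity payload before
 * setting up the extra scatterlist:
 *
 *	if (blk_integrity_rq(rq))
 *		nr_prot_segs = blk_rq_count_integrity_sg(q, rq->bio);
 */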
#else /* CONFIG_BLK_DEV_INTEGRITY */

struct blk_integrity;

static inline int blk_integrity_rq(struct request *rq)
{
	return 0;
}
static inline struct blk_integrity *bdev_get_integrity(struct block_device *b)
{
	return NULL;
}
static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
{
	return NULL;
}
static inline int blk_integrity_register(struct gendisk *d,
					 struct blk_integrity *b)
{
	return 0;
}
static inline void blk_queue_max_integrity_segments(struct request_queue *q,
						    unsigned int segs)
{
}
static inline unsigned short queue_max_integrity_segments(struct request_queue *q)
{
	return 0;
}

#endif /* CONFIG_BLK_DEV_INTEGRITY */
struct block_device_operations {
	int (*direct_access) (struct block_device *, sector_t,
			      void **, unsigned long *);
	unsigned int (*check_events) (struct gendisk *disk,
				      unsigned int clearing);
	/* ->media_changed() is DEPRECATED, use ->check_events() instead */
	int (*media_changed) (struct gendisk *);
	void (*unlock_native_capacity) (struct gendisk *);
/*
 * stubs for when the block layer is configured out
 */
#define buffer_heads_over_limit 0

static inline bool blk_needs_flush_plug(struct task_struct *tsk)
{
	return false;
}

#endif	/* _LINUX_BLKDEV_H */