/*
 * Fragmentary excerpt from the block layer core (block/blk-core.c).
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/slab.h>

#define CREATE_TRACE_POINTS
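/*
 * drive_stat_acct() - per-partition I/O accounting for a request.
 * Merged requests only bump the merge counter; new I/O also pins the
 * partition (falling back to part0 if it is being removed) and bumps
 * the in-flight count.
 */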
static void drive_stat_acct(struct request *rq, int new_io)
{
	struct hd_struct *part;
	int rw = rq_data_dir(rq);
	int cpu;

	if (!blk_do_io_stat(rq))
		return;

	cpu = part_stat_lock();

	part_stat_inc(cpu, part, merges[rw]);

	if (!hd_struct_try_get(part)) {
		/* partition is being removed, account against part0 instead */
		part = &rq->rq_disk->part0;
	}
	part_inc_in_flight(part, rw);
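/*
 * blk_queue_congestion_threshold() - the congestion on/off thresholds
 * are derived from q->nr_requests with a gap between them so the queue
 * does not flip-flop at the boundary.
 */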
	nr = q->nr_requests - (q->nr_requests / 8) + 1;
	if (nr > q->nr_requests)
		nr = q->nr_requests;
	q->nr_congestion_on = nr;

	nr = q->nr_requests - (q->nr_requests / 8) - (q->nr_requests / 16) - 1;
	q->nr_congestion_off = nr;

/* blk_get_backing_dev_info() */
	ret = &q->backing_dev_info;

/* blk_rq_init() */
	memset(rq, 0, sizeof(*rq));

	INIT_LIST_HEAD(&rq->queuelist);
	INIT_LIST_HEAD(&rq->timeout_list);
	INIT_HLIST_NODE(&rq->hash);
	rq->cmd_len = BLK_MAX_CDB;
	set_start_time_ns(rq);
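/*
 * req_bio_endio() - complete @nbytes of @bio on behalf of @rq: shrink
 * bi_size, advance bi_sector, and only end the bio itself once all of
 * its bytes are done (and it is not part of a flush sequence).
 */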
static void req_bio_endio(struct request *rq, struct bio *bio,
			  unsigned int nbytes, int error)
{
	if (error)
		clear_bit(BIO_UPTODATE, &bio->bi_flags);
	else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
		error = -EIO;

	if (unlikely(nbytes > bio->bi_size)) {
		printk(KERN_ERR "%s: want %u bytes done, %u left\n",
		       __func__, nbytes, bio->bi_size);
		nbytes = bio->bi_size;
	}

	if (unlikely(rq->cmd_flags & REQ_QUIET))
		set_bit(BIO_QUIET, &bio->bi_flags);

	bio->bi_size -= nbytes;
	bio->bi_sector += (nbytes >> 9);

	if (bio_integrity(bio))
		bio_integrity_advance(bio, nbytes);
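/*
 * blk_dump_rq_flags() - dump a request's type, flags, sector range and
 * bio chain to the log; for BLOCK_PC requests the CDB is dumped as well.
 */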
/* blk_dump_rq_flags() */
	printk(KERN_INFO "%s: dev %s: type=%x, flags=%x\n", msg,
		rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->cmd_type,
		rq->cmd_flags);

	printk(KERN_INFO "  sector %llu, nr/cnr %u/%u\n",
	       (unsigned long long)blk_rq_pos(rq),
	       blk_rq_sectors(rq), blk_rq_cur_sectors(rq));
	printk(KERN_INFO "  bio %p, biotail %p, buffer %p, len %u\n",
	       rq->bio, rq->biotail, rq->buffer, blk_rq_bytes(rq));

	if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
		for (bit = 0; bit < BLK_MAX_CDB; bit++)
			printk("%02x ", rq->cmd[bit]);
	}

/* blk_delay_work() */
	spin_lock_irq(q->queue_lock);
	__blk_run_queue(q);
	spin_unlock_irq(q->queue_lock);

/* blk_start_queue() */
	queue_flag_clear(QUEUE_FLAG_STOPPED, q);

/* blk_stop_queue() */
	queue_flag_set(QUEUE_FLAG_STOPPED, q);

/* blk_run_queue_async() */
	if (likely(!blk_queue_stopped(q)))

/* blk_run_queue() */
	spin_unlock_irqrestore(q->queue_lock, flags);
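/*
 * __blk_drain_queue() - keep running the queue until no requests remain
 * queued, in flight, or pending on the flush machinery; used by queue
 * bypass and teardown.
 */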
/* __blk_drain_queue() */
	spin_lock_irq(q->queue_lock);

	if (!list_empty(&q->queue_head) && q->request_fn)
		__blk_run_queue(q);

	drain |= q->nr_rqs_elvpriv;

	drain |= !list_empty(&q->queue_head);
	for (i = 0; i < 2; i++) {
		drain |= q->nr_rqs[i];
		drain |= q->in_flight[i];
		drain |= !list_empty(&q->flush_queue[i]);
	}

	spin_unlock_irq(q->queue_lock);

	/* wake up waiters stranded on the request lists of a dead queue */
	struct request_list *rl;

	spin_lock_irq(q->queue_lock);
	spin_unlock_irq(q->queue_lock);

/* blk_queue_bypass_start() */
	spin_lock_irq(q->queue_lock);
	drain = !q->bypass_depth++;
	queue_flag_set(QUEUE_FLAG_BYPASS, q);
	spin_unlock_irq(q->queue_lock);

/* blk_queue_bypass_end() */
	spin_lock_irq(q->queue_lock);
	if (!--q->bypass_depth)
		queue_flag_clear(QUEUE_FLAG_BYPASS, q);
	spin_unlock_irq(q->queue_lock);
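/*
 * blk_cleanup_queue() - shut a queue down: mark it dead and bypassed,
 * set NOMERGES/NOXMERGES, drain outstanding requests, and point
 * q->queue_lock back at the internal lock before the driver's lock
 * goes away.
 */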
/* blk_cleanup_queue() */
	queue_flag_set_unlocked(QUEUE_FLAG_DEAD, q);

	queue_flag_set(QUEUE_FLAG_BYPASS, q);
	queue_flag_set(QUEUE_FLAG_NOMERGES, q);
	queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
	queue_flag_set(QUEUE_FLAG_DEAD, q);
	spin_unlock_irq(lock);

	/* restore the internal queue lock before release */
	if (q->queue_lock != &q->__queue_lock)
		q->queue_lock = &q->__queue_lock;
	spin_unlock_irq(lock);
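/*
 * blk_alloc_queue_node() - allocate and initialise a request_queue:
 * backing_dev_info, laptop-mode writeback timer, the queue/timeout/icq/
 * flush lists, the internal queue_lock, and the initial BYPASS state.
 */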
/* blk_alloc_queue_node() */
	q->backing_dev_info.ra_pages =
			(VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
	q->backing_dev_info.state = 0;
	q->backing_dev_info.name = "block";

	err = bdi_init(&q->backing_dev_info);

	setup_timer(&q->backing_dev_info.laptop_mode_wb_timer,
		    laptop_mode_timer_fn, (unsigned long) q);

	INIT_LIST_HEAD(&q->queue_head);
	INIT_LIST_HEAD(&q->timeout_list);
	INIT_LIST_HEAD(&q->icq_list);
#ifdef CONFIG_BLK_CGROUP
	INIT_LIST_HEAD(&q->blkg_list);
#endif
	INIT_LIST_HEAD(&q->flush_queue[0]);
	INIT_LIST_HEAD(&q->flush_queue[1]);
	INIT_LIST_HEAD(&q->flush_data_in_flight);

	q->queue_lock = &q->__queue_lock;

	__set_bit(QUEUE_FLAG_BYPASS, &q->queue_flags);

/* blk_init_allocated_queue() */
	q->prep_rq_fn = NULL;
	q->unprep_rq_fn = NULL;
	q->queue_flags |= QUEUE_FLAG_DEFAULT;

	/* override internal queue lock with supplied lock pointer */
	q->queue_lock = lock;

/* blk_get_queue() */
	if (likely(!blk_queue_dead(q))) {
		__blk_get_queue(q);
		return true;
	}

static inline void blk_free_request(struct request_list *rl, struct request *rq)

/* ioc_set_batching() */
	if (!ioc || ioc_batching(q, ioc))
		return;
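/*
 * __freed_request()/freed_request() - bookkeeping when a request goes
 * back to its request_list: clear congestion below the off-threshold,
 * wake sleepers, and kick the "starved" state of the other direction.
 */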
static void __freed_request(struct request_list *rl, int sync)
{
	struct request_queue *q = rl->q;

	if (rl == &q->root_rl &&
	    rl->count[sync] < queue_congestion_off_threshold(q))
		blk_clear_queue_congested(q, sync);

	if (rl->count[sync] + 1 <= q->nr_requests) {
		if (waitqueue_active(&rl->wait[sync]))
			wake_up(&rl->wait[sync]);

		blk_clear_rl_full(rl, sync);
	}
}

static void freed_request(struct request_list *rl, unsigned int flags)
{
	int sync = rw_is_sync(flags);

	__freed_request(rl, sync);

	if (unlikely(rl->starved[sync ^ 1]))
		__freed_request(rl, sync ^ 1);

static bool blk_rq_should_init_elevator(struct bio *bio)

static struct io_context *rq_ioc(struct bio *bio)
{
#ifdef CONFIG_BLK_CGROUP
	if (bio && bio->bi_ioc)
		return bio->bi_ioc;
#endif
	return current->io_context;
}
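/*
 * __get_request() - try to allocate a request from @rl for @rw_flags.
 * Applies the congestion/full heuristics, may mark the task as batching,
 * charges the request to the request_list, and falls back to a request
 * without elevator-private data if that allocation fails.
 */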
static struct request *__get_request(struct request_list *rl, int rw_flags,
				     struct bio *bio, gfp_t gfp_mask)
{
	struct elevator_type *et = q->elevator->type;
	const bool is_sync = rw_is_sync(rw_flags) != 0;

	if (may_queue == ELV_MQUEUE_NO)
		goto rq_starved;

	if (rl->count[is_sync]+1 >= queue_congestion_on_threshold(q)) {
		if (rl->count[is_sync]+1 >= q->nr_requests) {
			if (!blk_rl_full(rl, is_sync)) {
				ioc_set_batching(q, ioc);
				blk_set_rl_full(rl, is_sync);
			} else {
				if (may_queue != ELV_MQUEUE_MUST
						&& !ioc_batching(q, ioc)) {
					return NULL;
				}
			}
		}
		if (rl == &q->root_rl)
			blk_set_queue_congested(q, is_sync);
	}

	/* allow batchers to allocate up to 50% over the limit */
	if (rl->count[is_sync] >= (3 * q->nr_requests / 2))
		return NULL;

	q->nr_rqs[is_sync]++;
	rl->count[is_sync]++;
	rl->starved[is_sync] = 0;

	if (blk_rq_should_init_elevator(bio) && !blk_queue_bypass(q)) {
		rw_flags |= REQ_ELVPRIV;
		if (et->icq_cache && ioc)
			icq = ioc_lookup_icq(ioc, q);
	}

	if (blk_queue_io_stat(q))
		rw_flags |= REQ_IO_STAT;
	spin_unlock_irq(q->queue_lock);

	blk_rq_set_rl(rq, rl);

	/* init elvpriv */
	if (rw_flags & REQ_ELVPRIV) {
		if (unlikely(et->icq_cache && !icq)) {

	if (ioc_batching(q, ioc))
		ioc->nr_batch_requests--;

	trace_block_getrq(q, bio, rw_flags & 1);

	/* aux data allocation failed: warn (ratelimited) and retry bare */
			   dev_name(q->backing_dev_info.dev));

	rq->cmd_flags &= ~REQ_ELVPRIV;

	spin_lock_irq(q->queue_lock);
	spin_unlock_irq(q->queue_lock);

	/* failure path: give back the accounting taken above */
	spin_lock_irq(q->queue_lock);
	freed_request(rl, rw_flags);

rq_starved:
	if (unlikely(rl->count[is_sync] == 0))
		rl->starved[is_sync] = 1;
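/*
 * get_request() - wrapper around __get_request() that picks the
 * request_list via blk_get_rl() and, for waiting allocations, sleeps on
 * the request_list waitqueue and retries until a request is available.
 */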
static struct request *get_request(struct request_queue *q, int rw_flags,
				   struct bio *bio, gfp_t gfp_mask)
{
	const bool is_sync = rw_is_sync(rw_flags) != 0;
	struct request_list *rl;

	rl = blk_get_rl(q, bio);

	rq = __get_request(rl, rw_flags, bio, gfp_mask);

	/* no request available: sleep on the request_list and retry */
	trace_block_sleeprq(q, bio, rw_flags & 1);

	spin_unlock_irq(q->queue_lock);

	ioc_set_batching(q, current->io_context);

	spin_lock_irq(q->queue_lock);

/* blk_get_request() */
	create_io_context(gfp_mask, q->node);

	spin_lock_irq(q->queue_lock);
	rq = get_request(q, rw, NULL, gfp_mask);
	spin_unlock_irq(q->queue_lock);

/* blk_make_request() */
	struct bio *bounce_bio = bio;

	return ERR_PTR(ret);

/* blk_requeue_request() */
	blk_clear_rq_complete(rq);
	trace_block_rq_requeue(q, rq);

	if (blk_rq_tagged(rq))
		blk_queue_end_tag(q, rq);

	BUG_ON(blk_queued_rq(rq));

/* add_acct_request() */
	drive_stat_acct(rq, 1);

static void part_round_stats_single(int cpu, struct hd_struct *part,
				    unsigned long now)
{
	if (now == part->stamp)
		return;

	if (part_in_flight(part)) {
		__part_stat_add(cpu, part, time_in_queue,
				part_in_flight(part) * (now - part->stamp));
		__part_stat_add(cpu, part, io_ticks, (now - part->stamp));
	}

/* part_round_stats() */
	part_round_stats_single(cpu, &part_to_disk(part)->part0, now);
	part_round_stats_single(cpu, part, now);

/* __blk_put_request() */
	unsigned int flags = req->cmd_flags;
	struct request_list *rl = blk_rq_rl(req);

	BUG_ON(!list_empty(&req->queuelist));
	BUG_ON(!hlist_unhashed(&req->hash));

	blk_free_request(rl, req);
	freed_request(rl, flags);

/* blk_put_request() */
	unsigned long flags;

	spin_unlock_irqrestore(q->queue_lock, flags);

/* blk_add_request_payload() */
	struct bio *bio = rq->bio;

	bio->bi_io_vec->bv_page = page;
	bio->bi_io_vec->bv_offset = 0;
	bio->bi_io_vec->bv_len = len;

	bio->bi_phys_segments = 1;

	rq->__data_len = rq->resid_len = len;
	rq->nr_phys_segments = 1;
	rq->buffer = bio_data(bio);
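/*
 * bio_attempt_back_merge()/bio_attempt_front_merge() - fold @bio into an
 * existing request at its tail or head, updating the data length, start
 * sector (front merge) and I/O priority, then account the merge.
 */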
/* bio_attempt_back_merge() */
	trace_block_bio_backmerge(q, bio);

	req->biotail->bi_next = bio;
	req->biotail = bio;
	req->__data_len += bio->bi_size;
	req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));

	drive_stat_acct(req, 0);

static bool bio_attempt_front_merge(struct request_queue *q,
				    struct request *req, struct bio *bio)
{
	trace_block_bio_frontmerge(q, bio);

	bio->bi_next = req->bio;
	req->bio = bio;

	req->buffer = bio_data(bio);
	req->__sector = bio->bi_sector;
	req->__data_len += bio->bi_size;
	req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));

	drive_stat_acct(req, 0);

static bool attempt_plug_merge(struct request_queue *q, struct bio *bio,
			       unsigned int *request_count)
{
	if (el_ret == ELEVATOR_BACK_MERGE) {
		ret = bio_attempt_back_merge(q, rq, bio);
	} else if (el_ret == ELEVATOR_FRONT_MERGE) {
		ret = bio_attempt_front_merge(q, rq, bio);
	}

/* init_request_from_bio() */
	req->cmd_type = REQ_TYPE_FS;

	req->__sector = bio->bi_sector;
	req->ioprio = bio_prio(bio);
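/*
 * blk_queue_bio() - make_request_fn for request-based drivers: try a
 * plug-list merge first, then an elevator merge under queue_lock, and
 * only allocate a fresh request (get_request) when no merge is possible.
 * Flush/FUA bios bypass merging and go to ELEVATOR_INSERT_FLUSH.
 */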
void blk_queue_bio(struct request_queue *q, struct bio *bio)
{
	const bool sync = !!(bio->bi_rw & REQ_SYNC);
	int el_ret, rw_flags, where = ELEVATOR_INSERT_SORT;
	unsigned int request_count = 0;

	/* REQ_FLUSH/REQ_FUA bios skip merging entirely */
	spin_lock_irq(q->queue_lock);
	where = ELEVATOR_INSERT_FLUSH;

	if (attempt_plug_merge(q, bio, &request_count))
		return;

	spin_lock_irq(q->queue_lock);

	if (el_ret == ELEVATOR_BACK_MERGE) {
		if (bio_attempt_back_merge(q, req, bio)) {
	} else if (el_ret == ELEVATOR_FRONT_MERGE) {
		if (bio_attempt_front_merge(q, req, bio)) {

	rw_flags = bio_data_dir(bio);

	req = get_request(q, rw_flags, bio, GFP_NOIO);

	if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags))
		req->cpu = raw_smp_processor_id();

	if (plug) {
		if (list_empty(&plug->list))
			trace_block_plug(q);
		if (!plug->should_sort) {
			__rq = list_entry_rq(plug->list.prev);
			if (__rq->q != q)
				plug->should_sort = 1;
		}
		if (request_count >= BLK_MAX_REQUEST_COUNT) {
			blk_flush_plug_list(plug, false);
			trace_block_plug(q);
		}
		drive_stat_acct(req, 1);
	} else {
		spin_lock_irq(q->queue_lock);
		add_acct_request(q, req, where);
		spin_unlock_irq(q->queue_lock);
	}
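/*
 * blk_partition_remap() - rewrite a partition-relative bio so bi_sector
 * and bi_bdev refer to the whole device, emitting a remap trace event.
 */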
static inline void blk_partition_remap(struct bio *bio)
{
	struct block_device *bdev = bio->bi_bdev;

	if (bio_sectors(bio) && bdev != bdev->bd_contains) {
		struct hd_struct *p = bdev->bd_part;

		bio->bi_sector += p->start_sect;
		bio->bi_bdev = bdev->bd_contains;

		trace_block_bio_remap(bdev_get_queue(bio->bi_bdev), bio,
				      bdev->bd_dev,
				      bio->bi_sector - p->start_sect);
	}
}

static void handle_bad_sector(struct bio *bio)
{
		(unsigned long long)bio->bi_sector + bio_sectors(bio),
		(long long)(i_size_read(bio->bi_bdev->bd_inode) >> 9));

	set_bit(BIO_EOF, &bio->bi_flags);
}

#ifdef CONFIG_FAIL_MAKE_REQUEST

static DECLARE_FAULT_ATTR(fail_make_request);

static int __init setup_fail_make_request(char *str)
{
	return setup_fault_attr(&fail_make_request, str);
}
__setup("fail_make_request=", setup_fail_make_request);

static bool should_fail_request(struct hd_struct *part, unsigned int bytes)
{
	return part->make_it_fail && should_fail(&fail_make_request, bytes);
}

static int __init fail_make_request_debugfs(void)
{
	struct dentry *dir = fault_create_debugfs_attr("fail_make_request",
						NULL, &fail_make_request);

	return IS_ERR(dir) ? PTR_ERR(dir) : 0;
}

#else /* CONFIG_FAIL_MAKE_REQUEST */

static inline bool should_fail_request(struct hd_struct *part,
					unsigned int bytes)
{
	return false;
}

#endif /* CONFIG_FAIL_MAKE_REQUEST */

/*
 * Check whether this bio extends beyond the end of the device.
 */
static inline int bio_check_eod(struct bio *bio, unsigned int nr_sectors)
{
	maxsector = i_size_read(bio->bi_bdev->bd_inode) >> 9;

	if (maxsector < nr_sectors || maxsector - nr_sectors < sector) {
		handle_bad_sector(bio);
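/*
 * generic_make_request_checks() - validate a bio before it is queued:
 * end-of-device checks, missing queue, oversized transfers, fault
 * injection, partition remapping, and support checks for discard and
 * write-same, ending with the block_bio_queue trace event.
 */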
static noinline_for_stack bool
generic_make_request_checks(struct bio *bio)
{
	int nr_sectors = bio_sectors(bio);
	struct hd_struct *part;

	if (bio_check_eod(bio, nr_sectors))
		goto end_io;

	q = bdev_get_queue(bio->bi_bdev);
	if (unlikely(!q)) {
		printk(KERN_ERR
		       "generic_make_request: Trying to access "
			"nonexistent block-device %s (%Lu)\n",
			bdevname(bio->bi_bdev, b),
			(long long) bio->bi_sector);
		goto end_io;
	}

	if (likely(bio_is_rw(bio) &&
		   nr_sectors > queue_max_hw_sectors(q))) {
		printk(KERN_ERR "bio too big device %s (%u > %u)\n",
		       bdevname(bio->bi_bdev, b),
		       bio_sectors(bio),
		       queue_max_hw_sectors(q));
		goto end_io;
	}

	part = bio->bi_bdev->bd_part;
	if (should_fail_request(part, bio->bi_size) ||
	    should_fail_request(&part_to_disk(part)->part0,
				bio->bi_size))
		goto end_io;

	/*
	 * If this device has partitions, remap block n
	 * of partition p to block n+start(p) of the disk.
	 */
	blk_partition_remap(bio);

	if (bio_check_eod(bio, nr_sectors))
		goto end_io;

	/* discard or secure discard on a queue that does not support it */
	if ((bio->bi_rw & REQ_DISCARD) &&
	    (!blk_queue_discard(q) ||
	     ((bio->bi_rw & REQ_SECURE) && !blk_queue_secdiscard(q)))) {
		err = -EOPNOTSUPP;
		goto end_io;
	}

	if (bio->bi_rw & REQ_WRITE_SAME && !bdev_write_same(bio->bi_bdev)) {
		err = -EOPNOTSUPP;
		goto end_io;
	}

	trace_block_bio_queue(q, bio);
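/*
 * generic_make_request() - hand a bio to its queue's make_request_fn.
 * Recursive submissions from stacked drivers are caught by
 * current->bio_list and replayed iteratively to keep stack usage flat.
 */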
/* generic_make_request() */
	if (!generic_make_request_checks(bio))
		return;

	/* already inside a make_request_fn: queue the bio for the outer loop */
	if (current->bio_list) {
		bio_list_add(current->bio_list, bio);
		return;
	}

	bio_list_init(&bio_list_on_stack);
	current->bio_list = &bio_list_on_stack;
	do {
		struct request_queue *q = bdev_get_queue(bio->bi_bdev);

		q->make_request_fn(q, bio);

		bio = bio_list_pop(current->bio_list);
	} while (bio);

/* submit_bio() */
	if (bio_has_data(bio)) {
		unsigned int count;

		if (unlikely(rw & REQ_WRITE_SAME))
			count = bdev_logical_block_size(bio->bi_bdev) >> 9;
		else
			count = bio_sectors(bio);

		if (rw & WRITE) {
			count_vm_events(PGPGOUT, count);
		} else {
			task_io_account_read(bio->bi_size);
			count_vm_events(PGPGIN, count);
		}

			(rw & WRITE) ? "WRITE" : "READ",
			(unsigned long long)bio->bi_sector,

/* blk_rq_check_limits() */
	if (!rq_mergeable(rq))
		return 0;

	if (blk_rq_sectors(rq) > blk_queue_get_max_sectors(q, rq->cmd_flags)) {

	if (rq->nr_phys_segments > queue_max_segments(q)) {

/* blk_insert_cloned_request() */
	unsigned long flags;
	int where = ELEVATOR_INSERT_BACK;

	if (rq->rq_disk &&
	    should_fail_request(&rq->rq_disk->part0, blk_rq_bytes(rq)))
		return -EIO;

	spin_unlock_irqrestore(q->queue_lock, flags);

	BUG_ON(blk_queued_rq(rq));

	if (rq->cmd_flags & (REQ_FLUSH|REQ_FUA))
		where = ELEVATOR_INSERT_FLUSH;

	add_acct_request(q, rq, where);
	if (where == ELEVATOR_INSERT_FLUSH)
		__blk_run_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);

/* blk_rq_err_bytes() */
	unsigned int bytes = 0;

	if (!(rq->cmd_flags & REQ_MIXED_MERGE))
		return blk_rq_bytes(rq);

	for (bio = rq->bio; bio; bio = bio->bi_next) {
		if ((bio->bi_rw & ff) != ff)
			break;
		bytes += bio->bi_size;
	}

	BUG_ON(blk_rq_bytes(rq) && !bytes);
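/*
 * blk_account_io_completion()/blk_account_io_done() - per-partition
 * sector, ios and ticks accounting; _done also drops the in-flight
 * count and the partition reference taken at dispatch time.
 */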
static void blk_account_io_completion(struct request *req, unsigned int bytes)
{
	if (blk_do_io_stat(req)) {
		const int rw = rq_data_dir(req);
		struct hd_struct *part;
		int cpu;

		cpu = part_stat_lock();
		part = req->part;
		part_stat_add(cpu, part, sectors[rw], bytes >> 9);
		part_stat_unlock();
	}
}

static void blk_account_io_done(struct request *req)
{
	/* flush requests are not accounted as normal IO */
	if (blk_do_io_stat(req) && !(req->cmd_flags & REQ_FLUSH_SEQ)) {
		unsigned long duration = jiffies - req->start_time;
		const int rw = rq_data_dir(req);
		struct hd_struct *part;
		int cpu;

		cpu = part_stat_lock();
		part = req->part;

		part_stat_inc(cpu, part, ios[rw]);
		part_stat_add(cpu, part, ticks[rw], duration);
		part_dec_in_flight(part, rw);

		hd_struct_put(part);
		part_stat_unlock();
	}
}
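/*
 * blk_peek_request() - return, without dequeueing, the next request the
 * driver should process: mark it started, update the sort boundary,
 * append the DMA drain segment if needed, and run ->prep_rq_fn (which
 * may defer or kill the request).
 */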
/* blk_peek_request() */
	while ((rq = __elv_next_request(q)) != NULL) {
		elv_activate_rq(q, rq);

		trace_block_rq_issue(q, rq);

		if (!q->boundary_rq || q->boundary_rq == rq) {
			q->end_sector = rq_end_sector(rq);
			q->boundary_rq = NULL;
		}

		if (q->dma_drain_size && blk_rq_bytes(rq)) {
			/* make room for the drain element */
			rq->nr_phys_segments++;
		}

		ret = q->prep_rq_fn(q, rq);
		if (ret == BLKPREP_OK) {
			break;
		} else if (ret == BLKPREP_DEFER) {
			if (q->dma_drain_size && blk_rq_bytes(rq) &&
			    !(rq->cmd_flags & REQ_DONTPREP))
				--rq->nr_phys_segments;
		} else if (ret == BLKPREP_KILL) {

/* blk_dequeue_request() */
	BUG_ON(list_empty(&rq->queuelist));

	list_del_init(&rq->queuelist);

	if (blk_account_rq(rq)) {
		q->in_flight[rq_is_sync(rq)]++;
		set_io_start_time_ns(rq);
	}

/* blk_start_request() */
	req->resid_len = blk_rq_bytes(req);
	if (unlikely(blk_bidi_rq(req)))
		req->next_rq->resid_len = blk_rq_bytes(req->next_rq);
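/*
 * blk_update_request() - complete @nr_bytes of @req: finish whole bios,
 * partially advance the current bio/biovec otherwise, update __sector
 * and __data_len, and return true while the request still has data left.
 */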
/* blk_update_request() */
	trace_block_rq_complete(req->q, req);

	/* reset per-request error on each partial completion of an fs request */
	if (req->cmd_type == REQ_TYPE_FS)
		req->errors = 0;

	if (error && req->cmd_type == REQ_TYPE_FS &&
	    !(req->cmd_flags & REQ_QUIET)) {
		char *error_type;

		switch (error) {
		case -ENOLINK:
			error_type = "recoverable transport";
			break;
		case -EREMOTEIO:
			error_type = "critical target";
			break;
		case -EBADE:
			error_type = "critical nexus";
			break;
		case -EIO:
		default:
			error_type = "I/O";
			break;
		}
		printk_ratelimited(KERN_ERR "end_request: %s error, dev %s, sector %llu\n",
				   error_type, req->rq_disk ?
				   req->rq_disk->disk_name : "?",
				   (unsigned long long)blk_rq_pos(req));
	}

	blk_account_io_completion(req, nr_bytes);

	total_bytes = bio_nbytes = 0;
	while ((bio = req->bio) != NULL) {
		int nbytes;

		if (nr_bytes >= bio->bi_size) {
			req->bio = bio->bi_next;
			nbytes = bio->bi_size;
			req_bio_endio(req, bio, nbytes, error);
		} else {
			int idx = bio->bi_idx + next_idx;

			if (unlikely(idx >= bio->bi_vcnt)) {
				printk(KERN_ERR "%s: bio idx %d >= vcnt %d\n",
				       __func__, idx, bio->bi_vcnt);
				break;
			}

			nbytes = bio_iovec_idx(bio, idx)->bv_len;
			BIO_BUG_ON(nbytes > bio->bi_size);

			/* not a complete bvec done */
			if (unlikely(nbytes > nr_bytes)) {
				bio_nbytes += nr_bytes;
				total_bytes += nr_bytes;
				break;
			}
		}
	}

	/* completely done */
	if (!req->bio) {
		req->__data_len = 0;
		return false;
	}

	/* partial completion: advance the current bio and biovec */
	if (bio_nbytes) {
		req_bio_endio(req, bio, bio_nbytes, error);
		bio->bi_idx += next_idx;
		bio_iovec(bio)->bv_offset += nr_bytes;
		bio_iovec(bio)->bv_len -= nr_bytes;
	}

	req->buffer = bio_data(req->bio);

	/* update sector only for requests with a clear definition of sector */
	if (req->cmd_type == REQ_TYPE_FS)
		req->__sector += total_bytes >> 9;

	if (blk_rq_bytes(req) < blk_rq_cur_bytes(req)) {
		blk_dump_rq_flags(req, "request botched");
		req->__data_len = blk_rq_cur_bytes(req);
	}

static bool blk_update_bidi_request(struct request *rq, int error,
				    unsigned int nr_bytes,
				    unsigned int bidi_bytes)
{
	if (blk_queue_add_random(rq->q))
		add_disk_randomness(rq->rq_disk);

/* blk_unprep_request() */
	if (q->unprep_rq_fn)
		q->unprep_rq_fn(q, req);

static void blk_finish_request(struct request *req, int error)
{
	if (blk_rq_tagged(req))
		blk_queue_end_tag(req->q, req);

	BUG_ON(blk_queued_rq(req));

	if (unlikely(laptop_mode) && req->cmd_type == REQ_TYPE_FS)
		laptop_io_completion(&req->q->backing_dev_info);

	blk_account_io_done(req);

	if (req->end_io)
		req->end_io(req, error);
	else {
		if (blk_bidi_rq(req))
			__blk_put_request(req->next_rq->q, req->next_rq);
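/*
 * blk_end_bidi_request()/__blk_end_bidi_request() - update both legs of
 * a (possibly bidirectional) request and finish it when no bytes remain;
 * the __ variant expects q->queue_lock to be held already.
 */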
static bool blk_end_bidi_request(struct request *rq, int error,
				 unsigned int nr_bytes, unsigned int bidi_bytes)
{
	struct request_queue *q = rq->q;
	unsigned long flags;

	if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes))
		return true;

	spin_lock_irqsave(q->queue_lock, flags);
	blk_finish_request(rq, error);
	spin_unlock_irqrestore(q->queue_lock, flags);

	return false;
}

/* __blk_end_bidi_request(): caller already holds q->queue_lock */
static bool __blk_end_bidi_request(struct request *rq, int error,
				   unsigned int nr_bytes, unsigned int bidi_bytes)
{
	if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes))
		return true;

	blk_finish_request(rq, error);

	return false;
}

/* blk_end_request() */
	return blk_end_bidi_request(rq, error, nr_bytes, 0);

/* blk_end_request_all() */
	unsigned int bidi_bytes = 0;

	if (unlikely(blk_bidi_rq(rq)))
		bidi_bytes = blk_rq_bytes(rq->next_rq);

	pending = blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes);

/* __blk_end_request_all() */
	unsigned int bidi_bytes = 0;

	if (unlikely(blk_bidi_rq(rq)))
		bidi_bytes = blk_rq_bytes(rq->next_rq);

/* blk_rq_bio_prep() */
	rq->cmd_flags |= bio->bi_rw & REQ_WRITE;

	if (bio_has_data(bio)) {
		rq->buffer = bio_data(bio);
	}
	rq->__data_len = bio->bi_size;
	rq->bio = rq->biotail = bio;

	rq->rq_disk = bio->bi_bdev->bd_disk;

#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
void rq_flush_dcache_pages(struct request *rq)
{
	struct req_iterator iter;
	struct bio_vec *bvec;

	rq_for_each_segment(bvec, rq, iter)
		flush_dcache_page(bvec->bv_page);
}
#endif

/* blk_lld_busy() */
	return q->lld_busy_fn(q);

/* blk_rq_unprep_clone() */
	while ((bio = rq->bio) != NULL) {
		rq->bio = bio->bi_next;

		bio_put(bio);
	}

static void __blk_rq_prep_clone(struct request *dst, struct request *src)
{
	dst->cpu = src->cpu;
	dst->cmd_type = src->cmd_type;
	dst->__sector = blk_rq_pos(src);
	dst->__data_len = blk_rq_bytes(src);
	dst->nr_phys_segments = src->nr_phys_segments;
	dst->ioprio = src->ioprio;
	dst->extra_len = src->extra_len;
}

/* blk_rq_prep_clone() */
int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
		      struct bio_set *bs, gfp_t gfp_mask,
		      int (*bio_ctr)(struct bio *, struct bio *, void *),
		      void *data)
{
	struct bio *bio, *bio_src;

	__rq_for_each_bio(bio_src, rq_src) {
		if (bio_ctr && bio_ctr(bio, bio_src, data))
			goto free_and_out;

		if (rq->bio) {
			rq->biotail->bi_next = bio;
			rq->biotail = bio;
		} else
			rq->bio = rq->biotail = bio;
	}

	__blk_rq_prep_clone(rq, rq_src);
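/*
 * Block plugging: blk_start_plug() installs a per-task plug so submitted
 * requests are collected on plug->list (sorted by queue and start sector
 * at flush time) instead of being sent to the driver one at a time.
 */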
#define PLUG_MAGIC	0x91827364

/* blk_start_plug() */
	INIT_LIST_HEAD(&plug->list);
	INIT_LIST_HEAD(&plug->cb_list);
	plug->should_sort = 0;

/* plug_rq_cmp(): sort plugged requests by queue, then by start sector */
	return !(rqa->q < rqb->q ||
		(rqa->q == rqb->q && blk_rq_pos(rqa) < blk_rq_pos(rqb)));

/* queue_unplugged(): called with q->queue_lock held, drops it */
	trace_block_unplug(q, depth, !from_schedule);

	/* don't mess with a dead queue */
		spin_unlock(q->queue_lock);

	if (from_schedule) {
		spin_unlock(q->queue_lock);
		blk_run_queue_async(q);
	} else {
		__blk_run_queue(q);
		spin_unlock(q->queue_lock);
	}

static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule)
{
	while (!list_empty(&plug->cb_list)) {
		list_splice_init(&plug->cb_list, &callbacks);

		while (!list_empty(&callbacks)) {
			cb->callback(cb, from_schedule);
		}
	}

/* blk_check_plugged() */
	struct blk_plug_cb *cb;

	if (cb->callback == unplug && cb->data == data)
		return cb;

	/* not currently on the callback list */
	BUG_ON(size < sizeof(*cb));

	cb->callback = unplug;
	list_add(&cb->list, &plug->cb_list);
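/*
 * blk_flush_plug_list() - run the plug callbacks, sort the plugged
 * requests if needed, then insert them queue by queue, calling
 * queue_unplugged() whenever the target queue changes or the list
 * is exhausted.
 */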
/* blk_flush_plug_list() */
	unsigned long flags;

	flush_plug_callbacks(plug, from_schedule);
	if (list_empty(&plug->list))
		return;

	list_splice_init(&plug->list, &list);

	if (plug->should_sort) {
		list_sort(NULL, &list, plug_rq_cmp);
		plug->should_sort = 0;
	}

	while (!list_empty(&list)) {
		rq = list_entry_rq(list.next);
		list_del_init(&rq->queuelist);

		/* queue changed: flush what has been gathered so far */
		queue_unplugged(q, depth, from_schedule);
		spin_lock(q->queue_lock);

	/* this drops the queue lock */
	queue_unplugged(q, depth, from_schedule);
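/*
 * blk_dev_init() - boot-time setup for the block layer; failure to
 * create the kblockd workqueue is fatal.
 */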
int __init blk_dev_init(void)
{
	BUILD_BUG_ON(__REQ_NR_BITS > 8 *
			sizeof(((struct request *)0)->cmd_flags));

	kblockd_workqueue = alloc_workqueue("kblockd",
					    WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
	if (!kblockd_workqueue)
		panic("Failed to create kblockd\n");