#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"
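
/*
 * Count the physical segments of a bio chain: each bio_vec is folded into
 * the previous segment when the queue supports clustering and the segment
 * size/boundary limits still hold, otherwise a new segment is started.  The
 * first and last segment sizes are cached for later merge checks.
 */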
static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
					     struct bio *bio)
{
	struct bio_vec *bv, *bvprv = NULL;
	int cluster, i, high, highprv = 1;
	unsigned int seg_size, nr_phys_segs;
	struct bio *fbio, *bbio;

	if (!bio)
		return 0;

	fbio = bio;
	cluster = blk_queue_cluster(q);
	seg_size = 0;
	nr_phys_segs = 0;
	for_each_bio(bio) {
		bio_for_each_segment(bv, bio, i) {
			/* a page above the bounce limit never joins a segment */
			high = page_to_pfn(bv->bv_page) > queue_bounce_pfn(q);
			if (high || highprv)
				goto new_segment;
			if (cluster) {
				if (seg_size + bv->bv_len
				    > queue_max_segment_size(q))
					goto new_segment;
				if (!BIOVEC_PHYS_MERGEABLE(bvprv, bv))
					goto new_segment;
				if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bv))
					goto new_segment;

				seg_size += bv->bv_len;
				bvprv = bv;
				continue;
			}
new_segment:
			if (nr_phys_segs == 1 && seg_size >
			    fbio->bi_seg_front_size)
				fbio->bi_seg_front_size = seg_size;

			nr_phys_segs++;
			bvprv = bv;
			seg_size = bv->bv_len;
			highprv = high;
		}
		bbio = bio;
	}

	if (nr_phys_segs == 1 && seg_size > fbio->bi_seg_front_size)
		fbio->bi_seg_front_size = seg_size;
	if (seg_size > bbio->bi_seg_back_size)
		bbio->bi_seg_back_size = seg_size;

	return nr_phys_segs;
}
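
/*
 * blk_recalc_rq_segments() refreshes the segment count for a whole request;
 * blk_recount_segments() does the same for a single bio and sets
 * BIO_SEG_VALID so the result is reused until the bio changes.
 */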
void blk_recalc_rq_segments(struct request *rq)
{
	rq->nr_phys_segments = __blk_recalc_rq_segments(rq->q, rq->bio);
}

void blk_recount_segments(struct request_queue *q, struct bio *bio)
{
	struct bio *nxt = bio->bi_next;

	bio->bi_next = NULL;	/* count this bio in isolation */
	bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio);
	bio->bi_next = nxt;
	bio->bi_flags |= (1 << BIO_SEG_VALID);
}
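
/*
 * Can the tail of @bio and the head of @nxt be counted as one physical
 * segment?  Used when two requests are merged back to back.
 */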
static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
				   struct bio *nxt)
{
	if (!blk_queue_cluster(q))
		return 0;

	if (bio->bi_seg_back_size + nxt->bi_seg_front_size >
	    queue_max_segment_size(q))
		return 0;

	if (!bio_has_data(bio))
		return 1;

	if (!BIOVEC_PHYS_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)))
		return 0;

	/* contiguous in memory; finally check the queue's segment boundary */
	if (BIO_SEG_BOUNDARY(q, bio, nxt))
		return 1;

	return 0;
}
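
/*
 * Add one bio_vec to the scatterlist being built: extend the current sg
 * entry when clustering allows it, otherwise start a new entry.
 */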
static void
__blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec,
		     struct scatterlist *sglist, struct bio_vec **bvprv,
		     struct scatterlist **sg, int *nsegs, int *cluster)
{
	int nbytes = bvec->bv_len;

	if (*bvprv && *cluster) {
		if ((*sg)->length + nbytes > queue_max_segment_size(q))
			goto new_segment;
		if (!BIOVEC_PHYS_MERGEABLE(*bvprv, bvec))
			goto new_segment;
		if (!BIOVEC_SEG_BOUNDARY(q, *bvprv, bvec))
			goto new_segment;

		(*sg)->length += nbytes;
	} else {
new_segment:
		if (!*sg)
			*sg = sglist;
		else {
			/* clear a possible stale end marker before advancing */
			sg_unmark_end(*sg);
			*sg = sg_next(*sg);
		}

		sg_set_page(*sg, bvec->bv_page, nbytes, bvec->bv_offset);
		(*nsegs)++;
	}
	*bvprv = bvec;
}
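
/*
 * blk_rq_map_sg() maps a whole request onto a caller-supplied scatterlist
 * and returns the number of entries used; the caller must provide room for
 * rq->nr_phys_segments entries.  A driver typically calls something like
 * "nseg = blk_rq_map_sg(q, rq, sgl);" before setting up DMA.
 */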
int blk_rq_map_sg(struct request_queue *q, struct request *rq,
		  struct scatterlist *sglist)
{
	struct bio_vec *bvec, *bvprv = NULL;
	struct req_iterator iter;
	struct scatterlist *sg = NULL;
	int nsegs = 0, cluster;

	cluster = blk_queue_cluster(q);
	rq_for_each_segment(bvec, rq, iter) {
		__blk_segment_map_sg(q, bvec, sglist, &bvprv, &sg,
				     &nsegs, &cluster);
	}

	/* pad the last segment out to the DMA alignment, if requested */
	if (unlikely(rq->cmd_flags & REQ_COPY_USER) &&
	    (blk_rq_bytes(rq) & q->dma_pad_mask)) {
		unsigned int pad_len =
			(q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1;

		sg->length += pad_len;
		rq->extra_len += pad_len;
	}

	/* append the drain buffer the device may require */
	if (q->dma_drain_size && q->dma_drain_needed(rq)) {
		if (rq->cmd_flags & REQ_WRITE)
			memset(q->dma_drain_buffer, 0, q->dma_drain_size);
		sg_unmark_end(sg);
		sg = sg_next(sg);
		sg_set_page(sg, virt_to_page(q->dma_drain_buffer),
			    q->dma_drain_size,
			    ((unsigned long)q->dma_drain_buffer) &
			    (PAGE_SIZE - 1));
		nsegs++;
		rq->extra_len += q->dma_drain_size;
	}

	if (sg)
		sg_mark_end(sg);

	return nsegs;
}
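
/*
 * blk_bio_map_sg() is the single-bio variant, used by drivers that build a
 * scatterlist for one bio rather than a whole request.
 */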
int blk_bio_map_sg(struct request_queue *q, struct bio *bio,
		   struct scatterlist *sglist)
{
	struct bio_vec *bvec, *bvprv = NULL;
	struct scatterlist *sg = NULL;
	int nsegs = 0, cluster, i;

	cluster = blk_queue_cluster(q);
	bio_for_each_segment(bvec, bio, i) {
		__blk_segment_map_sg(q, bvec, sglist, &bvprv, &sg,
				     &nsegs, &cluster);
	}
	if (sg)
		sg_mark_end(sg);

	/* the mapped entries must never exceed the cached segment count */
	BUG_ON(bio->bi_phys_segments && nsegs > bio->bi_phys_segments);
	return nsegs;
}
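
/*
 * ll_new_hw_segment() is the common tail of the bio-merge paths: it checks
 * that adding @bio keeps the request within queue_max_segments() and, if
 * so, bumps the request's physical segment count.
 */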
static inline int ll_new_hw_segment(struct request_queue *q,
				    struct request *req,
				    struct bio *bio)
{
	int nr_phys_segs = bio_phys_segments(q, bio);

	if (req->nr_phys_segments + nr_phys_segs > queue_max_segments(q))
		goto no_merge;

	req->nr_phys_segments += nr_phys_segs;
	return 1;

no_merge:
	req->cmd_flags |= REQ_NOMERGE;
	if (req == q->last_merge)
		q->last_merge = NULL;
	return 0;
}
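
/*
 * ll_back_merge_fn()/ll_front_merge_fn() decide whether @bio may be
 * appended to or prepended to @req, rejecting the merge if the combined
 * size would exceed the request's sector limit.
 */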
int ll_back_merge_fn(struct request_queue *q, struct request *req,
		     struct bio *bio)
{
	if (blk_rq_sectors(req) + bio_sectors(bio) >
	    blk_rq_get_max_sectors(req)) {
		req->cmd_flags |= REQ_NOMERGE;
		if (req == q->last_merge)
			q->last_merge = NULL;
		return 0;
	}
	if (!bio_flagged(req->biotail, BIO_SEG_VALID))
		blk_recount_segments(q, req->biotail);
	if (!bio_flagged(bio, BIO_SEG_VALID))
		blk_recount_segments(q, bio);

	return ll_new_hw_segment(q, req, bio);
}

int ll_front_merge_fn(struct request_queue *q, struct request *req,
		      struct bio *bio)
{
	if (blk_rq_sectors(req) + bio_sectors(bio) >
	    blk_rq_get_max_sectors(req)) {
		req->cmd_flags |= REQ_NOMERGE;
		if (req == q->last_merge)
			q->last_merge = NULL;
		return 0;
	}
	if (!bio_flagged(bio, BIO_SEG_VALID))
		blk_recount_segments(q, bio);
	if (!bio_flagged(req->bio, BIO_SEG_VALID))
		blk_recount_segments(q, req->bio);

	return ll_new_hw_segment(q, req, bio);
}
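
/*
 * ll_merge_requests_fn() validates a request-to-request merge: neither may
 * be a requeued "special" request, the combined size and segment count must
 * stay within the queue limits, and adjacent segments are coalesced.
 */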
static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
				struct request *next)
{
	int total_phys_segments;
	unsigned int seg_size =
		req->biotail->bi_seg_back_size + next->bio->bi_seg_front_size;

	if (req->special || next->special)
		return 0;

	/* will the merged request become too large? */
	if ((blk_rq_sectors(req) + blk_rq_sectors(next)) >
	    blk_rq_get_max_sectors(req))
		return 0;

	total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
	if (blk_phys_contig_segment(q, req->biotail, next->bio)) {
		if (req->nr_phys_segments == 1)
			req->bio->bi_seg_front_size = seg_size;
		if (next->nr_phys_segments == 1)
			next->biotail->bi_seg_back_size = seg_size;
		total_phys_segments--;
	}

	if (total_phys_segments > queue_max_segments(q))
		return 0;

	/* merge is OK, update the segment count */
	req->nr_phys_segments = total_phys_segments;
	return 1;
}
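
/*
 * In blk_rq_set_mixed_merge(): when requests with different failfast
 * settings are merged, the request's failfast bits are pushed down into
 * every bio so a later split still sees consistent attributes.
 */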
	for (bio = rq->bio; bio; bio = bio->bi_next) {
		bio->bi_rw |= (rq->cmd_flags & REQ_FAILFAST_MASK);
	}
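
/*
 * Merging makes one request disappear; drop its contribution to the
 * per-partition in-flight statistics.
 */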
static void blk_account_io_merge(struct request *req)
{
	if (blk_do_io_stat(req)) {
		struct hd_struct *part;
		int cpu;

		cpu = part_stat_lock();
		part = req->part;

		part_round_stats(cpu, part);
		part_dec_in_flight(part, rq_data_dir(req));

		hd_struct_put(part);
		part_stat_unlock();
	}
}
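
/*
 * attempt_merge() merges @next into @req when the two requests are
 * contiguous on disk and compatible.  Must be called with the queue lock
 * held.
 */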
static int attempt_merge(struct request_queue *q, struct request *req,
			 struct request *next)
{
	if (!rq_mergeable(req) || !rq_mergeable(next))
		return 0;

	if (!blk_check_merge_flags(req->cmd_flags, next->cmd_flags))
		return 0;

	/* the two requests must be contiguous */
	if (blk_rq_pos(req) + blk_rq_sectors(req) != blk_rq_pos(next))
		return 0;

	if (rq_data_dir(req) != rq_data_dir(next)
	    || req->rq_disk != next->rq_disk
	    || next->special)
		return 0;

	if (req->cmd_flags & REQ_WRITE_SAME &&
	    !blk_write_same_mergeable(req->bio, next->bio))
		return 0;

	/* ll_merge_requests_fn() updates the segment counts on success */
	if (!ll_merge_requests_fn(q, req, next))
		return 0;

	/* keep the earlier start_time for accounting purposes */
	if (time_after(req->start_time, next->start_time))
		req->start_time = next->start_time;

	req->biotail->bi_next = next->bio;
	req->biotail = next->biotail;

	req->__data_len += blk_rq_bytes(next);

	elv_merge_requests(q, req, next);

	/* 'next' is going away, so update stats accordingly */
	blk_account_io_merge(next);

	req->ioprio = ioprio_best(req->ioprio, next->ioprio);
	if (blk_rq_cpu_valid(next))
		req->cpu = next->cpu;

	/* ownership of the bios has passed from next to req */
	next->bio = NULL;
	__blk_put_request(q, next);
	return 1;
}
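
/*
 * attempt_back_merge(), attempt_front_merge() and blk_attempt_req_merge()
 * all funnel into attempt_merge(); the first two look up the neighbouring
 * request through the elevator (elv_latter_request()/elv_former_request()).
 */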
	return attempt_merge(q, rq, next);

	return attempt_merge(q, prev, rq);

	return attempt_merge(q, rq, next);
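
/*
 * blk_rq_merge_ok() is the cheap eligibility test used by the elevator
 * before any merge is attempted: same direction, same disk, matching
 * integrity and WRITE SAME constraints.
 */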
bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
{
	if (!rq_mergeable(rq) || !bio_mergeable(bio))
		return false;
	if (!blk_check_merge_flags(rq->cmd_flags, bio->bi_rw))
		return false;
	/* same data direction, same disk, no special payload */
	if (bio_data_dir(bio) != rq_data_dir(rq))
		return false;
	if (rq->rq_disk != bio->bi_bdev->bd_disk || rq->special)
		return false;
	/* only merge integrity protected bio into ditto rq */
	if (bio_integrity(bio) != blk_integrity_rq(rq))
		return false;
	/* WRITE SAME requests must use the same data buffer */
	if (rq->cmd_flags & REQ_WRITE_SAME &&
	    !blk_write_same_mergeable(rq->bio, bio))
		return false;

	return true;
}
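
/*
 * blk_try_merge() reports where @bio would attach to @rq: at the back if it
 * starts right after the request's last sector, at the front if it ends
 * exactly where the request begins.
 */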
int blk_try_merge(struct request *rq, struct bio *bio)
{
	if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_sector)
		return ELEVATOR_BACK_MERGE;
	else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_sector)
		return ELEVATOR_FRONT_MERGE;

	return ELEVATOR_NO_MERGE;
}