#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <scsi/sg.h>		/* for struct sg_iovec */

#include "blk.h"
/* blk_rq_append_bio(): link the new bio at the tail of the request's chain */
	rq->biotail->bi_next = bio;
	rq->biotail = bio;

	rq->__data_len += bio->bi_size;
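/*
 * A minimal stand-alone sketch (hypothetical names, plain C) of the same
 * head/tail pattern used above: keeping a tail pointer alongside the head
 * makes appending to the bio chain O(1) instead of a full list walk.
 */
struct node { struct node *next; };
struct chain { struct node *head, *tail; };

static void chain_append(struct chain *c, struct node *n)
{
	n->next = NULL;
	if (!c->head) {
		c->head = c->tail = n;	/* empty chain: head == tail */
	} else {
		c->tail->next = n;	/* mirrors rq->biotail->bi_next = bio */
		c->tail = n;		/* mirrors rq->biotail = bio */
	}
}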
static int __blk_rq_unmap_user(struct bio *bio)
{
	int ret = 0;

	if (bio) {
		if (bio_flagged(bio, BIO_USER_MAPPED))
			bio_unmap_user(bio);	/* unpin directly mapped user pages */
		else
			ret = bio_uncopy_user(bio); /* copy bounce data back, free it */
	}

	return ret;
}
static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
			     struct rq_map_data *map_data, void __user *ubuf,
			     unsigned int len, gfp_t gfp_mask)
{
	unsigned long uaddr;
	struct bio *bio, *orig_bio;
	int reading, ret;

	reading = rq_data_dir(rq) == READ;

	/*
	 * If the buffer satisfies the queue's alignment requirement, map the
	 * user pages in for direct DMA; otherwise set up a kernel bounce
	 * buffer.
	 */
	uaddr = (unsigned long) ubuf;
	if (blk_rq_aligned(q, uaddr, len) && !map_data)
		bio = bio_map_user(q, NULL, uaddr, len, reading, gfp_mask);
	else
		bio = bio_copy_user(q, map_data, uaddr, len, reading, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (map_data && map_data->null_mapped)
		bio->bi_flags |= (1 << BIO_NULL_MAPPED);

	orig_bio = bio;
	blk_queue_bounce(q, &bio);

	/*
	 * We link the bounce buffer in and could have to traverse it
	 * later, so we have to get a ref to prevent it from being freed.
	 */
	bio_get(bio);

	ret = blk_rq_append_bio(q, rq, bio);
	if (!ret)
		return bio->bi_size;

	/* if it was bounced we must call the end io function */
	bio_endio(bio, 0);
	__blk_rq_unmap_user(orig_bio);
	bio_put(bio);
	return ret;
}
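/*
 * A stand-alone sketch of the test blk_rq_aligned() performs on the fast
 * path above (assumed semantics: both the buffer address and its length
 * must honour the queue's DMA alignment mask; the 511 mask for 512-byte
 * alignment is a hypothetical value for illustration).
 */
static inline int example_rq_aligned(unsigned long addr, unsigned long len)
{
	unsigned long mask = 511;	/* stand-in for queue_dma_alignment(q) */

	return !((addr | len) & mask);	/* both must be 512-byte multiples */
}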
int blk_rq_map_user(struct request_queue *q, struct request *rq,
		    struct rq_map_data *map_data, void __user *ubuf,
		    unsigned long len, gfp_t gfp_mask)
{
	unsigned long bytes_read = 0;
	struct bio *bio = NULL;
	int ret;

	if (len > (queue_max_hw_sectors(q) << 9))
		return -EINVAL;
	if (!len)
		return -EINVAL;

	/* a NULL buffer is only allowed for a null mapping */
	if (!ubuf && (!map_data || !map_data->null_mapped))
		return -EINVAL;

	while (bytes_read != len) {
		unsigned long map_len, end, start;

		map_len = min_t(unsigned long, len - bytes_read, BIO_MAX_SIZE);
		end = ((unsigned long)ubuf + map_len + PAGE_SIZE - 1)
								>> PAGE_SHIFT;
		start = (unsigned long)ubuf >> PAGE_SHIFT;

		/*
		 * A bad offset could cause us to require BIO_MAX_PAGES + 1
		 * pages. If this happens we just lower the requested
		 * mapping len by a page so that we can fit.
		 */
		if (end - start > BIO_MAX_PAGES)
			map_len -= PAGE_SIZE;

		ret = __blk_rq_map_user(q, rq, map_data, ubuf, map_len,
					gfp_mask);
		if (ret < 0)
			goto unmap_rq;
		if (!bio)
			bio = rq->bio;
		bytes_read += ret;
		ubuf += ret;

		if (map_data)
			map_data->offset += ret;
	}

	if (!bio_flagged(bio, BIO_USER_MAPPED))
		rq->cmd_flags |= REQ_COPY_USER;

	rq->buffer = NULL;
	return 0;
unmap_rq:
	blk_rq_unmap_user(bio);
	rq->bio = NULL;
	return ret;
}
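/*
 * A stand-alone sketch of the page arithmetic in the loop above, with an
 * assumed 4 KiB page size and a hypothetical 256-page cap standing in for
 * BIO_MAX_PAGES: a chunk that fits in 256 pages' worth of bytes can still
 * span 257 page frames when the buffer starts mid-page, which is why the
 * loop shaves one page off map_len in that case.
 */
static unsigned long example_pages_spanned(unsigned long ubuf,
					   unsigned long map_len)
{
	const unsigned long pg_shift = 12;		/* assumed PAGE_SHIFT */
	const unsigned long pg_size = 1UL << pg_shift;
	unsigned long end = (ubuf + map_len + pg_size - 1) >> pg_shift;
	unsigned long start = ubuf >> pg_shift;

	/*
	 * example_pages_spanned(0x1000, 256 << 12) == 256 (fits), but
	 * example_pages_spanned(0x1200, 256 << 12) == 257 (one page over).
	 */
	return end - start;
}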
int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
			struct rq_map_data *map_data, struct sg_iovec *iov,
			int iov_count, unsigned int len, gfp_t gfp_mask)
{
	struct bio *bio;
	int i, read = rq_data_dir(rq) == READ;
	int unaligned = 0;

	if (!iov || iov_count <= 0)
		return -EINVAL;

	for (i = 0; i < iov_count; i++) {
		unsigned long uaddr = (unsigned long)iov[i].iov_base;

		/* one misaligned segment pushes the whole list to the copy path */
		if (uaddr & queue_dma_alignment(q))
			unaligned = 1;
	}

	if (unaligned || (q->dma_pad_mask & len) || map_data)
		bio = bio_copy_user_iov(q, map_data, iov, iov_count, read,
					gfp_mask);
	else
		bio = bio_map_user_iov(q, NULL, iov, iov_count, read, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (bio->bi_size != len) {
		/*
		 * Grab an extra reference to this bio, as bio_unmap_user()
		 * expects to be able to drop it twice as it happens on the
		 * normal IO completion path.
		 */
		bio_get(bio);
		bio_endio(bio, 0);
		__blk_rq_unmap_user(bio);
		return -EINVAL;
	}

	if (!bio_flagged(bio, BIO_USER_MAPPED))
		rq->cmd_flags |= REQ_COPY_USER;

	blk_queue_bounce(q, &bio);
	bio_get(bio);
	blk_rq_bio_prep(q, rq, bio);
	rq->buffer = NULL;
	return 0;
}
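/*
 * User-space sketch (illustrative, not part of this file): the kind of
 * sg_iovec list an SG_IO caller hands in. Each segment is allocated
 * page-aligned so the direct-mapping branch above is taken; the segment
 * count, size, and 4096-byte alignment are arbitrary assumptions.
 */
#include <stdlib.h>
#include <scsi/sg.h>

static int example_build_iov(sg_iovec_t iov[2], size_t seg_len)
{
	int i;

	for (i = 0; i < 2; i++) {
		/* aligned base keeps uaddr & queue_dma_alignment(q) == 0 */
		if (posix_memalign(&iov[i].iov_base, 4096, seg_len))
			return -1;
		iov[i].iov_len = seg_len;
	}
	return 0;
}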
int blk_rq_unmap_user(struct bio *bio)
{
	struct bio *mapped_bio;
	int ret = 0, ret2;

	while (bio) {
		mapped_bio = bio;
		/* a bounced bio keeps the original mapping in bi_private */
		if (unlikely(bio_flagged(bio, BIO_BOUNCED)))
			mapped_bio = bio->bi_private;

		ret2 = __blk_rq_unmap_user(mapped_bio);
		if (ret2 && !ret)
			ret = ret2;

		mapped_bio = bio;
		bio = bio->bi_next;	/* advance first, then drop the reference */
		bio_put(mapped_bio);
	}

	return ret;
}
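/*
 * A stand-alone sketch (hypothetical helper, plain C) of the error-latching
 * idiom above: cleanup keeps walking the whole collection even after a
 * failure, and the first error seen is the one reported.
 */
static int example_release_all(int (*release)(void *), void **items, int n)
{
	int i, ret = 0;

	for (i = 0; i < n; i++) {
		int ret2 = release(items[i]);

		if (ret2 && !ret)
			ret = ret2;	/* remember the first failure only */
	}
	return ret;
}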
int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
		    unsigned int len, gfp_t gfp_mask)
{
	int reading = rq_data_dir(rq) == READ;
	unsigned long addr = (unsigned long) kbuf;
	int do_copy = 0;
	struct bio *bio;

	if (len > (queue_max_hw_sectors(q) << 9))
		return -EINVAL;
	if (!len || !kbuf)
		return -EINVAL;

	/* misaligned or on-stack buffers must be bounced, never DMA'd directly */
	do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
	if (do_copy)
		bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
	else
		bio = bio_map_kern(q, kbuf, len, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);
	/* ... */
}
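/*
 * Hypothetical caller sketch: the object_is_on_stack() test above exists
 * because a stack buffer must never be handed to DMA, so it is bounced
 * through a copy. A kmalloc() buffer, by contrast, can usually be mapped
 * directly. The function name and sizes here are assumptions.
 */
static int example_fill_request(struct request_queue *q, struct request *rq)
{
	char *buf = kmalloc(512, GFP_KERNEL);	/* heap: direct mapping possible */

	if (!buf)
		return -ENOMEM;
	/*
	 * A local "char buf[512];" would also be accepted, but do_copy
	 * above would force the slower bounce-copy path. Either way, the
	 * buffer must stay allocated until the request completes.
	 */
	return blk_rq_map_kern(q, rq, buf, 512, GFP_KERNEL);
}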