Linux Kernel 3.7.1
blk-map.c
/*
 * Functions related to mapping data to requests
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <scsi/sg.h>		/* for struct sg_iovec */

#include "blk.h"

int blk_rq_append_bio(struct request_queue *q, struct request *rq,
                      struct bio *bio)
{
        if (!rq->bio)
                blk_rq_bio_prep(q, rq, bio);
        else if (!ll_back_merge_fn(q, rq, bio))
                return -EINVAL;
        else {
                rq->biotail->bi_next = bio;
                rq->biotail = bio;

                rq->__data_len += bio->bi_size;
        }
        return 0;
}

static int __blk_rq_unmap_user(struct bio *bio)
{
        int ret = 0;

        if (bio) {
                if (bio_flagged(bio, BIO_USER_MAPPED))
                        bio_unmap_user(bio);
                else
                        ret = bio_uncopy_user(bio);
        }

        return ret;
}

static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
                             struct rq_map_data *map_data, void __user *ubuf,
                             unsigned int len, gfp_t gfp_mask)
{
        unsigned long uaddr;
        struct bio *bio, *orig_bio;
        int reading, ret;

        reading = rq_data_dir(rq) == READ;

        /*
         * if alignment requirement is satisfied, map in user pages for
         * direct dma. else, set up kernel bounce buffers
         */
        uaddr = (unsigned long) ubuf;
        if (blk_rq_aligned(q, uaddr, len) && !map_data)
                bio = bio_map_user(q, NULL, uaddr, len, reading, gfp_mask);
        else
                bio = bio_copy_user(q, map_data, uaddr, len, reading, gfp_mask);

        if (IS_ERR(bio))
                return PTR_ERR(bio);

        if (map_data && map_data->null_mapped)
                bio->bi_flags |= (1 << BIO_NULL_MAPPED);

        orig_bio = bio;
        blk_queue_bounce(q, &bio);

        /*
         * We link the bounce buffer in and could have to traverse it
         * later so we have to get a ref to prevent it from being freed
         */
        bio_get(bio);

        ret = blk_rq_append_bio(q, rq, bio);
        if (!ret)
                return bio->bi_size;

        /* if it was bounced we must call the end io function */
        bio_endio(bio, 0);
        __blk_rq_unmap_user(orig_bio);
        bio_put(bio);
        return ret;
}
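
/*
 * Illustrative note, not part of the original file: the zero-copy branch in
 * __blk_rq_map_user() is only taken when blk_rq_aligned() accepts both the
 * user address and the length.  In this kernel the helper in
 * <linux/blkdev.h> is essentially:
 *
 *      unsigned int alignment = queue_dma_alignment(q) | q->dma_pad_mask;
 *      return !(addr & alignment) && !(len & alignment);
 *
 * so, for example, a buffer at an odd user address on a queue with the
 * default dma alignment mask of 511 (512-byte alignment) falls back to
 * bio_copy_user() and a kernel bounce buffer.
 */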

int blk_rq_map_user(struct request_queue *q, struct request *rq,
                    struct rq_map_data *map_data, void __user *ubuf,
                    unsigned long len, gfp_t gfp_mask)
{
        unsigned long bytes_read = 0;
        struct bio *bio = NULL;
        int ret;

        if (len > (queue_max_hw_sectors(q) << 9))
                return -EINVAL;
        if (!len)
                return -EINVAL;

        if (!ubuf && (!map_data || !map_data->null_mapped))
                return -EINVAL;

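        /*
         * Illustrative note, not part of the original file: a single bio can
         * hold at most BIO_MAX_PAGES pages, so the loop below maps the user
         * buffer in chunks of at most BIO_MAX_SIZE bytes, appending one bio
         * per iteration through __blk_rq_map_user().
         */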
        while (bytes_read != len) {
                unsigned long map_len, end, start;

                map_len = min_t(unsigned long, len - bytes_read, BIO_MAX_SIZE);
                end = ((unsigned long)ubuf + map_len + PAGE_SIZE - 1)
                                                        >> PAGE_SHIFT;
                start = (unsigned long)ubuf >> PAGE_SHIFT;

                /*
                 * A bad offset could cause us to require BIO_MAX_PAGES + 1
                 * pages. If this happens we just lower the requested
                 * mapping len by a page so that we can fit
                 */
                if (end - start > BIO_MAX_PAGES)
                        map_len -= PAGE_SIZE;

                ret = __blk_rq_map_user(q, rq, map_data, ubuf, map_len,
                                        gfp_mask);
                if (ret < 0)
                        goto unmap_rq;
                if (!bio)
                        bio = rq->bio;
                bytes_read += ret;
                ubuf += ret;

                if (map_data)
                        map_data->offset += ret;
        }

        if (!bio_flagged(bio, BIO_USER_MAPPED))
                rq->cmd_flags |= REQ_COPY_USER;

        rq->buffer = NULL;
        return 0;
unmap_rq:
        blk_rq_unmap_user(bio);
        rq->bio = NULL;
        return ret;
}
EXPORT_SYMBOL(blk_rq_map_user);
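
/*
 * Illustrative sketch, not part of the original file: how a passthrough
 * ioctl might pair blk_rq_map_user() with blk_rq_unmap_user(), in the
 * spirit of the SG_IO path.  example_user_rw() and its parameters are
 * hypothetical.
 */
static int example_user_rw(struct request_queue *q, struct gendisk *disk,
                           void __user *ubuf, unsigned long len, int write)
{
        struct request *rq;
        struct bio *bio;
        int err;

        rq = blk_get_request(q, write ? WRITE : READ, GFP_KERNEL);
        if (!rq)
                return -ENOMEM;
        rq->cmd_type = REQ_TYPE_BLOCK_PC;

        err = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
        if (err) {
                blk_put_request(rq);
                return err;
        }
        /* rq->bio is advanced as the request completes, so remember it now */
        bio = rq->bio;

        err = blk_execute_rq(q, disk, rq, 0);

        /* unmap in process context, whether or not the I/O succeeded */
        if (blk_rq_unmap_user(bio) && !err)
                err = -EFAULT;

        blk_put_request(rq);
        return err;
}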

int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
                        struct rq_map_data *map_data, struct sg_iovec *iov,
                        int iov_count, unsigned int len, gfp_t gfp_mask)
{
        struct bio *bio;
        int i, read = rq_data_dir(rq) == READ;
        int unaligned = 0;

        if (!iov || iov_count <= 0)
                return -EINVAL;

        for (i = 0; i < iov_count; i++) {
                unsigned long uaddr = (unsigned long)iov[i].iov_base;

                if (!iov[i].iov_len)
                        return -EINVAL;

                /*
                 * Keep going so we check length of all segments
                 */
                if (uaddr & queue_dma_alignment(q))
                        unaligned = 1;
        }

        if (unaligned || (q->dma_pad_mask & len) || map_data)
                bio = bio_copy_user_iov(q, map_data, iov, iov_count, read,
                                        gfp_mask);
        else
                bio = bio_map_user_iov(q, NULL, iov, iov_count, read, gfp_mask);

        if (IS_ERR(bio))
                return PTR_ERR(bio);

        if (bio->bi_size != len) {
                /*
                 * Grab an extra reference to this bio, as bio_unmap_user()
                 * expects to be able to drop it twice as it happens on the
                 * normal IO completion path
                 */
                bio_get(bio);
                bio_endio(bio, 0);
                __blk_rq_unmap_user(bio);
                return -EINVAL;
        }

        if (!bio_flagged(bio, BIO_USER_MAPPED))
                rq->cmd_flags |= REQ_COPY_USER;

        blk_queue_bounce(q, &bio);
        bio_get(bio);
        blk_rq_bio_prep(q, rq, bio);
        rq->buffer = NULL;
        return 0;
}
EXPORT_SYMBOL(blk_rq_map_user_iov);
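
/*
 * Illustrative sketch, not part of the original file: mapping a two-segment
 * user scatter list with blk_rq_map_user_iov(), as an SG_IO-style ioctl
 * with iovec_count might do.  example_map_iov() and its parameters are
 * hypothetical; the caller still unmaps with blk_rq_unmap_user() after the
 * request completes.
 */
static int example_map_iov(struct request_queue *q, struct request *rq,
                           void __user *hdr, unsigned int hdr_len,
                           void __user *payload, unsigned int payload_len)
{
        struct sg_iovec iov[2] = {
                { .iov_base = hdr,     .iov_len = hdr_len },
                { .iov_base = payload, .iov_len = payload_len },
        };

        /* len must match what the segments actually map, or -EINVAL */
        return blk_rq_map_user_iov(q, rq, NULL, iov, 2,
                                   hdr_len + payload_len, GFP_KERNEL);
}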

int blk_rq_unmap_user(struct bio *bio)
{
        struct bio *mapped_bio;
        int ret = 0, ret2;

        while (bio) {
                mapped_bio = bio;
                if (unlikely(bio_flagged(bio, BIO_BOUNCED)))
                        mapped_bio = bio->bi_private;

                ret2 = __blk_rq_unmap_user(mapped_bio);
                if (ret2 && !ret)
                        ret = ret2;

                mapped_bio = bio;
                bio = bio->bi_next;
                bio_put(mapped_bio);
        }

        return ret;
}
EXPORT_SYMBOL(blk_rq_unmap_user);

int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
                    unsigned int len, gfp_t gfp_mask)
{
        int reading = rq_data_dir(rq) == READ;
        unsigned long addr = (unsigned long) kbuf;
        int do_copy = 0;
        struct bio *bio;
        int ret;

        if (len > (queue_max_hw_sectors(q) << 9))
                return -EINVAL;
        if (!len || !kbuf)
                return -EINVAL;

        do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
        if (do_copy)
                bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
        else
                bio = bio_map_kern(q, kbuf, len, gfp_mask);

        if (IS_ERR(bio))
                return PTR_ERR(bio);

        if (!reading)
                bio->bi_rw |= REQ_WRITE;

        if (do_copy)
                rq->cmd_flags |= REQ_COPY_USER;

        ret = blk_rq_append_bio(q, rq, bio);
        if (unlikely(ret)) {
                /* request is too big */
                bio_put(bio);
                return ret;
        }

        blk_queue_bounce(q, &rq->bio);
        rq->buffer = NULL;
        return 0;
}
EXPORT_SYMBOL(blk_rq_map_kern);
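
/*
 * Illustrative sketch, not part of the original file: mapping a kernel
 * buffer into a passthrough request, in the spirit of scsi_execute().
 * example_kern_rw() and its parameters are hypothetical.
 */
static int example_kern_rw(struct request_queue *q, struct gendisk *disk,
                           void *buf, unsigned int len, int write)
{
        struct request *rq;
        int err;

        rq = blk_get_request(q, write ? WRITE : READ, GFP_KERNEL);
        if (!rq)
                return -ENOMEM;
        rq->cmd_type = REQ_TYPE_BLOCK_PC;

        err = blk_rq_map_kern(q, rq, buf, len, GFP_KERNEL);
        if (!err)
                err = blk_execute_rq(q, disk, rq, 0);

        /* no explicit unmap: the bio is released when the request completes */
        blk_put_request(rq);
        return err;
}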