Linux Kernel 3.7.1
dm-io.c
/*
 * Copyright (C) 2003 Sistina Software
 * Copyright (C) 2006 Red Hat GmbH
 *
 * This file is released under the GPL.
 */

#include "dm.h"

#include <linux/device-mapper.h>

#include <linux/bio.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dm-io.h>

#define DM_MSG_PREFIX "io"

#define DM_IO_MAX_REGIONS	BITS_PER_LONG
#define MIN_IOS		16
#define MIN_BIOS	16

struct dm_io_client {
	mempool_t *pool;
	struct bio_set *bios;
};

/*
 * Aligning 'struct io' reduces the number of bits required to store
 * its address. Refer to store_io_and_region_in_bio() below.
 */
struct io {
	unsigned long error_bits;
	atomic_t count;
	struct task_struct *sleeper;
	struct dm_io_client *client;
	io_notify_fn callback;
	void *context;
	void *vma_invalidate_address;
	unsigned long vma_invalidate_size;
} __attribute__((aligned(DM_IO_MAX_REGIONS)));

static struct kmem_cache *_dm_io_cache;

/*
 * Create a client with mempool and bioset.
 */
struct dm_io_client *dm_io_client_create(void)
{
	struct dm_io_client *client;

	client = kmalloc(sizeof(*client), GFP_KERNEL);
	if (!client)
		return ERR_PTR(-ENOMEM);

	client->pool = mempool_create_slab_pool(MIN_IOS, _dm_io_cache);
	if (!client->pool)
		goto bad;

	client->bios = bioset_create(MIN_BIOS, 0);
	if (!client->bios)
		goto bad;

	return client;

   bad:
	if (client->pool)
		mempool_destroy(client->pool);
	kfree(client);
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(dm_io_client_create);

void dm_io_client_destroy(struct dm_io_client *client)
{
	mempool_destroy(client->pool);
	bioset_free(client->bios);
	kfree(client);
}
EXPORT_SYMBOL(dm_io_client_destroy);

/*-----------------------------------------------------------------
 * We need to keep track of which region a bio is doing io for.
 * To avoid a memory allocation to store just 5 or 6 bits, we
 * ensure the 'struct io' pointer is aligned so enough low bits are
 * always zero and then combine it with the region number directly in
 * bi_private.
 *---------------------------------------------------------------*/
static void store_io_and_region_in_bio(struct bio *bio, struct io *io,
				       unsigned region)
{
	if (unlikely(!IS_ALIGNED((unsigned long)io, DM_IO_MAX_REGIONS))) {
		DMCRIT("Unaligned struct io pointer %p", io);
		BUG();
	}

	bio->bi_private = (void *)((unsigned long)io | region);
}

static void retrieve_io_and_region_from_bio(struct bio *bio, struct io **io,
					    unsigned *region)
{
	unsigned long val = (unsigned long)bio->bi_private;

	*io = (void *)(val & -(unsigned long)DM_IO_MAX_REGIONS);
	*region = val & (DM_IO_MAX_REGIONS - 1);
}
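
/*
 * A minimal sketch (not part of the original file) of the round trip the
 * two helpers above provide: because 'struct io' is aligned to
 * DM_IO_MAX_REGIONS bytes, any region number below DM_IO_MAX_REGIONS can
 * be packed into the pointer's low bits and recovered losslessly.  The
 * function name is illustrative only.
 */
static void __maybe_unused tagging_round_trip_sketch(struct bio *bio,
						     struct io *io)
{
	struct io *io_out;
	unsigned region_out;

	store_io_and_region_in_bio(bio, io, 5);
	retrieve_io_and_region_from_bio(bio, &io_out, &region_out);
	/* At this point io_out == io and region_out == 5. */
}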

/*-----------------------------------------------------------------
 * We need an io object to keep track of the number of bios that
 * have been dispatched for a particular io.
 *---------------------------------------------------------------*/
static void dec_count(struct io *io, unsigned int region, int error)
{
	if (error)
		set_bit(region, &io->error_bits);

	if (atomic_dec_and_test(&io->count)) {
		if (io->vma_invalidate_size)
			invalidate_kernel_vmap_range(io->vma_invalidate_address,
						     io->vma_invalidate_size);

		if (io->sleeper)
			wake_up_process(io->sleeper);

		else {
			unsigned long r = io->error_bits;
			io_notify_fn fn = io->callback;
			void *context = io->context;

			mempool_free(io, io->client->pool);
			fn(r, context);
		}
	}
}

static void endio(struct bio *bio, int error)
{
	struct io *io;
	unsigned region;

	if (error && bio_data_dir(bio) == READ)
		zero_fill_bio(bio);

	/*
	 * The bio destructor in bio_put() may use the io object.
	 */
	retrieve_io_and_region_from_bio(bio, &io, &region);

	bio_put(bio);

	dec_count(io, region, error);
}

/*-----------------------------------------------------------------
 * These little objects provide an abstraction for getting a new
 * destination page for io.
 *---------------------------------------------------------------*/
struct dpages {
	void (*get_page)(struct dpages *dp,
			 struct page **p, unsigned long *len, unsigned *offset);
	void (*next_page)(struct dpages *dp);

	unsigned context_u;
	void *context_ptr;

	void *vma_invalidate_address;
	unsigned long vma_invalidate_size;
};
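
/*
 * A minimal sketch (not in the original file) of how a consumer walks
 * this iterator; do_region() below follows the same pattern.  The helper
 * name and the 'nbytes' parameter are illustrative assumptions.
 */
static unsigned long __maybe_unused dp_walk_sketch(struct dpages *dp,
						   unsigned long nbytes)
{
	struct page *page;
	unsigned long len, total = 0;
	unsigned offset;

	while (nbytes) {
		dp->get_page(dp, &page, &len, &offset);	/* current fragment */
		len = min(len, nbytes);			/* use what we need */
		total += len;
		nbytes -= len;
		dp->next_page(dp);			/* advance iterator */
	}
	return total;
}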

/*
 * Functions for getting the pages from a list.
 */
static void list_get_page(struct dpages *dp,
		  struct page **p, unsigned long *len, unsigned *offset)
{
	unsigned o = dp->context_u;
	struct page_list *pl = (struct page_list *) dp->context_ptr;

	*p = pl->page;
	*len = PAGE_SIZE - o;
	*offset = o;
}

static void list_next_page(struct dpages *dp)
{
	struct page_list *pl = (struct page_list *) dp->context_ptr;
	dp->context_ptr = pl->next;
	dp->context_u = 0;
}

static void list_dp_init(struct dpages *dp, struct page_list *pl, unsigned offset)
{
	dp->get_page = list_get_page;
	dp->next_page = list_next_page;
	dp->context_u = offset;
	dp->context_ptr = pl;
}

/*
 * Functions for getting the pages from a bvec.
 */
static void bvec_get_page(struct dpages *dp,
		  struct page **p, unsigned long *len, unsigned *offset)
{
	struct bio_vec *bvec = (struct bio_vec *) dp->context_ptr;
	*p = bvec->bv_page;
	*len = bvec->bv_len;
	*offset = bvec->bv_offset;
}

static void bvec_next_page(struct dpages *dp)
{
	struct bio_vec *bvec = (struct bio_vec *) dp->context_ptr;
	dp->context_ptr = bvec + 1;
}

static void bvec_dp_init(struct dpages *dp, struct bio_vec *bvec)
{
	dp->get_page = bvec_get_page;
	dp->next_page = bvec_next_page;
	dp->context_ptr = bvec;
}

/*
 * Functions for getting the pages from a VMA.
 */
static void vm_get_page(struct dpages *dp,
		 struct page **p, unsigned long *len, unsigned *offset)
{
	*p = vmalloc_to_page(dp->context_ptr);
	*offset = dp->context_u;
	*len = PAGE_SIZE - dp->context_u;
}

static void vm_next_page(struct dpages *dp)
{
	dp->context_ptr += PAGE_SIZE - dp->context_u;
	dp->context_u = 0;
}

static void vm_dp_init(struct dpages *dp, void *data)
{
	dp->get_page = vm_get_page;
	dp->next_page = vm_next_page;
	dp->context_u = ((unsigned long) data) & (PAGE_SIZE - 1);
	dp->context_ptr = data;
}

/*
 * Functions for getting the pages from kernel memory.
 */
static void km_get_page(struct dpages *dp, struct page **p, unsigned long *len,
			unsigned *offset)
{
	*p = virt_to_page(dp->context_ptr);
	*offset = dp->context_u;
	*len = PAGE_SIZE - dp->context_u;
}

static void km_next_page(struct dpages *dp)
{
	dp->context_ptr += PAGE_SIZE - dp->context_u;
	dp->context_u = 0;
}

static void km_dp_init(struct dpages *dp, void *data)
{
	dp->get_page = km_get_page;
	dp->next_page = km_next_page;
	dp->context_u = ((unsigned long) data) & (PAGE_SIZE - 1);
	dp->context_ptr = data;
}

/*-----------------------------------------------------------------
 * IO routines that accept a list of pages.
 *---------------------------------------------------------------*/
static void do_region(int rw, unsigned region, struct dm_io_region *where,
		      struct dpages *dp, struct io *io)
{
	struct bio *bio;
	struct page *page;
	unsigned long len;
	unsigned offset;
	unsigned num_bvecs;
	sector_t remaining = where->count;
	struct request_queue *q = bdev_get_queue(where->bdev);
	sector_t discard_sectors;

	/*
	 * where->count may be zero if rw holds a flush and we need to
	 * send a zero-sized flush.
	 */
	do {
		/*
		 * Allocate a suitably sized bio.
		 */
		if (rw & REQ_DISCARD)
			num_bvecs = 1;
		else
			num_bvecs = min_t(int, bio_get_nr_vecs(where->bdev),
					  dm_sector_div_up(remaining, (PAGE_SIZE >> SECTOR_SHIFT)));

		bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, io->client->bios);
		bio->bi_sector = where->sector + (where->count - remaining);
		bio->bi_bdev = where->bdev;
		bio->bi_end_io = endio;
		store_io_and_region_in_bio(bio, io, region);

		if (rw & REQ_DISCARD) {
			discard_sectors = min_t(sector_t, q->limits.max_discard_sectors, remaining);
			bio->bi_size = discard_sectors << SECTOR_SHIFT;
			remaining -= discard_sectors;
		} else while (remaining) {
			/*
			 * Try and add as many pages as possible.
			 */
			dp->get_page(dp, &page, &len, &offset);
			len = min(len, to_bytes(remaining));
			if (!bio_add_page(bio, page, len, offset))
				break;

			offset = 0;
			remaining -= to_sector(len);
			dp->next_page(dp);
		}

		atomic_inc(&io->count);
		submit_bio(rw, bio);
	} while (remaining);
}
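
/*
 * Worked example for the sizing above (assuming 4 KiB pages and 512-byte
 * sectors, i.e. PAGE_SIZE >> SECTOR_SHIFT == 8): a 1 MiB region is 2048
 * sectors, so dm_sector_div_up(2048, 8) asks for 256 page vectors.
 * bio_get_nr_vecs() typically caps this lower, in which case
 * bio_add_page() eventually fails, the inner loop breaks, and the outer
 * do-while submits another bio starting at where->sector +
 * (where->count - remaining).
 */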

static void dispatch_io(int rw, unsigned int num_regions,
			struct dm_io_region *where, struct dpages *dp,
			struct io *io, int sync)
{
	int i;
	struct dpages old_pages = *dp;

	BUG_ON(num_regions > DM_IO_MAX_REGIONS);

	if (sync)
		rw |= REQ_SYNC;

	/*
	 * For multiple regions we need to be careful to rewind
	 * the dp object for each call to do_region.
	 */
	for (i = 0; i < num_regions; i++) {
		*dp = old_pages;
		if (where[i].count || (rw & REQ_FLUSH))
			do_region(rw, i, where + i, dp, io);
	}

	/*
	 * Drop the extra reference that we were holding to avoid
	 * the io being completed too early.
	 */
	dec_count(io, 0, 0);
}
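
/*
 * Note on the reference counting above: both callers below seed io->count
 * with 1 before calling dispatch_io(), and each do_region() call adds one
 * reference per bio it submits.  The final dec_count(io, 0, 0) drops that
 * initial bias, so the completion path in dec_count() cannot fire while
 * bios are still being issued, even if every bio finishes first.
 */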

static int sync_io(struct dm_io_client *client, unsigned int num_regions,
		   struct dm_io_region *where, int rw, struct dpages *dp,
		   unsigned long *error_bits)
{
	/*
	 * gcc <= 4.3 can't do the alignment for stack variables, so we must
	 * align it on our own.
	 * volatile prevents the optimizer from removing or reusing
	 * "io_" field from the stack frame (allowed in ANSI C).
	 */
	volatile char io_[sizeof(struct io) + __alignof__(struct io) - 1];
	struct io *io = (struct io *)PTR_ALIGN(&io_, __alignof__(struct io));

	if (num_regions > 1 && (rw & RW_MASK) != WRITE) {
		WARN_ON(1);
		return -EIO;
	}

	io->error_bits = 0;
	atomic_set(&io->count, 1); /* see dispatch_io() */
	io->sleeper = current;
	io->client = client;

	io->vma_invalidate_address = dp->vma_invalidate_address;
	io->vma_invalidate_size = dp->vma_invalidate_size;

	dispatch_io(rw, num_regions, where, dp, io, 1);

	while (1) {
		set_current_state(TASK_UNINTERRUPTIBLE);

		if (!atomic_read(&io->count))
			break;

		io_schedule();
	}
	set_current_state(TASK_RUNNING);

	if (error_bits)
		*error_bits = io->error_bits;

	return io->error_bits ? -EIO : 0;
}
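
/*
 * A minimal sketch (not in the original file) of the manual stack
 * alignment used above: over-allocate by alignment - 1 bytes, then round
 * the address up with PTR_ALIGN().  The function name is illustrative.
 */
static void __maybe_unused stack_align_sketch(void)
{
	char raw[sizeof(struct io) + __alignof__(struct io) - 1];
	struct io *io = (struct io *)PTR_ALIGN(&raw, __alignof__(struct io));

	/*
	 * io now satisfies the IS_ALIGNED() check performed in
	 * store_io_and_region_in_bio().
	 */
	BUG_ON(!IS_ALIGNED((unsigned long)io, __alignof__(struct io)));
}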

static int async_io(struct dm_io_client *client, unsigned int num_regions,
		    struct dm_io_region *where, int rw, struct dpages *dp,
		    io_notify_fn fn, void *context)
{
	struct io *io;

	if (num_regions > 1 && (rw & RW_MASK) != WRITE) {
		WARN_ON(1);
		fn(1, context);
		return -EIO;
	}

	io = mempool_alloc(client->pool, GFP_NOIO);
	io->error_bits = 0;
	atomic_set(&io->count, 1); /* see dispatch_io() */
	io->sleeper = NULL;
	io->client = client;
	io->callback = fn;
	io->context = context;

	io->vma_invalidate_address = dp->vma_invalidate_address;
	io->vma_invalidate_size = dp->vma_invalidate_size;

	dispatch_io(rw, num_regions, where, dp, io, 0);
	return 0;
}

static int dp_init(struct dm_io_request *io_req, struct dpages *dp,
		   unsigned long size)
{
	/* Set up dpages based on memory type */

	dp->vma_invalidate_address = NULL;
	dp->vma_invalidate_size = 0;

	switch (io_req->mem.type) {
	case DM_IO_PAGE_LIST:
		list_dp_init(dp, io_req->mem.ptr.pl, io_req->mem.offset);
		break;

	case DM_IO_BVEC:
		bvec_dp_init(dp, io_req->mem.ptr.bvec);
		break;

	case DM_IO_VMA:
		flush_kernel_vmap_range(io_req->mem.ptr.vma, size);
		if ((io_req->bi_rw & RW_MASK) == READ) {
			dp->vma_invalidate_address = io_req->mem.ptr.vma;
			dp->vma_invalidate_size = size;
		}
		vm_dp_init(dp, io_req->mem.ptr.vma);
		break;

	case DM_IO_KMEM:
		km_dp_init(dp, io_req->mem.ptr.addr);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

/*
 * New collapsed (a)synchronous interface.
 *
 * If the IO is asynchronous (i.e. it has notify.fn), you must either unplug
 * the queue with blk_unplug() some time later or set REQ_SYNC in
 * io_req->bi_rw. If you fail to do one of these, the IO will be submitted to
 * the disk after q->unplug_delay, which defaults to 3ms in blk-settings.c.
 */
int dm_io(struct dm_io_request *io_req, unsigned num_regions,
	  struct dm_io_region *where, unsigned long *sync_error_bits)
{
	int r;
	struct dpages dp;

	r = dp_init(io_req, &dp, (unsigned long)where->count << SECTOR_SHIFT);
	if (r)
		return r;

	if (!io_req->notify.fn)
		return sync_io(io_req->client, num_regions, where,
			       io_req->bi_rw, &dp, sync_error_bits);

	return async_io(io_req->client, num_regions, where, io_req->bi_rw,
			&dp, io_req->notify.fn, io_req->notify.context);
}
EXPORT_SYMBOL(dm_io);
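
/*
 * Usage sketch (not part of the original file): a synchronous read of
 * 'count' sectors from the start of 'bdev' into a kernel-memory buffer.
 * Leaving notify.fn NULL selects the sync_io() path above.  The function
 * name and parameters are illustrative assumptions.
 */
static int __maybe_unused dm_io_sync_read_sketch(struct dm_io_client *client,
						 struct block_device *bdev,
						 void *buf, sector_t count)
{
	unsigned long error_bits;
	struct dm_io_region region = {
		.bdev	= bdev,
		.sector	= 0,
		.count	= count,
	};
	struct dm_io_request io_req = {
		.bi_rw		= READ,
		.mem.type	= DM_IO_KMEM,
		.mem.ptr.addr	= buf,
		.notify.fn	= NULL,	/* synchronous */
		.client		= client,
	};

	return dm_io(&io_req, 1, &region, &error_bits);
}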

int dm_io_init(void)
{
	_dm_io_cache = KMEM_CACHE(io, 0);
	if (!_dm_io_cache)
		return -ENOMEM;

	return 0;
}

void dm_io_exit(void)
{
	kmem_cache_destroy(_dm_io_cache);
	_dm_io_cache = NULL;
}