Linux Kernel 3.7.1
dm-table.c
1 /*
2  * Copyright (C) 2001 Sistina Software (UK) Limited.
3  * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
4  *
5  * This file is released under the GPL.
6  */
7 
8 #include "dm.h"
9 
10 #include <linux/module.h>
11 #include <linux/vmalloc.h>
12 #include <linux/blkdev.h>
13 #include <linux/namei.h>
14 #include <linux/ctype.h>
15 #include <linux/string.h>
16 #include <linux/slab.h>
17 #include <linux/interrupt.h>
18 #include <linux/mutex.h>
19 #include <linux/delay.h>
20 #include <linux/atomic.h>
21 
22 #define DM_MSG_PREFIX "table"
23 
24 #define MAX_DEPTH 16
25 #define NODE_SIZE L1_CACHE_BYTES
26 #define KEYS_PER_NODE (NODE_SIZE / sizeof(sector_t))
27 #define CHILDREN_PER_NODE (KEYS_PER_NODE + 1)
28 
29 /*
30  * The table has always exactly one reference from either mapped_device->map
31  * or hash_cell->new_map. This reference is not counted in table->holders.
32  * A pair of dm_create_table/dm_destroy_table functions is used for table
33  * creation/destruction.
34  *
35  * Temporary references from the other code increase table->holders. A pair
36  * of dm_table_get/dm_table_put functions is used to manipulate it.
37  *
38  * When the table is about to be destroyed, we wait for table->holders to
39  * drop to zero.
40  */
41 
42 struct dm_table {
43  struct mapped_device *md;
44  atomic_t holders;
45  unsigned type;
46 
47  /* btree table */
48  unsigned int depth;
49  unsigned int counts[MAX_DEPTH]; /* in nodes */
50  sector_t *index[MAX_DEPTH];
51 
52  unsigned int num_targets;
53  unsigned int num_allocated;
54  sector_t *highs;
55  struct dm_target *targets;
56 
57  struct target_type *immutable_target_type;
58  unsigned integrity_supported:1;
59  unsigned singleton:1;
60 
61  /*
62  * Indicates the rw permissions for the new logical
63  * device. This should be a combination of FMODE_READ
64  * and FMODE_WRITE.
65  */
66  fmode_t mode;
67 
68  /* a list of devices used by this table */
69  struct list_head devices;
70 
71  /* events get handed up using this callback */
72  void (*event_fn)(void *);
73  void *event_context;
74 
75  struct dm_md_mempools *mempools;
76 
77  struct list_head target_callbacks;
78 };
79 
80 /*
81  * Similar to ceiling(log_size(n))
82  */
83 static unsigned int int_log(unsigned int n, unsigned int base)
84 {
85  int result = 0;
86 
87  while (n > 1) {
88  n = dm_div_up(n, base);
89  result++;
90  }
91 
92  return result;
93 }
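
/*
 * Worked example (assuming a 64-byte L1_CACHE_BYTES and an 8-byte sector_t,
 * so KEYS_PER_NODE = 8 and CHILDREN_PER_NODE = 9): int_log(100, 9) iterates
 * 100 -> dm_div_up(100, 9) = 12 -> dm_div_up(12, 9) = 2 -> dm_div_up(2, 9) = 1
 * and returns 3, i.e. ceil(log base 9 of 100).
 */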
94 
95 /*
96  * Calculate the index of the child node of the n'th node k'th key.
97  */
98 static inline unsigned int get_child(unsigned int n, unsigned int k)
99 {
100  return (n * CHILDREN_PER_NODE) + k;
101 }
102 
103 /*
104  * Return the n'th node of level l from table t.
105  */
106 static inline sector_t *get_node(struct dm_table *t,
107  unsigned int l, unsigned int n)
108 {
109  return t->index[l] + (n * KEYS_PER_NODE);
110 }
111 
112 /*
113  * Return the highest key that you could lookup from the n'th
114  * node on level l of the btree.
115  */
116 static sector_t high(struct dm_table *t, unsigned int l, unsigned int n)
117 {
118  for (; l < t->depth - 1; l++)
119  n = get_child(n, CHILDREN_PER_NODE - 1);
120 
121  if (n >= t->counts[l])
122  return (sector_t) - 1;
123 
124  return get_node(t, l, n)[KEYS_PER_NODE - 1];
125 }
126 
127 /*
128  * Fills in a level of the btree based on the highs of the level
129  * below it.
130  */
131 static int setup_btree_index(unsigned int l, struct dm_table *t)
132 {
133  unsigned int n, k;
134  sector_t *node;
135 
136  for (n = 0U; n < t->counts[l]; n++) {
137  node = get_node(t, l, n);
138 
139  for (k = 0U; k < KEYS_PER_NODE; k++)
140  node[k] = high(t, l + 1, get_child(n, k));
141  }
142 
143  return 0;
144 }
145 
146 void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size)
147 {
148  unsigned long size;
149  void *addr;
150 
151  /*
152  * Check that we're not going to overflow.
153  */
154  if (nmemb > (ULONG_MAX / elem_size))
155  return NULL;
156 
157  size = nmemb * elem_size;
158  addr = vzalloc(size);
159 
160  return addr;
161 }
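
/*
 * Example of the overflow guard above (hypothetical values on a 32-bit
 * machine, ULONG_MAX = 0xffffffff): dm_vcalloc(1 << 28, 24) returns NULL
 * because (1 << 28) > ULONG_MAX / 24; the product 2^28 * 24 would wrap
 * past 2^32 and vzalloc() would otherwise be asked for a truncated size.
 */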
163 
164 /*
165  * highs, and targets are managed as dynamic arrays during a
166  * table load.
167  */
168 static int alloc_targets(struct dm_table *t, unsigned int num)
169 {
170  sector_t *n_highs;
171  struct dm_target *n_targets;
172  int n = t->num_targets;
173 
174  /*
175  * Allocate both the target array and offset array at once.
176  * Append an empty entry to catch sectors beyond the end of
177  * the device.
178  */
179  n_highs = (sector_t *) dm_vcalloc(num + 1, sizeof(struct dm_target) +
180  sizeof(sector_t));
181  if (!n_highs)
182  return -ENOMEM;
183 
184  n_targets = (struct dm_target *) (n_highs + num);
185 
186  if (n) {
187  memcpy(n_highs, t->highs, sizeof(*n_highs) * n);
188  memcpy(n_targets, t->targets, sizeof(*n_targets) * n);
189  }
190 
191  memset(n_highs + n, -1, sizeof(*n_highs) * (num - n));
192  vfree(t->highs);
193 
194  t->num_allocated = num;
195  t->highs = n_highs;
196  t->targets = n_targets;
197 
198  return 0;
199 }
200 
201 int dm_table_create(struct dm_table **result, fmode_t mode,
202  unsigned num_targets, struct mapped_device *md)
203 {
204  struct dm_table *t = kzalloc(sizeof(*t), GFP_KERNEL);
205 
206  if (!t)
207  return -ENOMEM;
208 
209  INIT_LIST_HEAD(&t->devices);
210  INIT_LIST_HEAD(&t->target_callbacks);
211  atomic_set(&t->holders, 0);
212 
213  if (!num_targets)
214  num_targets = KEYS_PER_NODE;
215 
216  num_targets = dm_round_up(num_targets, KEYS_PER_NODE);
217 
218  if (alloc_targets(t, num_targets)) {
219  kfree(t);
220  t = NULL;
221  return -ENOMEM;
222  }
223 
224  t->mode = mode;
225  t->md = md;
226  *result = t;
227  return 0;
228 }
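
/*
 * For example, assuming KEYS_PER_NODE = 8 (64-byte cache line, 8-byte
 * sector_t), a request for a single target is rounded up to 8 slots here,
 * and alloc_targets() adds one extra sentinel "high" entry to catch
 * sectors beyond the end of the device.
 */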
229 
230 static void free_devices(struct list_head *devices)
231 {
232  struct list_head *tmp, *next;
233 
234  list_for_each_safe(tmp, next, devices) {
235  struct dm_dev_internal *dd =
236  list_entry(tmp, struct dm_dev_internal, list);
237  DMWARN("dm_table_destroy: dm_put_device call missing for %s",
238  dd->dm_dev.name);
239  kfree(dd);
240  }
241 }
242 
243 void dm_table_destroy(struct dm_table *t)
244 {
245  unsigned int i;
246 
247  if (!t)
248  return;
249 
250  while (atomic_read(&t->holders))
251  msleep(1);
252  smp_mb();
253 
254  /* free the indexes */
255  if (t->depth >= 2)
256  vfree(t->index[t->depth - 2]);
257 
258  /* free the targets */
259  for (i = 0; i < t->num_targets; i++) {
260  struct dm_target *tgt = t->targets + i;
261 
262  if (tgt->type->dtr)
263  tgt->type->dtr(tgt);
264 
265  dm_put_target_type(tgt->type);
266  }
267 
268  vfree(t->highs);
269 
270  /* free the device list */
271  free_devices(&t->devices);
272 
273  dm_table_free_md_mempools(t);
274 
275  kfree(t);
276 }
277 
278 void dm_table_get(struct dm_table *t)
279 {
280  atomic_inc(&t->holders);
281 }
283 
284 void dm_table_put(struct dm_table *t)
285 {
286  if (!t)
287  return;
288 
289  smp_mb__before_atomic_dec();
290  atomic_dec(&t->holders);
291 }
293 
294 /*
295  * Checks to see if we need to extend highs or targets.
296  */
297 static inline int check_space(struct dm_table *t)
298 {
299  if (t->num_targets >= t->num_allocated)
300  return alloc_targets(t, t->num_allocated * 2);
301 
302  return 0;
303 }
304 
305 /*
306  * See if we've already got a device in the list.
307  */
308 static struct dm_dev_internal *find_device(struct list_head *l, dev_t dev)
309 {
310  struct dm_dev_internal *dd;
311 
312  list_for_each_entry (dd, l, list)
313  if (dd->dm_dev.bdev->bd_dev == dev)
314  return dd;
315 
316  return NULL;
317 }
318 
319 /*
320  * Open a device so we can use it as a map destination.
321  */
322 static int open_dev(struct dm_dev_internal *d, dev_t dev,
323  struct mapped_device *md)
324 {
325  static char *_claim_ptr = "I belong to device-mapper";
326  struct block_device *bdev;
327 
328  int r;
329 
330  BUG_ON(d->dm_dev.bdev);
331 
332  bdev = blkdev_get_by_dev(dev, d->dm_dev.mode | FMODE_EXCL, _claim_ptr);
333  if (IS_ERR(bdev))
334  return PTR_ERR(bdev);
335 
336  r = bd_link_disk_holder(bdev, dm_disk(md));
337  if (r) {
338  blkdev_put(bdev, d->dm_dev.mode | FMODE_EXCL);
339  return r;
340  }
341 
342  d->dm_dev.bdev = bdev;
343  return 0;
344 }
345 
346 /*
347  * Close a device that we've been using.
348  */
349 static void close_dev(struct dm_dev_internal *d, struct mapped_device *md)
350 {
351  if (!d->dm_dev.bdev)
352  return;
353 
354  bd_unlink_disk_holder(d->dm_dev.bdev, dm_disk(md));
355  blkdev_put(d->dm_dev.bdev, d->dm_dev.mode | FMODE_EXCL);
356  d->dm_dev.bdev = NULL;
357 }
358 
359 /*
360  * If possible, this checks whether an area of a destination device is invalid.
361  */
362 static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
363  sector_t start, sector_t len, void *data)
364 {
365  struct request_queue *q;
366  struct queue_limits *limits = data;
367  struct block_device *bdev = dev->bdev;
368  sector_t dev_size =
369  i_size_read(bdev->bd_inode) >> SECTOR_SHIFT;
370  unsigned short logical_block_size_sectors =
371  limits->logical_block_size >> SECTOR_SHIFT;
372  char b[BDEVNAME_SIZE];
373 
374  /*
375  * Some devices exist without request functions,
376  * such as loop devices not yet bound to backing files.
377  * Forbid the use of such devices.
378  */
379  q = bdev_get_queue(bdev);
380  if (!q || !q->make_request_fn) {
381  DMWARN("%s: %s is not yet initialised: "
382  "start=%llu, len=%llu, dev_size=%llu",
383  dm_device_name(ti->table->md), bdevname(bdev, b),
384  (unsigned long long)start,
385  (unsigned long long)len,
386  (unsigned long long)dev_size);
387  return 1;
388  }
389 
390  if (!dev_size)
391  return 0;
392 
393  if ((start >= dev_size) || (start + len > dev_size)) {
394  DMWARN("%s: %s too small for target: "
395  "start=%llu, len=%llu, dev_size=%llu",
396  dm_device_name(ti->table->md), bdevname(bdev, b),
397  (unsigned long long)start,
398  (unsigned long long)len,
399  (unsigned long long)dev_size);
400  return 1;
401  }
402 
403  if (logical_block_size_sectors <= 1)
404  return 0;
405 
406  if (start & (logical_block_size_sectors - 1)) {
407  DMWARN("%s: start=%llu not aligned to h/w "
408  "logical block size %u of %s",
409  dm_device_name(ti->table->md),
410  (unsigned long long)start,
411  limits->logical_block_size, bdevname(bdev, b));
412  return 1;
413  }
414 
415  if (len & (logical_block_size_sectors - 1)) {
416  DMWARN("%s: len=%llu not aligned to h/w "
417  "logical block size %u of %s",
418  dm_device_name(ti->table->md),
419  (unsigned long long)len,
420  limits->logical_block_size, bdevname(bdev, b));
421  return 1;
422  }
423 
424  return 0;
425 }
426 
427 /*
428  * This upgrades the mode on an already open dm_dev, being
429  * careful to leave things as they were if we fail to reopen the
430  * device and not to touch the existing bdev field in case
431  * it is accessed concurrently inside dm_table_any_congested().
432  */
433 static int upgrade_mode(struct dm_dev_internal *dd, fmode_t new_mode,
434  struct mapped_device *md)
435 {
436  int r;
437  struct dm_dev_internal dd_new, dd_old;
438 
439  dd_new = dd_old = *dd;
440 
441  dd_new.dm_dev.mode |= new_mode;
442  dd_new.dm_dev.bdev = NULL;
443 
444  r = open_dev(&dd_new, dd->dm_dev.bdev->bd_dev, md);
445  if (r)
446  return r;
447 
448  dd->dm_dev.mode |= new_mode;
449  close_dev(&dd_old, md);
450 
451  return 0;
452 }
453 
454 /*
455  * Add a device to the list, or just increment the usage count if
456  * it's already present.
457  */
458 int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode,
459  struct dm_dev **result)
460 {
461  int r;
462  dev_t uninitialized_var(dev);
463  struct dm_dev_internal *dd;
464  unsigned int major, minor;
465  struct dm_table *t = ti->table;
466  char dummy;
467 
468  BUG_ON(!t);
469 
470  if (sscanf(path, "%u:%u%c", &major, &minor, &dummy) == 2) {
471  /* Extract the major/minor numbers */
472  dev = MKDEV(major, minor);
473  if (MAJOR(dev) != major || MINOR(dev) != minor)
474  return -EOVERFLOW;
475  } else {
476  /* convert the path to a device */
477  struct block_device *bdev = lookup_bdev(path);
478 
479  if (IS_ERR(bdev))
480  return PTR_ERR(bdev);
481  dev = bdev->bd_dev;
482  bdput(bdev);
483  }
484 
485  dd = find_device(&t->devices, dev);
486  if (!dd) {
487  dd = kmalloc(sizeof(*dd), GFP_KERNEL);
488  if (!dd)
489  return -ENOMEM;
490 
491  dd->dm_dev.mode = mode;
492  dd->dm_dev.bdev = NULL;
493 
494  if ((r = open_dev(dd, dev, t->md))) {
495  kfree(dd);
496  return r;
497  }
498 
499  format_dev_t(dd->dm_dev.name, dev);
500 
501  atomic_set(&dd->count, 0);
502  list_add(&dd->list, &t->devices);
503 
504  } else if (dd->dm_dev.mode != (mode | dd->dm_dev.mode)) {
505  r = upgrade_mode(dd, mode, t->md);
506  if (r)
507  return r;
508  }
509  atomic_inc(&dd->count);
510 
511  *result = &dd->dm_dev;
512  return 0;
513 }
515 
516 int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
517  sector_t start, sector_t len, void *data)
518 {
519  struct queue_limits *limits = data;
520  struct block_device *bdev = dev->bdev;
521  struct request_queue *q = bdev_get_queue(bdev);
522  char b[BDEVNAME_SIZE];
523 
524  if (unlikely(!q)) {
525  DMWARN("%s: Cannot set limits for nonexistent device %s",
526  dm_device_name(ti->table->md), bdevname(bdev, b));
527  return 0;
528  }
529 
530  if (bdev_stack_limits(limits, bdev, start) < 0)
531  DMWARN("%s: adding target device %s caused an alignment inconsistency: "
532  "physical_block_size=%u, logical_block_size=%u, "
533  "alignment_offset=%u, start=%llu",
534  dm_device_name(ti->table->md), bdevname(bdev, b),
535  q->limits.physical_block_size,
536  q->limits.logical_block_size,
537  q->limits.alignment_offset,
538  (unsigned long long) start << SECTOR_SHIFT);
539 
540  /*
541  * Check if merge fn is supported.
542  * If not we'll force DM to use PAGE_SIZE or
543  * smaller I/O, just to be safe.
544  */
545  if (dm_queue_merge_is_compulsory(q) && !ti->type->merge)
546  blk_limits_max_hw_sectors(limits,
547  (unsigned int) (PAGE_SIZE >> 9));
548  return 0;
549 }
551 
552 /*
553  * Decrement a device's use count and remove it if necessary.
554  */
555 void dm_put_device(struct dm_target *ti, struct dm_dev *d)
556 {
557  struct dm_dev_internal *dd = container_of(d, struct dm_dev_internal,
558  dm_dev);
559 
560  if (atomic_dec_and_test(&dd->count)) {
561  close_dev(dd, ti->table->md);
562  list_del(&dd->list);
563  kfree(dd);
564  }
565 }
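
/*
 * Minimal sketch of the intended calling pattern from a target constructor.
 * The target type "foo" and foo_ctr_sketch() are hypothetical, not part of
 * this file: a ctr acquires its backing device with dm_get_device() and the
 * matching dtr releases it with dm_put_device().
 */
static int foo_ctr_sketch(struct dm_target *ti, unsigned argc, char **argv)
{
	struct dm_dev *dev;
	int r;

	if (argc < 1) {
		ti->error = "Invalid argument count";
		return -EINVAL;
	}

	/* argv[0] may be "8:16", "/dev/sdb" or another path to a device node */
	r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &dev);
	if (r) {
		ti->error = "Device lookup failed";
		return r;
	}

	ti->private = dev;	/* released with dm_put_device() in the dtr */
	return 0;
}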
567 
568 /*
569  * Checks to see if the target joins onto the end of the table.
570  */
571 static int adjoin(struct dm_table *table, struct dm_target *ti)
572 {
573  struct dm_target *prev;
574 
575  if (!table->num_targets)
576  return !ti->begin;
577 
578  prev = &table->targets[table->num_targets - 1];
579  return (ti->begin == (prev->begin + prev->len));
580 }
581 
582 /*
583  * Used to dynamically allocate the arg array.
584  */
585 static char **realloc_argv(unsigned *array_size, char **old_argv)
586 {
587  char **argv;
588  unsigned new_size;
589 
590  new_size = *array_size ? *array_size * 2 : 64;
591  argv = kmalloc(new_size * sizeof(*argv), GFP_KERNEL);
592  if (argv) {
593  memcpy(argv, old_argv, *array_size * sizeof(*argv));
594  *array_size = new_size;
595  }
596 
597  kfree(old_argv);
598  return argv;
599 }
600 
601 /*
602  * Destructively splits up the argument list to pass to ctr.
603  */
604 int dm_split_args(int *argc, char ***argvp, char *input)
605 {
606  char *start, *end = input, *out, **argv = NULL;
607  unsigned array_size = 0;
608 
609  *argc = 0;
610 
611  if (!input) {
612  *argvp = NULL;
613  return 0;
614  }
615 
616  argv = realloc_argv(&array_size, argv);
617  if (!argv)
618  return -ENOMEM;
619 
620  while (1) {
621  /* Skip whitespace */
622  start = skip_spaces(end);
623 
624  if (!*start)
625  break; /* success, we hit the end */
626 
627  /* 'out' is used to remove any back-quotes */
628  end = out = start;
629  while (*end) {
630  /* Everything apart from '\0' can be quoted */
631  if (*end == '\\' && *(end + 1)) {
632  *out++ = *(end + 1);
633  end += 2;
634  continue;
635  }
636 
637  if (isspace(*end))
638  break; /* end of token */
639 
640  *out++ = *end++;
641  }
642 
643  /* have we already filled the array ? */
644  if ((*argc + 1) > array_size) {
645  argv = realloc_argv(&array_size, argv);
646  if (!argv)
647  return -ENOMEM;
648  }
649 
650  /* we know this is whitespace */
651  if (*end)
652  end++;
653 
654  /* terminate the string and put it in the array */
655  *out = '\0';
656  argv[*argc] = start;
657  (*argc)++;
658  }
659 
660  *argvp = argv;
661  return 0;
662 }
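
/*
 * Example: a (hypothetical) parameter string
 *	"core 2 64 nosync device\ one"
 * is split into argc = 5 with argv = { "core", "2", "64", "nosync",
 * "device one" }: the backslash escapes the space, and every argv entry
 * points into the original input buffer, which is now '\0'-separated.
 */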
663 
664 /*
665  * Impose necessary and sufficient conditions on a device's table such
666  * that any incoming bio which respects its logical_block_size can be
667  * processed successfully. If it falls across the boundary between
668  * two or more targets, the size of each piece it gets split into must
669  * be compatible with the logical_block_size of the target processing it.
670  */
671 static int validate_hardware_logical_block_alignment(struct dm_table *table,
672  struct queue_limits *limits)
673 {
674  /*
675  * This function uses arithmetic modulo the logical_block_size
676  * (in units of 512-byte sectors).
677  */
678  unsigned short device_logical_block_size_sects =
679  limits->logical_block_size >> SECTOR_SHIFT;
680 
681  /*
682  * Offset of the start of the next table entry, mod logical_block_size.
683  */
684  unsigned short next_target_start = 0;
685 
686  /*
687  * Given an aligned bio that extends beyond the end of a
688  * target, how many sectors must the next target handle?
689  */
690  unsigned short remaining = 0;
691 
692  struct dm_target *uninitialized_var(ti);
693  struct queue_limits ti_limits;
694  unsigned i = 0;
695 
696  /*
697  * Check each entry in the table in turn.
698  */
699  while (i < dm_table_get_num_targets(table)) {
700  ti = dm_table_get_target(table, i++);
701 
702  blk_set_stacking_limits(&ti_limits);
703 
704  /* combine all target devices' limits */
705  if (ti->type->iterate_devices)
706  ti->type->iterate_devices(ti, dm_set_device_limits,
707  &ti_limits);
708 
709  /*
710  * If the remaining sectors fall entirely within this
711  * table entry are they compatible with its logical_block_size?
712  */
713  if (remaining < ti->len &&
714  remaining & ((ti_limits.logical_block_size >>
715  SECTOR_SHIFT) - 1))
716  break; /* Error */
717 
718  next_target_start =
719  (unsigned short) ((next_target_start + ti->len) &
720  (device_logical_block_size_sects - 1));
721  remaining = next_target_start ?
722  device_logical_block_size_sects - next_target_start : 0;
723  }
724 
725  if (remaining) {
726  DMWARN("%s: table line %u (start sect %llu len %llu) "
727  "not aligned to h/w logical block size %u",
728  dm_device_name(table->md), i,
729  (unsigned long long) ti->begin,
730  (unsigned long long) ti->len,
731  limits->logical_block_size);
732  return -EINVAL;
733  }
734 
735  return 0;
736 }
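
/*
 * Worked example (hypothetical table, 4096-byte logical block size, i.e.
 * device_logical_block_size_sects = 8): if the first target is 100 sectors
 * long, next_target_start becomes 100 & 7 = 4 and remaining becomes
 * 8 - 4 = 4. A following target limited to 512-byte logical blocks passes
 * (4 & 0 == 0), but one that also requires 4096-byte blocks fails
 * (4 & 7 != 0), because a block-sized bio straddling the boundary would be
 * split into pieces smaller than that target's logical block size.
 */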
737 
738 int dm_table_add_target(struct dm_table *t, const char *type,
739  sector_t start, sector_t len, char *params)
740 {
741  int r = -EINVAL, argc;
742  char **argv;
743  struct dm_target *tgt;
744 
745  if (t->singleton) {
746  DMERR("%s: target type %s must appear alone in table",
747  dm_device_name(t->md), t->targets->type->name);
748  return -EINVAL;
749  }
750 
751  if ((r = check_space(t)))
752  return r;
753 
754  tgt = t->targets + t->num_targets;
755  memset(tgt, 0, sizeof(*tgt));
756 
757  if (!len) {
758  DMERR("%s: zero-length target", dm_device_name(t->md));
759  return -EINVAL;
760  }
761 
762  tgt->type = dm_get_target_type(type);
763  if (!tgt->type) {
764  DMERR("%s: %s: unknown target type", dm_device_name(t->md),
765  type);
766  return -EINVAL;
767  }
768 
769  if (dm_target_needs_singleton(tgt->type)) {
770  if (t->num_targets) {
771  DMERR("%s: target type %s must appear alone in table",
772  dm_device_name(t->md), type);
773  return -EINVAL;
774  }
775  t->singleton = 1;
776  }
777 
778  if (dm_target_always_writeable(tgt->type) && !(t->mode & FMODE_WRITE)) {
779  DMERR("%s: target type %s may not be included in read-only tables",
780  dm_device_name(t->md), type);
781  return -EINVAL;
782  }
783 
784  if (t->immutable_target_type) {
785  if (t->immutable_target_type != tgt->type) {
786  DMERR("%s: immutable target type %s cannot be mixed with other target types",
787  dm_device_name(t->md), t->immutable_target_type->name);
788  return -EINVAL;
789  }
790  } else if (dm_target_is_immutable(tgt->type)) {
791  if (t->num_targets) {
792  DMERR("%s: immutable target type %s cannot be mixed with other target types",
793  dm_device_name(t->md), tgt->type->name);
794  return -EINVAL;
795  }
796  t->immutable_target_type = tgt->type;
797  }
798 
799  tgt->table = t;
800  tgt->begin = start;
801  tgt->len = len;
802  tgt->error = "Unknown error";
803 
804  /*
805  * Does this target adjoin the previous one ?
806  */
807  if (!adjoin(t, tgt)) {
808  tgt->error = "Gap in table";
809  r = -EINVAL;
810  goto bad;
811  }
812 
813  r = dm_split_args(&argc, &argv, params);
814  if (r) {
815  tgt->error = "couldn't split parameters (insufficient memory)";
816  goto bad;
817  }
818 
819  r = tgt->type->ctr(tgt, argc, argv);
820  kfree(argv);
821  if (r)
822  goto bad;
823 
824  t->highs[t->num_targets++] = tgt->begin + tgt->len - 1;
825 
826  if (!tgt->num_discard_requests && tgt->discards_supported)
827  DMWARN("%s: %s: ignoring discards_supported because num_discard_requests is zero.",
828  dm_device_name(t->md), type);
829 
830  return 0;
831 
832  bad:
833  DMERR("%s: %s: %s", dm_device_name(t->md), type, tgt->error);
834  dm_put_target_type(tgt->type);
835  return r;
836 }
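
/*
 * For reference, a dmsetup-style table line such as
 *	0 1638400 linear 8:16 41156992
 * reaches this function roughly as
 *	dm_table_add_target(t, "linear", 0, 1638400, "8:16 41156992")
 * (illustrative values; the real caller is the table load path in
 * dm-ioctl.c), and dm_split_args() then hands "8:16" and "41156992"
 * to the linear target's ctr.
 */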
837 
838 /*
839  * Target argument parsing helpers.
840  */
841 static int validate_next_arg(struct dm_arg *arg, struct dm_arg_set *arg_set,
842  unsigned *value, char **error, unsigned grouped)
843 {
844  const char *arg_str = dm_shift_arg(arg_set);
845  char dummy;
846 
847  if (!arg_str ||
848  (sscanf(arg_str, "%u%c", value, &dummy) != 1) ||
849  (*value < arg->min) ||
850  (*value > arg->max) ||
851  (grouped && arg_set->argc < *value)) {
852  *error = arg->error;
853  return -EINVAL;
854  }
855 
856  return 0;
857 }
858 
859 int dm_read_arg(struct dm_arg *arg, struct dm_arg_set *arg_set,
860  unsigned *value, char **error)
861 {
862  return validate_next_arg(arg, arg_set, value, error, 0);
863 }
865 
866 int dm_read_arg_group(struct dm_arg *arg, struct dm_arg_set *arg_set,
867  unsigned *value, char **error)
868 {
869  return validate_next_arg(arg, arg_set, value, error, 1);
870 }
872 
873 const char *dm_shift_arg(struct dm_arg_set *as)
874 {
875  char *r;
876 
877  if (as->argc) {
878  as->argc--;
879  r = *as->argv;
880  as->argv++;
881  return r;
882  }
883 
884  return NULL;
885 }
887 
888 void dm_consume_args(struct dm_arg_set *as, unsigned num_args)
889 {
890  BUG_ON(as->argc < num_args);
891  as->argc -= num_args;
892  as->argv += num_args;
893 }
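
/*
 * Sketch of how a target ctr is expected to use these helpers on a
 * (hypothetical) argument set such as "3 sync nosync fua"; the feature
 * names and parse_features_sketch() are made up for illustration.
 */
static int parse_features_sketch(struct dm_arg_set *as, char **error)
{
	static struct dm_arg _args[] = {
		{0, 8, "invalid number of feature arguments"},
	};
	unsigned num_features;
	const char *word;
	int r;

	/* grouped read: the count may not exceed the remaining argc */
	r = dm_read_arg_group(_args, as, &num_features, error);
	if (r)
		return r;

	while (num_features--) {
		word = dm_shift_arg(as);
		if (!strcasecmp(word, "sync"))
			continue;
		/* ... recognise the remaining feature words here ... */
	}

	return 0;
}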
895 
896 static int dm_table_set_type(struct dm_table *t)
897 {
898  unsigned i;
899  unsigned bio_based = 0, request_based = 0;
900  struct dm_target *tgt;
901  struct dm_dev_internal *dd;
902  struct list_head *devices;
903 
904  for (i = 0; i < t->num_targets; i++) {
905  tgt = t->targets + i;
906  if (dm_target_request_based(tgt))
907  request_based = 1;
908  else
909  bio_based = 1;
910 
911  if (bio_based && request_based) {
912  DMWARN("Inconsistent table: different target types"
913  " can't be mixed up");
914  return -EINVAL;
915  }
916  }
917 
918  if (bio_based) {
919  /* We must use this table as bio-based */
920  t->type = DM_TYPE_BIO_BASED;
921  return 0;
922  }
923 
924  BUG_ON(!request_based); /* No targets in this table */
925 
926  /* Non-request-stackable devices can't be used for request-based dm */
927  devices = dm_table_get_devices(t);
928  list_for_each_entry(dd, devices, list) {
929  if (!blk_queue_stackable(bdev_get_queue(dd->dm_dev.bdev))) {
930  DMWARN("table load rejected: including"
931  " non-request-stackable devices");
932  return -EINVAL;
933  }
934  }
935 
936  /*
937  * Request-based dm supports only tables that have a single target now.
938  * To support multiple targets, request splitting support is needed,
939  * and that needs lots of changes in the block-layer.
940  * (e.g. request completion process for partial completion.)
941  */
942  if (t->num_targets > 1) {
943  DMWARN("Request-based dm doesn't support multiple targets yet");
944  return -EINVAL;
945  }
946 
947  t->type = DM_TYPE_REQUEST_BASED;
948 
949  return 0;
950 }
951 
952 unsigned dm_table_get_type(struct dm_table *t)
953 {
954  return t->type;
955 }
956 
957 struct target_type *dm_table_get_immutable_target_type(struct dm_table *t)
958 {
959  return t->immutable_target_type;
960 }
961 
962 bool dm_table_request_based(struct dm_table *t)
963 {
964  return dm_table_get_type(t) == DM_TYPE_REQUEST_BASED;
965 }
966 
967 int dm_table_alloc_md_mempools(struct dm_table *t)
968 {
969  unsigned type = dm_table_get_type(t);
970 
971  if (unlikely(type == DM_TYPE_NONE)) {
972  DMWARN("no table type is set, can't allocate mempools");
973  return -EINVAL;
974  }
975 
976  t->mempools = dm_alloc_md_mempools(type, t->integrity_supported);
977  if (!t->mempools)
978  return -ENOMEM;
979 
980  return 0;
981 }
982 
983 void dm_table_free_md_mempools(struct dm_table *t)
984 {
985  dm_free_md_mempools(t->mempools);
986  t->mempools = NULL;
987 }
988 
989 struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t)
990 {
991  return t->mempools;
992 }
993 
994 static int setup_indexes(struct dm_table *t)
995 {
996  int i;
997  unsigned int total = 0;
998  sector_t *indexes;
999 
1000  /* allocate the space for *all* the indexes */
1001  for (i = t->depth - 2; i >= 0; i--) {
1002  t->counts[i] = dm_div_up(t->counts[i + 1], CHILDREN_PER_NODE);
1003  total += t->counts[i];
1004  }
1005 
1006  indexes = (sector_t *) dm_vcalloc(total, (unsigned long) NODE_SIZE);
1007  if (!indexes)
1008  return -ENOMEM;
1009 
1010  /* set up internal nodes, bottom-up */
1011  for (i = t->depth - 2; i >= 0; i--) {
1012  t->index[i] = indexes;
1013  indexes += (KEYS_PER_NODE * t->counts[i]);
1014  setup_btree_index(i, t);
1015  }
1016 
1017  return 0;
1018 }
1019 
1020 /*
1021  * Builds the btree to index the map.
1022  */
1023 static int dm_table_build_index(struct dm_table *t)
1024 {
1025  int r = 0;
1026  unsigned int leaf_nodes;
1027 
1028  /* how many indexes will the btree have ? */
1029  leaf_nodes = dm_div_up(t->num_targets, KEYS_PER_NODE);
1030  t->depth = 1 + int_log(leaf_nodes, CHILDREN_PER_NODE);
1031 
1032  /* leaf layer has already been set up */
1033  t->counts[t->depth - 1] = leaf_nodes;
1034  t->index[t->depth - 1] = t->highs;
1035 
1036  if (t->depth >= 2)
1037  r = setup_indexes(t);
1038 
1039  return r;
1040 }
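
/*
 * Example of the resulting index (assuming KEYS_PER_NODE = 8 and therefore
 * CHILDREN_PER_NODE = 9): a table with 100 targets gets
 * leaf_nodes = dm_div_up(100, 8) = 13 and depth = 1 + int_log(13, 9) = 3,
 * so counts[] = { 1, 2, 13 }, index[2] points at t->highs, and
 * index[0]/index[1] are filled by setup_indexes() from the level below.
 */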
1041 
1042 /*
1043  * Get a disk whose integrity profile reflects the table's profile.
1044  * If %match_all is true, all devices' profiles must match.
1045  * If %match_all is false, all devices must at least have an
1046  * allocated integrity profile; but uninitialized is ok.
1047  * Returns NULL if integrity support was inconsistent or unavailable.
1048  */
1049 static struct gendisk * dm_table_get_integrity_disk(struct dm_table *t,
1050  bool match_all)
1051 {
1052  struct list_head *devices = dm_table_get_devices(t);
1053  struct dm_dev_internal *dd = NULL;
1054  struct gendisk *prev_disk = NULL, *template_disk = NULL;
1055 
1056  list_for_each_entry(dd, devices, list) {
1057  template_disk = dd->dm_dev.bdev->bd_disk;
1058  if (!blk_get_integrity(template_disk))
1059  goto no_integrity;
1060  if (!match_all && !blk_integrity_is_initialized(template_disk))
1061  continue; /* skip uninitialized profiles */
1062  else if (prev_disk &&
1063  blk_integrity_compare(prev_disk, template_disk) < 0)
1064  goto no_integrity;
1065  prev_disk = template_disk;
1066  }
1067 
1068  return template_disk;
1069 
1070 no_integrity:
1071  if (prev_disk)
1072  DMWARN("%s: integrity not set: %s and %s profile mismatch",
1073  dm_device_name(t->md),
1074  prev_disk->disk_name,
1075  template_disk->disk_name);
1076  return NULL;
1077 }
1078 
1079 /*
1080  * Register the mapped device for blk_integrity support if
1081  * the underlying devices have an integrity profile. But all devices
1082  * may not have matching profiles (checking all devices isn't reliable
1083  * during table load because this table may use other DM device(s) which
1084  * must be resumed before they will have an initialized integrity profile).
1085  * Stacked DM devices force a 2 stage integrity profile validation:
1086  * 1 - during load, validate all initialized integrity profiles match
1087  * 2 - during resume, validate all integrity profiles match
1088  */
1089 static int dm_table_prealloc_integrity(struct dm_table *t, struct mapped_device *md)
1090 {
1091  struct gendisk *template_disk = NULL;
1092 
1093  template_disk = dm_table_get_integrity_disk(t, false);
1094  if (!template_disk)
1095  return 0;
1096 
1097  if (!blk_integrity_is_initialized(dm_disk(md))) {
1098  t->integrity_supported = 1;
1099  return blk_integrity_register(dm_disk(md), NULL);
1100  }
1101 
1102  /*
1103  * If DM device already has an initialized integrity
1104  * profile the new profile should not conflict.
1105  */
1106  if (blk_integrity_is_initialized(template_disk) &&
1107  blk_integrity_compare(dm_disk(md), template_disk) < 0) {
1108  DMWARN("%s: conflict with existing integrity profile: "
1109  "%s profile mismatch",
1110  dm_device_name(t->md),
1111  template_disk->disk_name);
1112  return 1;
1113  }
1114 
1115  /* Preserve existing initialized integrity profile */
1116  t->integrity_supported = 1;
1117  return 0;
1118 }
1119 
1120 /*
1121  * Prepares the table for use by building the indices,
1122  * setting the type, and allocating mempools.
1123  */
1124 int dm_table_complete(struct dm_table *t)
1125 {
1126  int r;
1127 
1128  r = dm_table_set_type(t);
1129  if (r) {
1130  DMERR("unable to set table type");
1131  return r;
1132  }
1133 
1134  r = dm_table_build_index(t);
1135  if (r) {
1136  DMERR("unable to build btrees");
1137  return r;
1138  }
1139 
1140  r = dm_table_prealloc_integrity(t, t->md);
1141  if (r) {
1142  DMERR("could not register integrity profile.");
1143  return r;
1144  }
1145 
1146  r = dm_table_alloc_md_mempools(t);
1147  if (r)
1148  DMERR("unable to allocate mempools");
1149 
1150  return r;
1151 }
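
/*
 * Putting the pieces together: a sketch of how a caller outside this file
 * (normally the table load path in dm-ioctl.c; example_load_sketch() and
 * its values are only an illustration) drives a table through its lifecycle.
 */
static int example_load_sketch(struct mapped_device *md)
{
	char params[] = "8:16 41156992";	/* writable: dm_split_args() edits it */
	struct dm_table *t;
	int r;

	r = dm_table_create(&t, FMODE_READ | FMODE_WRITE, 1, md);
	if (r)
		return r;

	/* one target line: "start len type params" */
	r = dm_table_add_target(t, "linear", 0, 1638400, params);
	if (!r)
		r = dm_table_complete(t);	/* build btree, set type, mempools */

	if (r)
		dm_table_destroy(t);
	return r;
}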
1152 
1153 static DEFINE_MUTEX(_event_lock);
1154 void dm_table_event_callback(struct dm_table *t,
1155  void (*fn)(void *), void *context)
1156 {
1157  mutex_lock(&_event_lock);
1158  t->event_fn = fn;
1159  t->event_context = context;
1160  mutex_unlock(&_event_lock);
1161 }
1162 
1163 void dm_table_event(struct dm_table *t)
1164 {
1165  /*
1166  * You can no longer call dm_table_event() from interrupt
1167  * context, use a bottom half instead.
1168  */
1169  BUG_ON(in_interrupt());
1170 
1171  mutex_lock(&_event_lock);
1172  if (t->event_fn)
1173  t->event_fn(t->event_context);
1174  mutex_unlock(&_event_lock);
1175 }
1177 
1178 sector_t dm_table_get_size(struct dm_table *t)
1179 {
1180  return t->num_targets ? (t->highs[t->num_targets - 1] + 1) : 0;
1181 }
1183 
1184 struct dm_target *dm_table_get_target(struct dm_table *t, unsigned int index)
1185 {
1186  if (index >= t->num_targets)
1187  return NULL;
1188 
1189  return t->targets + index;
1190 }
1191 
1192 /*
1193  * Search the btree for the correct target.
1194  *
1195  * Caller should check returned pointer with dm_target_is_valid()
1196  * to trap I/O beyond end of device.
1197  */
1198 struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector)
1199 {
1200  unsigned int l, n = 0, k = 0;
1201  sector_t *node;
1202 
1203  for (l = 0; l < t->depth; l++) {
1204  n = get_child(n, k);
1205  node = get_node(t, l, n);
1206 
1207  for (k = 0; k < KEYS_PER_NODE; k++)
1208  if (node[k] >= sector)
1209  break;
1210  }
1211 
1212  return &t->targets[(KEYS_PER_NODE * n) + k];
1213 }
1214 
1215 static int count_device(struct dm_target *ti, struct dm_dev *dev,
1216  sector_t start, sector_t len, void *data)
1217 {
1218  unsigned *num_devices = data;
1219 
1220  (*num_devices)++;
1221 
1222  return 0;
1223 }
1224 
1225 /*
1226  * Check whether a table has no data devices attached using each
1227  * target's iterate_devices method.
1228  * Returns false if the result is unknown because a target doesn't
1229  * support iterate_devices.
1230  */
1231 bool dm_table_has_no_data_devices(struct dm_table *table)
1232 {
1233  struct dm_target *uninitialized_var(ti);
1234  unsigned i = 0, num_devices = 0;
1235 
1236  while (i < dm_table_get_num_targets(table)) {
1237  ti = dm_table_get_target(table, i++);
1238 
1239  if (!ti->type->iterate_devices)
1240  return false;
1241 
1242  ti->type->iterate_devices(ti, count_device, &num_devices);
1243  if (num_devices)
1244  return false;
1245  }
1246 
1247  return true;
1248 }
1249 
1250 /*
1251  * Establish the new table's queue_limits and validate them.
1252  */
1253 int dm_calculate_queue_limits(struct dm_table *table,
1254  struct queue_limits *limits)
1255 {
1256  struct dm_target *uninitialized_var(ti);
1257  struct queue_limits ti_limits;
1258  unsigned i = 0;
1259 
1260  blk_set_stacking_limits(limits);
1261 
1262  while (i < dm_table_get_num_targets(table)) {
1263  blk_set_stacking_limits(&ti_limits);
1264 
1265  ti = dm_table_get_target(table, i++);
1266 
1267  if (!ti->type->iterate_devices)
1268  goto combine_limits;
1269 
1270  /*
1271  * Combine queue limits of all the devices this target uses.
1272  */
1273  ti->type->iterate_devices(ti, dm_set_device_limits,
1274  &ti_limits);
1275 
1276  /* Set I/O hints portion of queue limits */
1277  if (ti->type->io_hints)
1278  ti->type->io_hints(ti, &ti_limits);
1279 
1280  /*
1281  * Check each device area is consistent with the target's
1282  * overall queue limits.
1283  */
1284  if (ti->type->iterate_devices(ti, device_area_is_invalid,
1285  &ti_limits))
1286  return -EINVAL;
1287 
1288 combine_limits:
1289  /*
1290  * Merge this target's queue limits into the overall limits
1291  * for the table.
1292  */
1293  if (blk_stack_limits(limits, &ti_limits, 0) < 0)
1294  DMWARN("%s: adding target device "
1295  "(start sect %llu len %llu) "
1296  "caused an alignment inconsistency",
1297  dm_device_name(table->md),
1298  (unsigned long long) ti->begin,
1299  (unsigned long long) ti->len);
1300  }
1301 
1302  return validate_hardware_logical_block_alignment(table, limits);
1303 }
1304 
1305 /*
1306  * Set the integrity profile for this device if all devices used have
1307  * matching profiles. We're quite deep in the resume path but still
1308  * don't know if all devices (particularly DM devices this device
1309  * may be stacked on) have matching profiles. Even if the profiles
1310  * don't match we have no way to fail (to resume) at this point.
1311  */
1312 static void dm_table_set_integrity(struct dm_table *t)
1313 {
1314  struct gendisk *template_disk = NULL;
1315 
1316  if (!blk_get_integrity(dm_disk(t->md)))
1317  return;
1318 
1319  template_disk = dm_table_get_integrity_disk(t, true);
1320  if (template_disk)
1321  blk_integrity_register(dm_disk(t->md),
1322  blk_get_integrity(template_disk));
1323  else if (blk_integrity_is_initialized(dm_disk(t->md)))
1324  DMWARN("%s: device no longer has a valid integrity profile",
1325  dm_device_name(t->md));
1326  else
1327  DMWARN("%s: unable to establish an integrity profile",
1328  dm_device_name(t->md));
1329 }
1330 
1331 static int device_flush_capable(struct dm_target *ti, struct dm_dev *dev,
1332  sector_t start, sector_t len, void *data)
1333 {
1334  unsigned flush = (*(unsigned *)data);
1335  struct request_queue *q = bdev_get_queue(dev->bdev);
1336 
1337  return q && (q->flush_flags & flush);
1338 }
1339 
1340 static bool dm_table_supports_flush(struct dm_table *t, unsigned flush)
1341 {
1342  struct dm_target *ti;
1343  unsigned i = 0;
1344 
1345  /*
1346  * Require at least one underlying device to support flushes.
1347  * t->devices includes internal dm devices such as mirror logs
1348  * so we need to use iterate_devices here, which targets
1349  * supporting flushes must provide.
1350  */
1351  while (i < dm_table_get_num_targets(t)) {
1352  ti = dm_table_get_target(t, i++);
1353 
1354  if (!ti->num_flush_requests)
1355  continue;
1356 
1357  if (ti->flush_supported)
1358  return 1;
1359 
1360  if (ti->type->iterate_devices &&
1361  ti->type->iterate_devices(ti, device_flush_capable, &flush))
1362  return 1;
1363  }
1364 
1365  return 0;
1366 }
1367 
1368 static bool dm_table_discard_zeroes_data(struct dm_table *t)
1369 {
1370  struct dm_target *ti;
1371  unsigned i = 0;
1372 
1373  /* Ensure that all targets support discard_zeroes_data. */
1374  while (i < dm_table_get_num_targets(t)) {
1375  ti = dm_table_get_target(t, i++);
1376 
1378  return 0;
1379  }
1380 
1381  return 1;
1382 }
1383 
1384 static int device_is_nonrot(struct dm_target *ti, struct dm_dev *dev,
1385  sector_t start, sector_t len, void *data)
1386 {
1387  struct request_queue *q = bdev_get_queue(dev->bdev);
1388 
1389  return q && blk_queue_nonrot(q);
1390 }
1391 
1392 static int device_is_not_random(struct dm_target *ti, struct dm_dev *dev,
1393  sector_t start, sector_t len, void *data)
1394 {
1395  struct request_queue *q = bdev_get_queue(dev->bdev);
1396 
1397  return q && !blk_queue_add_random(q);
1398 }
1399 
1400 static bool dm_table_all_devices_attribute(struct dm_table *t,
1401  iterate_devices_callout_fn func)
1402 {
1403  struct dm_target *ti;
1404  unsigned i = 0;
1405 
1406  while (i < dm_table_get_num_targets(t)) {
1407  ti = dm_table_get_target(t, i++);
1408 
1409  if (!ti->type->iterate_devices ||
1410  !ti->type->iterate_devices(ti, func, NULL))
1411  return 0;
1412  }
1413 
1414  return 1;
1415 }
1416 
1417 void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
1418  struct queue_limits *limits)
1419 {
1420  unsigned flush = 0;
1421 
1422  /*
1423  * Copy table's limits to the DM device's request_queue
1424  */
1425  q->limits = *limits;
1426 
1427  if (!dm_table_supports_discards(t))
1428  queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q);
1429  else
1430  queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
1431 
1432  if (dm_table_supports_flush(t, REQ_FLUSH)) {
1433  flush |= REQ_FLUSH;
1434  if (dm_table_supports_flush(t, REQ_FUA))
1435  flush |= REQ_FUA;
1436  }
1437  blk_queue_flush(q, flush);
1438 
1439  if (!dm_table_discard_zeroes_data(t))
1440  q->limits.discard_zeroes_data = 0;
1441 
1442  /* Ensure that all underlying devices are non-rotational. */
1443  if (dm_table_all_devices_attribute(t, device_is_nonrot))
1444  queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
1445  else
1446  queue_flag_clear_unlocked(QUEUE_FLAG_NONROT, q);
1447 
1448  dm_table_set_integrity(t);
1449 
1450  /*
1451  * Determine whether or not this queue's I/O timings contribute
1452  * to the entropy pool. Only request-based targets use this.
1453  * Clear QUEUE_FLAG_ADD_RANDOM if any underlying device does not
1454  * have it set.
1455  */
1456  if (blk_queue_add_random(q) && dm_table_all_devices_attribute(t, device_is_not_random))
1457  queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q);
1458 
1459  /*
1460  * QUEUE_FLAG_STACKABLE must be set after all queue settings are
1461  * visible to other CPUs because, once the flag is set, incoming bios
1462  * are processed by request-based dm, which refers to the queue
1463  * settings.
1464  * Until the flag set, bios are passed to bio-based dm and queued to
1465  * md->deferred where queue settings are not needed yet.
1466  * Those bios are passed to request-based dm at the resume time.
1467  */
1468  smp_mb();
1469  if (dm_table_request_based(t))
1470  queue_flag_set_unlocked(QUEUE_FLAG_STACKABLE, q);
1471 }
1472 
1473 unsigned int dm_table_get_num_targets(struct dm_table *t)
1474 {
1475  return t->num_targets;
1476 }
1477 
1478 struct list_head *dm_table_get_devices(struct dm_table *t)
1479 {
1480  return &t->devices;
1481 }
1482 
1483 fmode_t dm_table_get_mode(struct dm_table *t)
1484 {
1485  return t->mode;
1486 }
1488 
1489 static void suspend_targets(struct dm_table *t, unsigned postsuspend)
1490 {
1491  int i = t->num_targets;
1492  struct dm_target *ti = t->targets;
1493 
1494  while (i--) {
1495  if (postsuspend) {
1496  if (ti->type->postsuspend)
1497  ti->type->postsuspend(ti);
1498  } else if (ti->type->presuspend)
1499  ti->type->presuspend(ti);
1500 
1501  ti++;
1502  }
1503 }
1504 
1505 void dm_table_presuspend_targets(struct dm_table *t)
1506 {
1507  if (!t)
1508  return;
1509 
1510  suspend_targets(t, 0);
1511 }
1512 
1513 void dm_table_postsuspend_targets(struct dm_table *t)
1514 {
1515  if (!t)
1516  return;
1517 
1518  suspend_targets(t, 1);
1519 }
1520 
1521 int dm_table_resume_targets(struct dm_table *t)
1522 {
1523  int i, r = 0;
1524 
1525  for (i = 0; i < t->num_targets; i++) {
1526  struct dm_target *ti = t->targets + i;
1527 
1528  if (!ti->type->preresume)
1529  continue;
1530 
1531  r = ti->type->preresume(ti);
1532  if (r)
1533  return r;
1534  }
1535 
1536  for (i = 0; i < t->num_targets; i++) {
1537  struct dm_target *ti = t->targets + i;
1538 
1539  if (ti->type->resume)
1540  ti->type->resume(ti);
1541  }
1542 
1543  return 0;
1544 }
1545 
1546 void dm_table_add_target_callbacks(struct dm_table *t, struct dm_target_callbacks *cb)
1547 {
1548  list_add(&cb->list, &t->target_callbacks);
1549 }
1551 
1552 int dm_table_any_congested(struct dm_table *t, int bdi_bits)
1553 {
1554  struct dm_dev_internal *dd;
1555  struct list_head *devices = dm_table_get_devices(t);
1556  struct dm_target_callbacks *cb;
1557  int r = 0;
1558 
1559  list_for_each_entry(dd, devices, list) {
1560  struct request_queue *q = bdev_get_queue(dd->dm_dev.bdev);
1561  char b[BDEVNAME_SIZE];
1562 
1563  if (likely(q))
1564  r |= bdi_congested(&q->backing_dev_info, bdi_bits);
1565  else
1566  DMWARN_LIMIT("%s: any_congested: nonexistent device %s",
1567  dm_device_name(t->md),
1568  bdevname(dd->dm_dev.bdev, b));
1569  }
1570 
1571  list_for_each_entry(cb, &t->target_callbacks, list)
1572  if (cb->congested_fn)
1573  r |= cb->congested_fn(cb, bdi_bits);
1574 
1575  return r;
1576 }
1577 
1578 int dm_table_any_busy_target(struct dm_table *t)
1579 {
1580  unsigned i;
1581  struct dm_target *ti;
1582 
1583  for (i = 0; i < t->num_targets; i++) {
1584  ti = t->targets + i;
1585  if (ti->type->busy && ti->type->busy(ti))
1586  return 1;
1587  }
1588 
1589  return 0;
1590 }
1591 
1592 struct mapped_device *dm_table_get_md(struct dm_table *t)
1593 {
1594  return t->md;
1595 }
1597 
1598 static int device_discard_capable(struct dm_target *ti, struct dm_dev *dev,
1599  sector_t start, sector_t len, void *data)
1600 {
1601  struct request_queue *q = bdev_get_queue(dev->bdev);
1602 
1603  return q && blk_queue_discard(q);
1604 }
1605 
1606 bool dm_table_supports_discards(struct dm_table *t)
1607 {
1608  struct dm_target *ti;
1609  unsigned i = 0;
1610 
1611  /*
1612  * Unless any target used by the table set discards_supported,
1613  * require at least one underlying device to support discards.
1614  * t->devices includes internal dm devices such as mirror logs
1615  * so we need to use iterate_devices here, which targets
1616  * supporting discard selectively must provide.
1617  */
1618  while (i < dm_table_get_num_targets(t)) {
1619  ti = dm_table_get_target(t, i++);
1620 
1621  if (!ti->num_discard_requests)
1622  continue;
1623 
1624  if (ti->discards_supported)
1625  return 1;
1626 
1627  if (ti->type->iterate_devices &&
1628  ti->type->iterate_devices(ti, device_discard_capable, NULL))
1629  return 1;
1630  }
1631 
1632  return 0;
1633 }