/*
 * MTD device concatenation layer
 *
 * Copyright © 2002 Robert Kaiser <[email protected]>
 * Copyright © 2002-2010 David Woodhouse <[email protected]>
 *
 * NAND support by Christian Gan <[email protected]>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/backing-dev.h>

#include <linux/mtd/mtd.h>
#include <linux/mtd/concat.h>

#include <asm/div64.h>
/*
 * Our storage structure:
 * subdev points to an array of pointers to struct mtd_info objects
 * which is allocated along with this structure.
 */
struct mtd_concat {
	struct mtd_info mtd;
	int num_subdev;
	struct mtd_info **subdev;
};

/*
 * How to calculate the size required for the above structure,
 * including the pointer array subdev points to:
 */
#define SIZEOF_STRUCT_MTD_CONCAT(num_subdev)	\
	((sizeof(struct mtd_concat) + (num_subdev) * sizeof(struct mtd_info *)))

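/*
 * Illustration (added, not part of the original driver): the structure
 * and its pointer array are carved out of a single allocation, exactly
 * as mtd_concat_create() below does. A minimal sketch for two
 * hypothetical subdevices:
 *
 *	struct mtd_concat *concat;
 *
 *	concat = kzalloc(SIZEOF_STRUCT_MTD_CONCAT(2), GFP_KERNEL);
 *	if (concat)
 *		concat->subdev = (struct mtd_info **)(concat + 1);
 *
 * A single kfree(concat) then releases both the structure and the array.
 */
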
/*
 * Given a pointer to the MTD object in the mtd_concat structure,
 * we can retrieve the pointer to that structure with this macro.
 * This works because the mtd field is the first member of
 * struct mtd_concat, so both share the same address.
 */
#define CONCAT(x) ((struct mtd_concat *)(x))

/*
 * MTD methods which look up the relevant subdevice, translate the
 * effective address and pass through to the subdevice.
 */

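/*
 * Worked example (added for illustration): with two 1 MiB subdevices,
 * a 512 KiB read at offset 0xC0000 is split by the loop below into
 * 256 KiB from subdev[0] at 0xC0000 and 256 KiB from subdev[1] at 0:
 * each pass subtracts the current subdevice's size from the offset
 * until the request lands inside a subdevice, then clips the transfer
 * length to that subdevice's end.
 */
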
static int
concat_read(struct mtd_info *mtd, loff_t from, size_t len,
	    size_t *retlen, u_char *buf)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int ret = 0, err;
	int i;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		size_t size, retsize;

		if (from >= subdev->size) {
			/* Not destined for this subdev */
			size = 0;
			from -= subdev->size;
			continue;
		}
		if (from + len > subdev->size)
			/* First part goes into this subdev */
			size = subdev->size - from;
		else
			/* Entire transaction goes into this subdev */
			size = len;

		err = mtd_read(subdev, from, size, &retsize, buf);

		/* Save information about bitflips! */
		if (unlikely(err)) {
			if (mtd_is_eccerr(err)) {
				mtd->ecc_stats.failed++;
				ret = err;
			} else if (mtd_is_bitflip(err)) {
				mtd->ecc_stats.corrected++;
				/* Do not overwrite -EBADMSG !! */
				if (!ret)
					ret = err;
			} else
				return err;
		}

		*retlen += retsize;
		len -= size;
		if (len == 0)
			return ret;

		buf += size;
		from = 0;
	}
	return -EINVAL;
}

static int
concat_write(struct mtd_info *mtd, loff_t to, size_t len,
	     size_t *retlen, const u_char *buf)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int err = -EINVAL;
	int i;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		size_t size, retsize;

		if (to >= subdev->size) {
			size = 0;
			to -= subdev->size;
			continue;
		}
		if (to + len > subdev->size)
			size = subdev->size - to;
		else
			size = len;

		err = mtd_write(subdev, to, size, &retsize, buf);
		if (err)
			break;

		*retlen += retsize;
		len -= size;
		if (len == 0)
			break;

		err = -EINVAL;
		buf += size;
		to = 0;
	}
	return err;
}

static int
concat_writev(struct mtd_info *mtd, const struct kvec *vecs,
	      unsigned long count, loff_t to, size_t *retlen)
{
	struct mtd_concat *concat = CONCAT(mtd);
	struct kvec *vecs_copy;
	unsigned long entry_low, entry_high;
	size_t total_len = 0;
	int i;
	int err = -EINVAL;

	/* Calculate total length of data */
	for (i = 0; i < count; i++)
		total_len += vecs[i].iov_len;

	/* Check alignment */
	if (mtd->writesize > 1) {
		uint64_t __to = to;
		if (do_div(__to, mtd->writesize) || (total_len % mtd->writesize))
			return -EINVAL;
	}

	/* make a copy of vecs */
	vecs_copy = kmemdup(vecs, sizeof(struct kvec) * count, GFP_KERNEL);
	if (!vecs_copy)
		return -ENOMEM;

	entry_low = 0;
	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		size_t size, wsize, retsize, old_iov_len;

		if (to >= subdev->size) {
			to -= subdev->size;
			continue;
		}

		size = min_t(uint64_t, total_len, subdev->size - to);
		wsize = size;	/* store for future use */

		entry_high = entry_low;
		while (entry_high < count) {
			if (size <= vecs_copy[entry_high].iov_len)
				break;
			size -= vecs_copy[entry_high++].iov_len;
		}

		old_iov_len = vecs_copy[entry_high].iov_len;
		vecs_copy[entry_high].iov_len = size;

		err = mtd_writev(subdev, &vecs_copy[entry_low],
				 entry_high - entry_low + 1, to, &retsize);

		vecs_copy[entry_high].iov_len = old_iov_len - size;
		vecs_copy[entry_high].iov_base += size;

		entry_low = entry_high;

		if (err)
			break;

		*retlen += retsize;
		total_len -= wsize;

		if (total_len == 0)
			break;

		err = -EINVAL;
		to = 0;
	}

	kfree(vecs_copy);
	return err;
}

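/*
 * Worked example (added for illustration): three 2 KiB kvecs written
 * across a subdevice boundary 4 KiB into the request are handled in two
 * passes of the loop above. The first pass submits kvecs 0-1 to the
 * first subdevice; the boundary kvec's iov_len/iov_base are then
 * adjusted, so the second pass submits the remaining 2 KiB (the
 * now-zero-length leftover of kvec 1 plus kvec 2) to the next
 * subdevice starting at offset 0.
 */
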
static int
concat_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
{
	struct mtd_concat *concat = CONCAT(mtd);
	struct mtd_oob_ops devops = *ops;
	int i, err, ret = 0;

	ops->retlen = ops->oobretlen = 0;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];

		if (from >= subdev->size) {
			from -= subdev->size;
			continue;
		}

		/* partial read ? */
		if (from + devops.len > subdev->size)
			devops.len = subdev->size - from;

		err = mtd_read_oob(subdev, from, &devops);
		ops->retlen += devops.retlen;
		ops->oobretlen += devops.oobretlen;

		/* Save information about bitflips! */
		if (unlikely(err)) {
			if (mtd_is_eccerr(err)) {
				mtd->ecc_stats.failed++;
				ret = err;
			} else if (mtd_is_bitflip(err)) {
				mtd->ecc_stats.corrected++;
				/* Do not overwrite -EBADMSG !! */
				if (!ret)
					ret = err;
			} else
				return err;
		}

		if (devops.datbuf) {
			devops.len = ops->len - ops->retlen;
			if (!devops.len)
				return ret;
			devops.datbuf += devops.retlen;
		}
		if (devops.oobbuf) {
			devops.ooblen = ops->ooblen - ops->oobretlen;
			if (!devops.ooblen)
				return ret;
			devops.oobbuf += ops->oobretlen;
		}

		from = 0;
	}
	return -EINVAL;
}

static int
concat_write_oob(struct mtd_info *mtd, loff_t to, struct mtd_oob_ops *ops)
{
	struct mtd_concat *concat = CONCAT(mtd);
	struct mtd_oob_ops devops = *ops;
	int i, err;

	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;

	ops->retlen = ops->oobretlen = 0;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];

		if (to >= subdev->size) {
			to -= subdev->size;
			continue;
		}

		/* partial write ? */
		if (to + devops.len > subdev->size)
			devops.len = subdev->size - to;

		err = mtd_write_oob(subdev, to, &devops);
		/* Account data and OOB separately; adding oobretlen to
		 * retlen would corrupt the length bookkeeping below. */
		ops->retlen += devops.retlen;
		ops->oobretlen += devops.oobretlen;
		if (err)
			return err;

		if (devops.datbuf) {
			devops.len = ops->len - ops->retlen;
			if (!devops.len)
				return 0;
			devops.datbuf += devops.retlen;
		}
		if (devops.oobbuf) {
			devops.ooblen = ops->ooblen - ops->oobretlen;
			if (!devops.ooblen)
				return 0;
			devops.oobbuf += devops.oobretlen;
		}
		to = 0;
	}
	return -EINVAL;
}

static void concat_erase_callback(struct erase_info *instr)
{
	wake_up((wait_queue_head_t *) instr->priv);
}

static int concat_dev_erase(struct mtd_info *mtd, struct erase_info *erase)
{
	int err;
	wait_queue_head_t waitq;
	DECLARE_WAITQUEUE(wait, current);

	/*
	 * This code was stol^H^H^H^Hinspired by mtdchar.c
	 */
	init_waitqueue_head(&waitq);

	erase->mtd = mtd;
	erase->callback = concat_erase_callback;
	erase->priv = (unsigned long) &waitq;

	/*
	 * FIXME: Allow INTERRUPTIBLE. Which means
	 * not having the wait_queue head on the stack.
	 */
	err = mtd_erase(mtd, erase);
	if (!err) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&waitq, &wait);
		if (erase->state != MTD_ERASE_DONE
		    && erase->state != MTD_ERASE_FAILED)
			schedule();
		remove_wait_queue(&waitq, &wait);
		set_current_state(TASK_RUNNING);

		err = (erase->state == MTD_ERASE_FAILED) ? -EIO : 0;
	}
	return err;
}

static int concat_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	struct mtd_concat *concat = CONCAT(mtd);
	struct mtd_info *subdev;
	int i, err;
	uint64_t length, offset = 0;
	struct erase_info *erase;

	/*
	 * Check for proper erase block alignment of the to-be-erased area.
	 * It is easier to do this based on the super device's erase
	 * region info rather than looking at each particular sub-device
	 * in turn.
	 */
	if (!concat->mtd.numeraseregions) {
		/* the easy case: device has uniform erase block size */
		if (instr->addr & (concat->mtd.erasesize - 1))
			return -EINVAL;
		if (instr->len & (concat->mtd.erasesize - 1))
			return -EINVAL;
	} else {
		/* device has variable erase size */
		struct mtd_erase_region_info *erase_regions =
		    concat->mtd.eraseregions;

		/*
		 * Find the erase region where the to-be-erased area begins:
		 */
		for (i = 0; i < concat->mtd.numeraseregions &&
		     instr->addr >= erase_regions[i].offset; i++) ;
		--i;

		/*
		 * Now erase_regions[i] is the region in which the
		 * to-be-erased area begins. Verify that the starting
		 * offset is aligned to this region's erase size:
		 */
		if (i < 0 || instr->addr & (erase_regions[i].erasesize - 1))
			return -EINVAL;

		/*
		 * now find the erase region where the to-be-erased area ends:
		 */
		for (; i < concat->mtd.numeraseregions &&
		     (instr->addr + instr->len) >= erase_regions[i].offset;
		     ++i) ;
		--i;
		/*
		 * check if the ending offset is aligned to this region's erase size
		 */
		if (i < 0 || ((instr->addr + instr->len) &
			      (erase_regions[i].erasesize - 1)))
			return -EINVAL;
	}

	/* make a local copy of instr to avoid modifying the caller's struct */
	erase = kmalloc(sizeof (struct erase_info), GFP_KERNEL);

	if (!erase)
		return -ENOMEM;

	*erase = *instr;
	length = instr->len;

	/*
	 * find the subdevice where the to-be-erased area begins, adjust
	 * starting offset to be relative to the subdevice start
	 */
	for (i = 0; i < concat->num_subdev; i++) {
		subdev = concat->subdev[i];
		if (subdev->size <= erase->addr) {
			erase->addr -= subdev->size;
			offset += subdev->size;
		} else {
			break;
		}
	}

	/* must never happen since size limit has been verified above */
	BUG_ON(i >= concat->num_subdev);

	/* now do the erase: */
	err = 0;
	for (; length > 0; i++) {
		/* loop for all subdevices affected by this request */
		subdev = concat->subdev[i];	/* get current subdevice */

		/* limit length to subdevice's size: */
		if (erase->addr + length > subdev->size)
			erase->len = subdev->size - erase->addr;
		else
			erase->len = length;

		length -= erase->len;
		if ((err = concat_dev_erase(subdev, erase))) {
			/* sanity check: should never happen since
			 * block alignment has been checked above */
			BUG_ON(err == -EINVAL);
			if (erase->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
				instr->fail_addr = erase->fail_addr + offset;
			break;
		}
		/*
		 * erase->addr specifies the offset of the area to be
		 * erased *within the current subdevice*. It can be
		 * non-zero only the first time through this loop, i.e.
		 * for the first subdevice where blocks need to be erased.
		 * All the following erases must begin at the start of the
		 * current subdevice, i.e. at offset zero.
		 */
		erase->addr = 0;
		offset += subdev->size;
	}
	instr->state = erase->state;
	kfree(erase);
	if (err)
		return err;

	if (instr->callback)
		instr->callback(instr);
	return 0;
}

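/*
 * Worked example (added for illustration): with two 1 MiB subdevices
 * and a 128 KiB erase block, erasing 256 KiB at offset 0xE0000 lands in
 * subdev[0], so the first pass erases its last 128 KiB at
 * erase->addr = 0xE0000; erase->addr is then reset to 0 and the second
 * pass erases the first 128 KiB of subdev[1].
 */
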
static int concat_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i, err = -EINVAL;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		uint64_t size;

		if (ofs >= subdev->size) {
			size = 0;
			ofs -= subdev->size;
			continue;
		}
		if (ofs + len > subdev->size)
			size = subdev->size - ofs;
		else
			size = len;

		err = mtd_lock(subdev, ofs, size);
		if (err)
			break;

		len -= size;
		if (len == 0)
			break;

		err = -EINVAL;
		ofs = 0;
	}

	return err;
}

static int concat_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i, err = 0;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		uint64_t size;

		if (ofs >= subdev->size) {
			size = 0;
			ofs -= subdev->size;
			continue;
		}
		if (ofs + len > subdev->size)
			size = subdev->size - ofs;
		else
			size = len;

		err = mtd_unlock(subdev, ofs, size);
		if (err)
			break;

		len -= size;
		if (len == 0)
			break;

		err = -EINVAL;
		ofs = 0;
	}

	return err;
}

static void concat_sync(struct mtd_info *mtd)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		mtd_sync(subdev);
	}
}

static int concat_suspend(struct mtd_info *mtd)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i, rc = 0;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		if ((rc = mtd_suspend(subdev)) < 0)
			return rc;
	}
	return rc;
}

static void concat_resume(struct mtd_info *mtd)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		mtd_resume(subdev);
	}
}

static int concat_block_isbad(struct mtd_info *mtd, loff_t ofs)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i, res = 0;

	if (!mtd_can_have_bb(concat->subdev[0]))
		return res;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];

		if (ofs >= subdev->size) {
			ofs -= subdev->size;
			continue;
		}

		res = mtd_block_isbad(subdev, ofs);
		break;
	}

	return res;
}

static int concat_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i, err = -EINVAL;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];

		if (ofs >= subdev->size) {
			ofs -= subdev->size;
			continue;
		}

		err = mtd_block_markbad(subdev, ofs);
		if (!err)
			mtd->ecc_stats.badblocks++;
		break;
	}

	return err;
}

/*
 * try to support NOMMU mmaps on concatenated devices
 * - we don't support subdev spanning as we can't guarantee it'll work
 */
static unsigned long concat_get_unmapped_area(struct mtd_info *mtd,
					      unsigned long len,
					      unsigned long offset,
					      unsigned long flags)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];

		if (offset >= subdev->size) {
			offset -= subdev->size;
			continue;
		}

		return mtd_get_unmapped_area(subdev, len, offset, flags);
	}

	return (unsigned long) -ENOSYS;
}

/*
 * This function constructs a virtual MTD device by concatenating
 * num_devs MTD devices. A pointer to the new device object is
 * returned on success, NULL on failure. This function does _not_
 * register any devices: this is the caller's responsibility.
 */
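/*
 * Typical use (an illustrative sketch of hypothetical caller code, not
 * part of this driver): concatenate two already-probed chips, then
 * register the combined device with the MTD core.
 *
 *	struct mtd_info *chips[2] = { chip0, chip1 };
 *	struct mtd_info *combined;
 *
 *	combined = mtd_concat_create(chips, 2, "combined-flash");
 *	if (combined)
 *		mtd_device_register(combined, NULL, 0);
 *
 * Teardown is mtd_device_unregister() followed by mtd_concat_destroy().
 */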
struct mtd_info *mtd_concat_create(struct mtd_info *subdev[],	/* subdevices to concatenate */
				   int num_devs,	/* number of subdevices */
				   const char *name)	/* name for the new device */
{
	int i;
	size_t size;
	struct mtd_concat *concat;
	uint32_t max_erasesize, curr_erasesize;
	int num_erase_region;
	int max_writebufsize = 0;

	printk(KERN_NOTICE "Concatenating MTD devices:\n");
	for (i = 0; i < num_devs; i++)
		printk(KERN_NOTICE "(%d): \"%s\"\n", i, subdev[i]->name);
	printk(KERN_NOTICE "into device \"%s\"\n", name);

	/* allocate the device structure */
	size = SIZEOF_STRUCT_MTD_CONCAT(num_devs);
	concat = kzalloc(size, GFP_KERNEL);
	if (!concat) {
		printk(KERN_ERR
		       "memory allocation error while creating concatenated device \"%s\"\n",
		       name);
		return NULL;
	}
	concat->subdev = (struct mtd_info **) (concat + 1);

	/*
	 * Set up the new "super" device's MTD object structure, check for
	 * incompatibilities between the subdevices.
	 */
	concat->mtd.type = subdev[0]->type;
	concat->mtd.flags = subdev[0]->flags;
	concat->mtd.size = subdev[0]->size;
	concat->mtd.erasesize = subdev[0]->erasesize;
	concat->mtd.writesize = subdev[0]->writesize;

	for (i = 0; i < num_devs; i++)
		if (max_writebufsize < subdev[i]->writebufsize)
			max_writebufsize = subdev[i]->writebufsize;
	concat->mtd.writebufsize = max_writebufsize;

	concat->mtd.subpage_sft = subdev[0]->subpage_sft;
	concat->mtd.oobsize = subdev[0]->oobsize;
	concat->mtd.oobavail = subdev[0]->oobavail;
	if (subdev[0]->_writev)
		concat->mtd._writev = concat_writev;
	if (subdev[0]->_read_oob)
		concat->mtd._read_oob = concat_read_oob;
	if (subdev[0]->_write_oob)
		concat->mtd._write_oob = concat_write_oob;
	if (subdev[0]->_block_isbad)
		concat->mtd._block_isbad = concat_block_isbad;
	if (subdev[0]->_block_markbad)
		concat->mtd._block_markbad = concat_block_markbad;

	concat->mtd.ecc_stats.badblocks = subdev[0]->ecc_stats.badblocks;

	concat->mtd.backing_dev_info = subdev[0]->backing_dev_info;

	concat->subdev[0] = subdev[0];

	for (i = 1; i < num_devs; i++) {
		if (concat->mtd.type != subdev[i]->type) {
			kfree(concat);
			printk(KERN_ERR "Incompatible device type on \"%s\"\n",
			       subdev[i]->name);
			return NULL;
		}
		if (concat->mtd.flags != subdev[i]->flags) {
			/*
			 * Expect all flags except MTD_WRITEABLE to be
			 * equal on all subdevices.
			 */
			if ((concat->mtd.flags ^ subdev[i]->flags) &
			    ~MTD_WRITEABLE) {
				kfree(concat);
				printk(KERN_ERR "Incompatible device flags on \"%s\"\n",
				       subdev[i]->name);
				return NULL;
			} else
				/* if writeable attribute differs,
				   make super device writeable */
				concat->mtd.flags |=
				    subdev[i]->flags & MTD_WRITEABLE;
		}

		/* only permit direct mapping if the BDIs are all the same
		 * - copy-mapping is still permitted
		 */
		if (concat->mtd.backing_dev_info !=
		    subdev[i]->backing_dev_info)
			concat->mtd.backing_dev_info =
			    &default_backing_dev_info;

		concat->mtd.size += subdev[i]->size;
		concat->mtd.ecc_stats.badblocks +=
		    subdev[i]->ecc_stats.badblocks;
		if (concat->mtd.writesize != subdev[i]->writesize ||
		    concat->mtd.subpage_sft != subdev[i]->subpage_sft ||
		    concat->mtd.oobsize != subdev[i]->oobsize ||
		    !concat->mtd._read_oob != !subdev[i]->_read_oob ||
		    !concat->mtd._write_oob != !subdev[i]->_write_oob) {
			kfree(concat);
			printk(KERN_ERR "Incompatible OOB or ECC data on \"%s\"\n",
			       subdev[i]->name);
			return NULL;
		}
		concat->subdev[i] = subdev[i];
	}

	concat->mtd.ecclayout = subdev[0]->ecclayout;

	concat->num_subdev = num_devs;
	concat->mtd.name = name;

	concat->mtd._erase = concat_erase;
	concat->mtd._read = concat_read;
	concat->mtd._write = concat_write;
	concat->mtd._sync = concat_sync;
	concat->mtd._lock = concat_lock;
	concat->mtd._unlock = concat_unlock;
	concat->mtd._suspend = concat_suspend;
	concat->mtd._resume = concat_resume;
	concat->mtd._get_unmapped_area = concat_get_unmapped_area;

	/*
	 * Combine the erase block size info of the subdevices:
	 *
	 * first, walk the map of the new device and see how
	 * many changes in erase size we have
	 */
	max_erasesize = curr_erasesize = subdev[0]->erasesize;
	num_erase_region = 1;
	for (i = 0; i < num_devs; i++) {
		if (subdev[i]->numeraseregions == 0) {
			/* current subdevice has uniform erase size */
			if (subdev[i]->erasesize != curr_erasesize) {
				/* if it differs from the last subdevice's erase size, count it */
				++num_erase_region;
				curr_erasesize = subdev[i]->erasesize;
				if (curr_erasesize > max_erasesize)
					max_erasesize = curr_erasesize;
			}
		} else {
			/* current subdevice has variable erase size */
			int j;
			for (j = 0; j < subdev[i]->numeraseregions; j++) {
				/* walk the list of erase regions, count any changes */
				if (subdev[i]->eraseregions[j].erasesize !=
				    curr_erasesize) {
					++num_erase_region;
					curr_erasesize =
					    subdev[i]->eraseregions[j].erasesize;
					if (curr_erasesize > max_erasesize)
						max_erasesize = curr_erasesize;
				}
			}
		}
	}

	if (num_erase_region == 1) {
		/*
		 * All subdevices have the same uniform erase size.
		 * This is easy:
		 */
		concat->mtd.erasesize = curr_erasesize;
		concat->mtd.numeraseregions = 0;
	} else {
		uint64_t tmp64;

		/*
		 * erase block size varies across the subdevices: allocate
		 * space to store the data describing the variable erase regions
		 */
		struct mtd_erase_region_info *erase_region_p;
		uint64_t begin, position;

		concat->mtd.erasesize = max_erasesize;
		concat->mtd.numeraseregions = num_erase_region;
		concat->mtd.eraseregions = erase_region_p =
		    kmalloc(num_erase_region *
			    sizeof (struct mtd_erase_region_info), GFP_KERNEL);
		if (!erase_region_p) {
			kfree(concat);
			printk(KERN_ERR
			       "memory allocation error while creating erase region list"
			       " for device \"%s\"\n", name);
			return NULL;
		}

		/*
		 * walk the map of the new device once more and fill in
		 * erase region info:
		 */
		curr_erasesize = subdev[0]->erasesize;
		begin = position = 0;
		for (i = 0; i < num_devs; i++) {
			if (subdev[i]->numeraseregions == 0) {
				/* current subdevice has uniform erase size */
				if (subdev[i]->erasesize != curr_erasesize) {
					/*
					 * fill in an mtd_erase_region_info structure for the area
					 * we have walked so far:
					 */
					erase_region_p->offset = begin;
					erase_region_p->erasesize =
					    curr_erasesize;
					tmp64 = position - begin;
					do_div(tmp64, curr_erasesize);
					erase_region_p->numblocks = tmp64;
					begin = position;

					curr_erasesize = subdev[i]->erasesize;
					++erase_region_p;
				}
				position += subdev[i]->size;
			} else {
				/* current subdevice has variable erase size */
				int j;
				for (j = 0; j < subdev[i]->numeraseregions; j++) {
					/* walk the list of erase regions, count any changes */
					if (subdev[i]->eraseregions[j].erasesize !=
					    curr_erasesize) {
						erase_region_p->offset = begin;
						erase_region_p->erasesize =
						    curr_erasesize;
						tmp64 = position - begin;
						do_div(tmp64, curr_erasesize);
						erase_region_p->numblocks = tmp64;
						begin = position;

						curr_erasesize =
						    subdev[i]->eraseregions[j].erasesize;
						++erase_region_p;
					}
					position +=
					    subdev[i]->eraseregions[j].numblocks *
					    (uint64_t)curr_erasesize;
				}
			}
		}
		/* Now write the final entry */
		erase_region_p->offset = begin;
		erase_region_p->erasesize = curr_erasesize;
		tmp64 = position - begin;
		do_div(tmp64, curr_erasesize);
		erase_region_p->numblocks = tmp64;
	}

	return &concat->mtd;
}

/*
 * This function destroys an MTD object obtained from mtd_concat_create()
 */
void mtd_concat_destroy(struct mtd_info *mtd)
{
	struct mtd_concat *concat = CONCAT(mtd);
	if (concat->mtd.numeraseregions)
		kfree(concat->mtd.eraseregions);
	kfree(concat);
}

EXPORT_SYMBOL(mtd_concat_create);
EXPORT_SYMBOL(mtd_concat_destroy);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Robert Kaiser <[email protected]>");
MODULE_DESCRIPTION("Generic support for concatenating of MTD devices");