Linux Kernel  3.7.1
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros Groups Pages
lpt.c
Go to the documentation of this file.
1 /*
2  * This file is part of UBIFS.
3  *
4  * Copyright (C) 2006-2008 Nokia Corporation.
5  *
6  * This program is free software; you can redistribute it and/or modify it
7  * under the terms of the GNU General Public License version 2 as published by
8  * the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but WITHOUT
11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12  * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13  * more details.
14  *
15  * You should have received a copy of the GNU General Public License along with
16  * this program; if not, write to the Free Software Foundation, Inc., 51
17  * Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
18  *
19  * Authors: Adrian Hunter
20  * Artem Bityutskiy (Битюцкий Артём)
21  */
22 
23 /*
24  * This file implements the LEB properties tree (LPT) area. The LPT area
25  * contains the LEB properties tree, a table of LPT area eraseblocks (ltab), and
26  * (for the "big" model) a table of saved LEB numbers (lsave). The LPT area sits
27  * between the log and the orphan area.
28  *
29  * The LPT area is like a miniature self-contained file system. It is required
30  * that it never runs out of space, is fast to access and update, and scales
31  * logarithmically. The LEB properties tree is implemented as a wandering tree
32  * much like the TNC, and the LPT area has its own garbage collection.
33  *
34  * The LPT has two slightly different forms called the "small model" and the
35  * "big model". The small model is used when the entire LEB properties table
36  * can be written into a single eraseblock. In that case, garbage collection
37  * consists of just writing the whole table, which therefore makes all other
38  * eraseblocks reusable. In the case of the big model, dirty eraseblocks are
39  * selected for garbage collection, which consists of marking the clean nodes in
40  * that LEB as dirty, and then only the dirty nodes are written out. Also, in
41  * the case of the big model, a table of LEB numbers is saved so that the entire
42  * LPT does not have to be scanned looking for empty eraseblocks when UBIFS is first
43  * mounted.
44  */
45 
46 #include "ubifs.h"
47 #include <linux/crc16.h>
48 #include <linux/math64.h>
49 #include <linux/slab.h>
50 
58 static void do_calc_lpt_geom(struct ubifs_info *c)
59 {
60  int i, n, bits, per_leb_wastage, max_pnode_cnt;
61  long long sz, tot_wastage;
62 
63  n = c->main_lebs + c->max_leb_cnt - c->leb_cnt;
64  max_pnode_cnt = DIV_ROUND_UP(n, UBIFS_LPT_FANOUT);
65 
66  c->lpt_hght = 1;
67  n = UBIFS_LPT_FANOUT;
68  while (n < max_pnode_cnt) {
69  c->lpt_hght += 1;
71  }
72 
74 
76  c->nnode_cnt = n;
77  for (i = 1; i < c->lpt_hght; i++) {
79  c->nnode_cnt += n;
80  }
81 
82  c->space_bits = fls(c->leb_size) - 3;
83  c->lpt_lnum_bits = fls(c->lpt_lebs);
84  c->lpt_offs_bits = fls(c->leb_size - 1);
85  c->lpt_spc_bits = fls(c->leb_size);
86 
88  c->pcnt_bits = fls(n - 1);
89 
90  c->lnum_bits = fls(c->max_leb_cnt - 1);
91 
93  (c->big_lpt ? c->pcnt_bits : 0) +
94  (c->space_bits * 2 + 1) * UBIFS_LPT_FANOUT;
95  c->pnode_sz = (bits + 7) / 8;
96 
98  (c->big_lpt ? c->pcnt_bits : 0) +
100  c->nnode_sz = (bits + 7) / 8;
101 
103  c->lpt_lebs * c->lpt_spc_bits * 2;
104  c->ltab_sz = (bits + 7) / 8;
105 
107  c->lnum_bits * c->lsave_cnt;
108  c->lsave_sz = (bits + 7) / 8;
109 
110  /* Calculate the minimum LPT size */
111  c->lpt_sz = (long long)c->pnode_cnt * c->pnode_sz;
112  c->lpt_sz += (long long)c->nnode_cnt * c->nnode_sz;
113  c->lpt_sz += c->ltab_sz;
114  if (c->big_lpt)
115  c->lpt_sz += c->lsave_sz;
116 
117  /* Add wastage */
118  sz = c->lpt_sz;
119  per_leb_wastage = max_t(int, c->pnode_sz, c->nnode_sz);
120  sz += per_leb_wastage;
121  tot_wastage = per_leb_wastage;
122  while (sz > c->leb_size) {
123  sz += per_leb_wastage;
124  sz -= c->leb_size;
125  tot_wastage += per_leb_wastage;
126  }
127  tot_wastage += ALIGN(sz, c->min_io_size) - sz;
128  c->lpt_sz += tot_wastage;
129 }
130 
138 {
139  int lebs_needed;
140  long long sz;
141 
142  do_calc_lpt_geom(c);
143 
144  /* Verify that lpt_lebs is big enough */
145  sz = c->lpt_sz * 2; /* Must have at least 2 times the size */
146  lebs_needed = div_u64(sz + c->leb_size - 1, c->leb_size);
147  if (lebs_needed > c->lpt_lebs) {
148  ubifs_err("too few LPT LEBs");
149  return -EINVAL;
150  }
151 
152  /* Verify that ltab fits in a single LEB (since ltab is a single node */
153  if (c->ltab_sz > c->leb_size) {
154  ubifs_err("LPT ltab too big");
155  return -EINVAL;
156  }
157 
158  c->check_lpt_free = c->big_lpt;
159  return 0;
160 }
161 
174 static int calc_dflt_lpt_geom(struct ubifs_info *c, int *main_lebs,
175  int *big_lpt)
176 {
177  int i, lebs_needed;
178  long long sz;
179 
180  /* Start by assuming the minimum number of LPT LEBs */
182  c->main_lebs = *main_lebs - c->lpt_lebs;
183  if (c->main_lebs <= 0)
184  return -EINVAL;
185 
186  /* And assume we will use the small LPT model */
187  c->big_lpt = 0;
188 
189  /*
190  * Calculate the geometry based on assumptions above and then see if it
191  * makes sense
192  */
193  do_calc_lpt_geom(c);
194 
195  /* Small LPT model must have lpt_sz < leb_size */
196  if (c->lpt_sz > c->leb_size) {
197  /* Nope, so try again using big LPT model */
198  c->big_lpt = 1;
199  do_calc_lpt_geom(c);
200  }
201 
202  /* Now check there are enough LPT LEBs */
203  for (i = 0; i < 64 ; i++) {
204  sz = c->lpt_sz * 4; /* Allow 4 times the size */
205  lebs_needed = div_u64(sz + c->leb_size - 1, c->leb_size);
206  if (lebs_needed > c->lpt_lebs) {
207  /* Not enough LPT LEBs so try again with more */
208  c->lpt_lebs = lebs_needed;
209  c->main_lebs = *main_lebs - c->lpt_lebs;
210  if (c->main_lebs <= 0)
211  return -EINVAL;
212  do_calc_lpt_geom(c);
213  continue;
214  }
215  if (c->ltab_sz > c->leb_size) {
216  ubifs_err("LPT ltab too big");
217  return -EINVAL;
218  }
219  *main_lebs = c->main_lebs;
220  *big_lpt = c->big_lpt;
221  return 0;
222  }
223  return -EINVAL;
224 }
225 
/**
 * pack_bits - pack bit fields into a buffer.
 * @addr: address at which to pack (passed and next address returned)
 * @pos: bit position at which to pack (passed and next position returned)
 * @val: value to pack
 * @nrbits: number of bits of value to pack (1-32)
 *
 * Values are packed little-endian, least-significant bits first, starting at
 * an arbitrary bit offset within the current byte.
 */
static void pack_bits(uint8_t **addr, int *pos, uint32_t val, int nrbits)
{
	uint8_t *p = *addr;
	int b = *pos;

	ubifs_assert(nrbits > 0);
	ubifs_assert(nrbits <= 32);
	ubifs_assert(*pos >= 0);
	ubifs_assert(*pos < 8);
	/* val must fit in nrbits (">> 32" would be UB, hence the "|| 32") */
	ubifs_assert((val >> nrbits) == 0 || nrbits == 32);
	if (b) {
		/* Partial byte: OR the low bits into the current byte first */
		*p |= ((uint8_t)val) << b;
		nrbits += b;
		if (nrbits > 8) {
			*++p = (uint8_t)(val >>= (8 - b));
			if (nrbits > 16) {
				*++p = (uint8_t)(val >>= 8);
				if (nrbits > 24) {
					*++p = (uint8_t)(val >>= 8);
					if (nrbits > 32)
						*++p = (uint8_t)(val >>= 8);
				}
			}
		}
	} else {
		/* Byte-aligned: write whole bytes, low-order first */
		*p = (uint8_t)val;
		if (nrbits > 8) {
			*++p = (uint8_t)(val >>= 8);
			if (nrbits > 16) {
				*++p = (uint8_t)(val >>= 8);
				if (nrbits > 24)
					*++p = (uint8_t)(val >>= 8);
			}
		}
	}
	/* Advance to the byte/bit just past the packed value */
	b = nrbits & 7;
	if (b == 0)
		p++;
	*addr = p;
	*pos = b;
}
274 
/**
 * ubifs_unpack_bits - unpack bit fields from a buffer.
 * @addr: address at which to unpack (passed and next address returned)
 * @pos: bit position at which to unpack (passed and next position returned)
 * @nrbits: number of bits of value to unpack (1-32)
 *
 * This is the inverse of pack_bits(). The extraction dropped the declaration
 * of @val; it is restored here, zero-initialized so that the narrow-field
 * cases (where the switch writes nothing) never read an indeterminate value.
 *
 * This function returns the value unpacked.
 */
uint32_t ubifs_unpack_bits(uint8_t **addr, int *pos, int nrbits)
{
	const int k = 32 - nrbits;
	uint8_t *p = *addr;
	int b = *pos;
	uint32_t val = 0;
	const int bytes = (nrbits + b + 7) >> 3;

	ubifs_assert(nrbits > 0);
	ubifs_assert(nrbits <= 32);
	ubifs_assert(*pos >= 0);
	ubifs_assert(*pos < 8);
	if (b) {
		switch (bytes) {
		case 2:
			val = p[1];
			break;
		case 3:
			val = p[1] | ((uint32_t)p[2] << 8);
			break;
		case 4:
			val = p[1] | ((uint32_t)p[2] << 8) |
				     ((uint32_t)p[3] << 16);
			break;
		case 5:
			val = p[1] | ((uint32_t)p[2] << 8) |
				     ((uint32_t)p[3] << 16) |
				     ((uint32_t)p[4] << 24);
		}
		val <<= (8 - b);
		val |= *p >> b;
		nrbits += b;
	} else {
		switch (bytes) {
		case 1:
			val = p[0];
			break;
		case 2:
			val = p[0] | ((uint32_t)p[1] << 8);
			break;
		case 3:
			val = p[0] | ((uint32_t)p[1] << 8) |
				     ((uint32_t)p[2] << 16);
			break;
		case 4:
			val = p[0] | ((uint32_t)p[1] << 8) |
				     ((uint32_t)p[2] << 16) |
				     ((uint32_t)p[3] << 24);
			break;
		}
	}
	/* Mask off any bits above nrbits */
	val <<= k;
	val >>= k;
	b = nrbits & 7;
	p += nrbits >> 3;
	*addr = p;
	*pos = b;
	ubifs_assert((val >> nrbits) == 0 || nrbits - b == 32);
	return val;
}
343 
350 void ubifs_pack_pnode(struct ubifs_info *c, void *buf,
351  struct ubifs_pnode *pnode)
352 {
353  uint8_t *addr = buf + UBIFS_LPT_CRC_BYTES;
354  int i, pos = 0;
355  uint16_t crc;
356 
357  pack_bits(&addr, &pos, UBIFS_LPT_PNODE, UBIFS_LPT_TYPE_BITS);
358  if (c->big_lpt)
359  pack_bits(&addr, &pos, pnode->num, c->pcnt_bits);
360  for (i = 0; i < UBIFS_LPT_FANOUT; i++) {
361  pack_bits(&addr, &pos, pnode->lprops[i].free >> 3,
362  c->space_bits);
363  pack_bits(&addr, &pos, pnode->lprops[i].dirty >> 3,
364  c->space_bits);
365  if (pnode->lprops[i].flags & LPROPS_INDEX)
366  pack_bits(&addr, &pos, 1, 1);
367  else
368  pack_bits(&addr, &pos, 0, 1);
369  }
370  crc = crc16(-1, buf + UBIFS_LPT_CRC_BYTES,
371  c->pnode_sz - UBIFS_LPT_CRC_BYTES);
372  addr = buf;
373  pos = 0;
374  pack_bits(&addr, &pos, crc, UBIFS_LPT_CRC_BITS);
375 }
376 
383 void ubifs_pack_nnode(struct ubifs_info *c, void *buf,
384  struct ubifs_nnode *nnode)
385 {
386  uint8_t *addr = buf + UBIFS_LPT_CRC_BYTES;
387  int i, pos = 0;
388  uint16_t crc;
389 
390  pack_bits(&addr, &pos, UBIFS_LPT_NNODE, UBIFS_LPT_TYPE_BITS);
391  if (c->big_lpt)
392  pack_bits(&addr, &pos, nnode->num, c->pcnt_bits);
393  for (i = 0; i < UBIFS_LPT_FANOUT; i++) {
394  int lnum = nnode->nbranch[i].lnum;
395 
396  if (lnum == 0)
397  lnum = c->lpt_last + 1;
398  pack_bits(&addr, &pos, lnum - c->lpt_first, c->lpt_lnum_bits);
399  pack_bits(&addr, &pos, nnode->nbranch[i].offs,
400  c->lpt_offs_bits);
401  }
402  crc = crc16(-1, buf + UBIFS_LPT_CRC_BYTES,
403  c->nnode_sz - UBIFS_LPT_CRC_BYTES);
404  addr = buf;
405  pos = 0;
406  pack_bits(&addr, &pos, crc, UBIFS_LPT_CRC_BITS);
407 }
408 
415 void ubifs_pack_ltab(struct ubifs_info *c, void *buf,
416  struct ubifs_lpt_lprops *ltab)
417 {
418  uint8_t *addr = buf + UBIFS_LPT_CRC_BYTES;
419  int i, pos = 0;
420  uint16_t crc;
421 
422  pack_bits(&addr, &pos, UBIFS_LPT_LTAB, UBIFS_LPT_TYPE_BITS);
423  for (i = 0; i < c->lpt_lebs; i++) {
424  pack_bits(&addr, &pos, ltab[i].free, c->lpt_spc_bits);
425  pack_bits(&addr, &pos, ltab[i].dirty, c->lpt_spc_bits);
426  }
427  crc = crc16(-1, buf + UBIFS_LPT_CRC_BYTES,
428  c->ltab_sz - UBIFS_LPT_CRC_BYTES);
429  addr = buf;
430  pos = 0;
431  pack_bits(&addr, &pos, crc, UBIFS_LPT_CRC_BITS);
432 }
433 
440 void ubifs_pack_lsave(struct ubifs_info *c, void *buf, int *lsave)
441 {
442  uint8_t *addr = buf + UBIFS_LPT_CRC_BYTES;
443  int i, pos = 0;
444  uint16_t crc;
445 
446  pack_bits(&addr, &pos, UBIFS_LPT_LSAVE, UBIFS_LPT_TYPE_BITS);
447  for (i = 0; i < c->lsave_cnt; i++)
448  pack_bits(&addr, &pos, lsave[i], c->lnum_bits);
449  crc = crc16(-1, buf + UBIFS_LPT_CRC_BYTES,
450  c->lsave_sz - UBIFS_LPT_CRC_BYTES);
451  addr = buf;
452  pos = 0;
453  pack_bits(&addr, &pos, crc, UBIFS_LPT_CRC_BITS);
454 }
455 
462 void ubifs_add_lpt_dirt(struct ubifs_info *c, int lnum, int dirty)
463 {
464  if (!dirty || !lnum)
465  return;
466  dbg_lp("LEB %d add %d to %d",
467  lnum, dirty, c->ltab[lnum - c->lpt_first].dirty);
468  ubifs_assert(lnum >= c->lpt_first && lnum <= c->lpt_last);
469  c->ltab[lnum - c->lpt_first].dirty += dirty;
470 }
471 
479 static void set_ltab(struct ubifs_info *c, int lnum, int free, int dirty)
480 {
481  dbg_lp("LEB %d free %d dirty %d to %d %d",
482  lnum, c->ltab[lnum - c->lpt_first].free,
483  c->ltab[lnum - c->lpt_first].dirty, free, dirty);
484  ubifs_assert(lnum >= c->lpt_first && lnum <= c->lpt_last);
485  c->ltab[lnum - c->lpt_first].free = free;
486  c->ltab[lnum - c->lpt_first].dirty = dirty;
487 }
488 
494 void ubifs_add_nnode_dirt(struct ubifs_info *c, struct ubifs_nnode *nnode)
495 {
496  struct ubifs_nnode *np = nnode->parent;
497 
498  if (np)
499  ubifs_add_lpt_dirt(c, np->nbranch[nnode->iip].lnum,
500  c->nnode_sz);
501  else {
503  if (!(c->lpt_drty_flgs & LTAB_DIRTY)) {
506  }
507  }
508 }
509 
/**
 * add_pnode_dirt - add dirty space to LPT LEB properties.
 * @c: UBIFS file-system description object
 * @pnode: pnode for which to add dirt
 *
 * The dirt is charged to the LEB that holds the pnode (found via the parent
 * branch). Note @pnode->parent must be non-NULL here.
 */
static void add_pnode_dirt(struct ubifs_info *c, struct ubifs_pnode *pnode)
{
	ubifs_add_lpt_dirt(c, pnode->parent->nbranch[pnode->iip].lnum,
			   c->pnode_sz);
}
520 
532 static int calc_nnode_num(int row, int col)
533 {
534  int num, bits;
535 
536  num = 1;
537  while (row--) {
538  bits = (col & (UBIFS_LPT_FANOUT - 1));
539  col >>= UBIFS_LPT_FANOUT_SHIFT;
540  num <<= UBIFS_LPT_FANOUT_SHIFT;
541  num |= bits;
542  }
543  return num;
544 }
545 
558 static int calc_nnode_num_from_parent(const struct ubifs_info *c,
559  struct ubifs_nnode *parent, int iip)
560 {
561  int num, shft;
562 
563  if (!parent)
564  return 1;
565  shft = (c->lpt_hght - parent->level) * UBIFS_LPT_FANOUT_SHIFT;
566  num = parent->num ^ (1 << shft);
567  num |= (UBIFS_LPT_FANOUT + iip) << shft;
568  return num;
569 }
570 
583 static int calc_pnode_num_from_parent(const struct ubifs_info *c,
584  struct ubifs_nnode *parent, int iip)
585 {
586  int i, n = c->lpt_hght - 1, pnum = parent->num, num = 0;
587 
588  for (i = 0; i < n; i++) {
589  num <<= UBIFS_LPT_FANOUT_SHIFT;
590  num |= pnum & (UBIFS_LPT_FANOUT - 1);
591  pnum >>= UBIFS_LPT_FANOUT_SHIFT;
592  }
593  num <<= UBIFS_LPT_FANOUT_SHIFT;
594  num |= iip;
595  return num;
596 }
597 
/**
 * ubifs_create_dflt_lpt - create default LPT.
 * @c: UBIFS file-system description object
 * @main_lebs: number of main area LEBs is passed and returned here
 * @lpt_first: LEB number of first LPT LEB
 * @lpt_lebs: number of LPT LEBs is returned here
 * @big_lpt: whether the "big" LPT model is used is returned here
 *
 * Lay out and write the default (mkfs-time) LPT: all pnodes bottom-up, then
 * the nnodes level by level, then (big model) the lsave table, and finally
 * the ltab. Nodes are packed back-to-back into @buf and flushed one LEB at a
 * time via ubifs_leb_change(), padding the tail of each LEB with 0xff.
 *
 * This function returns %0 on success and a negative error code on failure.
 */
int ubifs_create_dflt_lpt(struct ubifs_info *c, int *main_lebs, int lpt_first,
			  int *lpt_lebs, int *big_lpt)
{
	int lnum, err = 0, node_sz, iopos, i, j, cnt, len, alen, row;
	int blnum, boffs, bsz, bcnt;
	struct ubifs_pnode *pnode = NULL;
	struct ubifs_nnode *nnode = NULL;
	void *buf = NULL, *p;
	struct ubifs_lpt_lprops *ltab = NULL;
	int *lsave = NULL;

	err = calc_dflt_lpt_geom(c, main_lebs, big_lpt);
	if (err)
		return err;
	*lpt_lebs = c->lpt_lebs;

	/* Needed by 'ubifs_pack_nnode()' and 'set_ltab()' */
	c->lpt_first = lpt_first;
	/* Needed by 'set_ltab()' */
	c->lpt_last = lpt_first + c->lpt_lebs - 1;
	/* Needed by 'ubifs_pack_lsave()' */
	c->main_first = c->leb_cnt - *main_lebs;

	lsave = kmalloc(sizeof(int) * c->lsave_cnt, GFP_KERNEL);
	pnode = kzalloc(sizeof(struct ubifs_pnode), GFP_KERNEL);
	nnode = kzalloc(sizeof(struct ubifs_nnode), GFP_KERNEL);
	buf = vmalloc(c->leb_size);
	ltab = vmalloc(sizeof(struct ubifs_lpt_lprops) * c->lpt_lebs);
	if (!pnode || !nnode || !buf || !ltab || !lsave) {
		err = -ENOMEM;
		goto out;
	}

	ubifs_assert(!c->ltab);
	c->ltab = ltab; /* Needed by set_ltab */

	/* Initialize LPT's own lprops */
	for (i = 0; i < c->lpt_lebs; i++) {
		ltab[i].free = c->leb_size;
		ltab[i].dirty = 0;
		ltab[i].tgc = 0;
		ltab[i].cmt = 0;
	}

	lnum = lpt_first;
	p = buf;
	/* Number of leaf nodes (pnodes) */
	cnt = c->pnode_cnt;

	/*
	 * The first pnode contains the LEB properties for the LEBs that contain
	 * the root inode node and the root index node of the index tree.
	 */
	node_sz = ALIGN(ubifs_idx_node_sz(c, 1), 8);
	iopos = ALIGN(node_sz, c->min_io_size);
	pnode->lprops[0].free = c->leb_size - iopos;
	pnode->lprops[0].dirty = iopos - node_sz;
	pnode->lprops[0].flags = LPROPS_INDEX;

	node_sz = UBIFS_INO_NODE_SZ;
	iopos = ALIGN(node_sz, c->min_io_size);
	pnode->lprops[1].free = c->leb_size - iopos;
	pnode->lprops[1].dirty = iopos - node_sz;

	/* All other main-area LEBs start out completely free */
	for (i = 2; i < UBIFS_LPT_FANOUT; i++)
		pnode->lprops[i].free = c->leb_size;

	/* Add first pnode */
	ubifs_pack_pnode(c, p, pnode);
	p += c->pnode_sz;
	len = c->pnode_sz;
	pnode->num += 1;

	/* Reset pnode values for remaining pnodes */
	pnode->lprops[0].free = c->leb_size;
	pnode->lprops[0].dirty = 0;
	pnode->lprops[0].flags = 0;

	pnode->lprops[1].free = c->leb_size;
	pnode->lprops[1].dirty = 0;

	/*
	 * To calculate the internal node branches, we keep information about
	 * the level below.
	 */
	blnum = lnum; /* LEB number of level below */
	boffs = 0; /* Offset of level below */
	bcnt = cnt; /* Number of nodes in level below */
	bsz = c->pnode_sz; /* Size of nodes in level below */

	/* Add all remaining pnodes */
	for (i = 1; i < cnt; i++) {
		/* Flush the buffer when the next pnode would not fit */
		if (len + c->pnode_sz > c->leb_size) {
			alen = ALIGN(len, c->min_io_size);
			set_ltab(c, lnum, c->leb_size - alen, alen - len);
			memset(p, 0xff, alen - len);
			err = ubifs_leb_change(c, lnum++, buf, alen);
			if (err)
				goto out;
			p = buf;
			len = 0;
		}
		ubifs_pack_pnode(c, p, pnode);
		p += c->pnode_sz;
		len += c->pnode_sz;
		/*
		 * pnodes are simply numbered left to right starting at zero,
		 * which means the pnode number can be used easily to traverse
		 * down the tree to the corresponding pnode.
		 */
		pnode->num += 1;
	}

	/* Row of the lowest nnode level */
	row = 0;
	for (i = UBIFS_LPT_FANOUT; cnt > i; i <<= UBIFS_LPT_FANOUT_SHIFT)
		row += 1;
	/* Add all nnodes, one level at a time */
	while (1) {
		/* Number of internal nodes (nnodes) at next level */
		cnt = DIV_ROUND_UP(cnt, UBIFS_LPT_FANOUT);
		for (i = 0; i < cnt; i++) {
			if (len + c->nnode_sz > c->leb_size) {
				alen = ALIGN(len, c->min_io_size);
				set_ltab(c, lnum, c->leb_size - alen,
					 alen - len);
				memset(p, 0xff, alen - len);
				err = ubifs_leb_change(c, lnum++, buf, alen);
				if (err)
					goto out;
				p = buf;
				len = 0;
			}
			/* Only 1 nnode at this level, so it is the root */
			if (cnt == 1) {
				c->lpt_lnum = lnum;
				c->lpt_offs = len;
			}
			/* Set branches to the level below */
			for (j = 0; j < UBIFS_LPT_FANOUT; j++) {
				if (bcnt) {
					/* Children that spilled into the
					 * next LEB restart at offset 0 */
					if (boffs + bsz > c->leb_size) {
						blnum += 1;
						boffs = 0;
					}
					nnode->nbranch[j].lnum = blnum;
					nnode->nbranch[j].offs = boffs;
					boffs += bsz;
					bcnt--;
				} else {
					nnode->nbranch[j].lnum = 0;
					nnode->nbranch[j].offs = 0;
				}
			}
			nnode->num = calc_nnode_num(row, i);
			ubifs_pack_nnode(c, p, nnode);
			p += c->nnode_sz;
			len += c->nnode_sz;
		}
		/* Only 1 nnode at this level, so it is the root */
		if (cnt == 1)
			break;
		/* Update the information about the level below */
		bcnt = cnt;
		bsz = c->nnode_sz;
		row -= 1;
	}

	if (*big_lpt) {
		/* Need to add LPT's save table */
		if (len + c->lsave_sz > c->leb_size) {
			alen = ALIGN(len, c->min_io_size);
			set_ltab(c, lnum, c->leb_size - alen, alen - len);
			memset(p, 0xff, alen - len);
			err = ubifs_leb_change(c, lnum++, buf, alen);
			if (err)
				goto out;
			p = buf;
			len = 0;
		}

		c->lsave_lnum = lnum;
		c->lsave_offs = len;

		/* Seed lsave with the first main-area LEBs */
		for (i = 0; i < c->lsave_cnt && i < *main_lebs; i++)
			lsave[i] = c->main_first + i;
		for (; i < c->lsave_cnt; i++)
			lsave[i] = c->main_first;

		ubifs_pack_lsave(c, p, lsave);
		p += c->lsave_sz;
		len += c->lsave_sz;
	}

	/* Need to add LPT's own LEB properties table */
	if (len + c->ltab_sz > c->leb_size) {
		alen = ALIGN(len, c->min_io_size);
		set_ltab(c, lnum, c->leb_size - alen, alen - len);
		memset(p, 0xff, alen - len);
		err = ubifs_leb_change(c, lnum++, buf, alen);
		if (err)
			goto out;
		p = buf;
		len = 0;
	}

	c->ltab_lnum = lnum;
	c->ltab_offs = len;

	/* Update ltab before packing it */
	len += c->ltab_sz;
	alen = ALIGN(len, c->min_io_size);
	set_ltab(c, lnum, c->leb_size - alen, alen - len);

	ubifs_pack_ltab(c, p, ltab);
	p += c->ltab_sz;

	/* Write remaining buffer */
	memset(p, 0xff, alen - len);
	err = ubifs_leb_change(c, lnum, buf, alen);
	if (err)
		goto out;

	c->nhead_lnum = lnum;
	c->nhead_offs = ALIGN(len, c->min_io_size);

	dbg_lp("space_bits %d", c->space_bits);
	dbg_lp("lpt_lnum_bits %d", c->lpt_lnum_bits);
	dbg_lp("lpt_offs_bits %d", c->lpt_offs_bits);
	dbg_lp("lpt_spc_bits %d", c->lpt_spc_bits);
	dbg_lp("pcnt_bits %d", c->pcnt_bits);
	dbg_lp("lnum_bits %d", c->lnum_bits);
	dbg_lp("pnode_sz %d", c->pnode_sz);
	dbg_lp("nnode_sz %d", c->nnode_sz);
	dbg_lp("ltab_sz %d", c->ltab_sz);
	dbg_lp("lsave_sz %d", c->lsave_sz);
	dbg_lp("lsave_cnt %d", c->lsave_cnt);
	dbg_lp("lpt_hght %d", c->lpt_hght);
	dbg_lp("big_lpt %d", c->big_lpt);
	dbg_lp("LPT root is at %d:%d", c->lpt_lnum, c->lpt_offs);
	dbg_lp("LPT head is at %d:%d", c->nhead_lnum, c->nhead_offs);
	dbg_lp("LPT ltab is at %d:%d", c->ltab_lnum, c->ltab_offs);
	if (c->big_lpt)
		dbg_lp("LPT lsave is at %d:%d", c->lsave_lnum, c->lsave_offs);
out:
	c->ltab = NULL;
	kfree(lsave);
	vfree(ltab);
	vfree(buf);
	kfree(nnode);
	kfree(pnode);
	return err;
}
860 
869 static void update_cats(struct ubifs_info *c, struct ubifs_pnode *pnode)
870 {
871  int i;
872 
873  for (i = 0; i < UBIFS_LPT_FANOUT; i++) {
874  int cat = pnode->lprops[i].flags & LPROPS_CAT_MASK;
875  int lnum = pnode->lprops[i].lnum;
876 
877  if (!lnum)
878  return;
879  ubifs_add_to_cat(c, &pnode->lprops[i], cat);
880  }
881 }
882 
893 static void replace_cats(struct ubifs_info *c, struct ubifs_pnode *old_pnode,
894  struct ubifs_pnode *new_pnode)
895 {
896  int i;
897 
898  for (i = 0; i < UBIFS_LPT_FANOUT; i++) {
899  if (!new_pnode->lprops[i].lnum)
900  return;
901  ubifs_replace_cat(c, &old_pnode->lprops[i],
902  &new_pnode->lprops[i]);
903  }
904 }
905 
914 static int check_lpt_crc(void *buf, int len)
915 {
916  int pos = 0;
917  uint8_t *addr = buf;
918  uint16_t crc, calc_crc;
919 
920  crc = ubifs_unpack_bits(&addr, &pos, UBIFS_LPT_CRC_BITS);
921  calc_crc = crc16(-1, buf + UBIFS_LPT_CRC_BYTES,
922  len - UBIFS_LPT_CRC_BYTES);
923  if (crc != calc_crc) {
924  ubifs_err("invalid crc in LPT node: crc %hx calc %hx", crc,
925  calc_crc);
926  dump_stack();
927  return -EINVAL;
928  }
929  return 0;
930 }
931 
941 static int check_lpt_type(uint8_t **addr, int *pos, int type)
942 {
943  int node_type;
944 
945  node_type = ubifs_unpack_bits(addr, pos, UBIFS_LPT_TYPE_BITS);
946  if (node_type != type) {
947  ubifs_err("invalid type (%d) in LPT node type %d", node_type,
948  type);
949  dump_stack();
950  return -EINVAL;
951  }
952  return 0;
953 }
954 
963 static int unpack_pnode(const struct ubifs_info *c, void *buf,
964  struct ubifs_pnode *pnode)
965 {
966  uint8_t *addr = buf + UBIFS_LPT_CRC_BYTES;
967  int i, pos = 0, err;
968 
969  err = check_lpt_type(&addr, &pos, UBIFS_LPT_PNODE);
970  if (err)
971  return err;
972  if (c->big_lpt)
973  pnode->num = ubifs_unpack_bits(&addr, &pos, c->pcnt_bits);
974  for (i = 0; i < UBIFS_LPT_FANOUT; i++) {
975  struct ubifs_lprops * const lprops = &pnode->lprops[i];
976 
977  lprops->free = ubifs_unpack_bits(&addr, &pos, c->space_bits);
978  lprops->free <<= 3;
979  lprops->dirty = ubifs_unpack_bits(&addr, &pos, c->space_bits);
980  lprops->dirty <<= 3;
981 
982  if (ubifs_unpack_bits(&addr, &pos, 1))
983  lprops->flags = LPROPS_INDEX;
984  else
985  lprops->flags = 0;
986  lprops->flags |= ubifs_categorize_lprops(c, lprops);
987  }
988  err = check_lpt_crc(buf, c->pnode_sz);
989  return err;
990 }
991 
1000 int ubifs_unpack_nnode(const struct ubifs_info *c, void *buf,
1001  struct ubifs_nnode *nnode)
1002 {
1003  uint8_t *addr = buf + UBIFS_LPT_CRC_BYTES;
1004  int i, pos = 0, err;
1005 
1006  err = check_lpt_type(&addr, &pos, UBIFS_LPT_NNODE);
1007  if (err)
1008  return err;
1009  if (c->big_lpt)
1010  nnode->num = ubifs_unpack_bits(&addr, &pos, c->pcnt_bits);
1011  for (i = 0; i < UBIFS_LPT_FANOUT; i++) {
1012  int lnum;
1013 
1014  lnum = ubifs_unpack_bits(&addr, &pos, c->lpt_lnum_bits) +
1015  c->lpt_first;
1016  if (lnum == c->lpt_last + 1)
1017  lnum = 0;
1018  nnode->nbranch[i].lnum = lnum;
1019  nnode->nbranch[i].offs = ubifs_unpack_bits(&addr, &pos,
1020  c->lpt_offs_bits);
1021  }
1022  err = check_lpt_crc(buf, c->nnode_sz);
1023  return err;
1024 }
1025 
1033 static int unpack_ltab(const struct ubifs_info *c, void *buf)
1034 {
1035  uint8_t *addr = buf + UBIFS_LPT_CRC_BYTES;
1036  int i, pos = 0, err;
1037 
1038  err = check_lpt_type(&addr, &pos, UBIFS_LPT_LTAB);
1039  if (err)
1040  return err;
1041  for (i = 0; i < c->lpt_lebs; i++) {
1042  int free = ubifs_unpack_bits(&addr, &pos, c->lpt_spc_bits);
1043  int dirty = ubifs_unpack_bits(&addr, &pos, c->lpt_spc_bits);
1044 
1045  if (free < 0 || free > c->leb_size || dirty < 0 ||
1046  dirty > c->leb_size || free + dirty > c->leb_size)
1047  return -EINVAL;
1048 
1049  c->ltab[i].free = free;
1050  c->ltab[i].dirty = dirty;
1051  c->ltab[i].tgc = 0;
1052  c->ltab[i].cmt = 0;
1053  }
1054  err = check_lpt_crc(buf, c->ltab_sz);
1055  return err;
1056 }
1057 
1065 static int unpack_lsave(const struct ubifs_info *c, void *buf)
1066 {
1067  uint8_t *addr = buf + UBIFS_LPT_CRC_BYTES;
1068  int i, pos = 0, err;
1069 
1070  err = check_lpt_type(&addr, &pos, UBIFS_LPT_LSAVE);
1071  if (err)
1072  return err;
1073  for (i = 0; i < c->lsave_cnt; i++) {
1074  int lnum = ubifs_unpack_bits(&addr, &pos, c->lnum_bits);
1075 
1076  if (lnum < c->main_first || lnum >= c->leb_cnt)
1077  return -EINVAL;
1078  c->lsave[i] = lnum;
1079  }
1080  err = check_lpt_crc(buf, c->lsave_sz);
1081  return err;
1082 }
1083 
1093 static int validate_nnode(const struct ubifs_info *c, struct ubifs_nnode *nnode,
1094  struct ubifs_nnode *parent, int iip)
1095 {
1096  int i, lvl, max_offs;
1097 
1098  if (c->big_lpt) {
1099  int num = calc_nnode_num_from_parent(c, parent, iip);
1100 
1101  if (nnode->num != num)
1102  return -EINVAL;
1103  }
1104  lvl = parent ? parent->level - 1 : c->lpt_hght;
1105  if (lvl < 1)
1106  return -EINVAL;
1107  if (lvl == 1)
1108  max_offs = c->leb_size - c->pnode_sz;
1109  else
1110  max_offs = c->leb_size - c->nnode_sz;
1111  for (i = 0; i < UBIFS_LPT_FANOUT; i++) {
1112  int lnum = nnode->nbranch[i].lnum;
1113  int offs = nnode->nbranch[i].offs;
1114 
1115  if (lnum == 0) {
1116  if (offs != 0)
1117  return -EINVAL;
1118  continue;
1119  }
1120  if (lnum < c->lpt_first || lnum > c->lpt_last)
1121  return -EINVAL;
1122  if (offs < 0 || offs > max_offs)
1123  return -EINVAL;
1124  }
1125  return 0;
1126 }
1127 
1137 static int validate_pnode(const struct ubifs_info *c, struct ubifs_pnode *pnode,
1138  struct ubifs_nnode *parent, int iip)
1139 {
1140  int i;
1141 
1142  if (c->big_lpt) {
1143  int num = calc_pnode_num_from_parent(c, parent, iip);
1144 
1145  if (pnode->num != num)
1146  return -EINVAL;
1147  }
1148  for (i = 0; i < UBIFS_LPT_FANOUT; i++) {
1149  int free = pnode->lprops[i].free;
1150  int dirty = pnode->lprops[i].dirty;
1151 
1152  if (free < 0 || free > c->leb_size || free % c->min_io_size ||
1153  (free & 7))
1154  return -EINVAL;
1155  if (dirty < 0 || dirty > c->leb_size || (dirty & 7))
1156  return -EINVAL;
1157  if (dirty + free > c->leb_size)
1158  return -EINVAL;
1159  }
1160  return 0;
1161 }
1162 
1171 static void set_pnode_lnum(const struct ubifs_info *c,
1172  struct ubifs_pnode *pnode)
1173 {
1174  int i, lnum;
1175 
1176  lnum = (pnode->num << UBIFS_LPT_FANOUT_SHIFT) + c->main_first;
1177  for (i = 0; i < UBIFS_LPT_FANOUT; i++) {
1178  if (lnum >= c->leb_cnt)
1179  return;
1180  pnode->lprops[i].lnum = lnum++;
1181  }
1182 }
1183 
/**
 * ubifs_read_nnode - read a nnode from flash and link it to the in-memory tree.
 * @c: UBIFS file-system description object
 * @parent: parent nnode (NULL when reading the root nnode)
 * @iip: index in parent
 *
 * This function returns %0 on success and a negative error code on failure.
 */
int ubifs_read_nnode(struct ubifs_info *c, struct ubifs_nnode *parent, int iip)
{
	struct ubifs_nbranch *branch = NULL;
	struct ubifs_nnode *nnode = NULL;
	void *buf = c->lpt_nod_buf;
	int err, lnum, offs;

	if (parent) {
		branch = &parent->nbranch[iip];
		lnum = branch->lnum;
		offs = branch->offs;
	} else {
		/* No parent: read the root nnode from the recorded position */
		lnum = c->lpt_lnum;
		offs = c->lpt_offs;
	}
	nnode = kzalloc(sizeof(struct ubifs_nnode), GFP_NOFS);
	if (!nnode) {
		err = -ENOMEM;
		goto out;
	}
	if (lnum == 0) {
		/*
		 * This nnode was not written which just means that the LEB
		 * properties in the subtree below it describe empty LEBs. We
		 * make the nnode as though we had read it, which in fact means
		 * doing almost nothing.
		 */
		if (c->big_lpt)
			nnode->num = calc_nnode_num_from_parent(c, parent, iip);
	} else {
		err = ubifs_leb_read(c, lnum, buf, offs, c->nnode_sz, 1);
		if (err)
			goto out;
		err = ubifs_unpack_nnode(c, buf, nnode);
		if (err)
			goto out;
	}
	err = validate_nnode(c, nnode, parent, iip);
	if (err)
		goto out;
	/* Small model does not store node numbers, so compute it here */
	if (!c->big_lpt)
		nnode->num = calc_nnode_num_from_parent(c, parent, iip);
	if (parent) {
		branch->nnode = nnode;
		nnode->level = parent->level - 1;
	} else {
		c->nroot = nnode;
		nnode->level = c->lpt_hght;
	}
	nnode->parent = parent;
	nnode->iip = iip;
	return 0;

out:
	ubifs_err("error %d reading nnode at %d:%d", err, lnum, offs);
	dump_stack();
	kfree(nnode);
	return err;
}
1251 
/**
 * read_pnode - read a pnode from flash and link it to the in-memory tree.
 * @c: UBIFS file-system description object
 * @parent: parent nnode
 * @iip: index in parent
 *
 * This function returns %0 on success and a negative error code on failure.
 */
static int read_pnode(struct ubifs_info *c, struct ubifs_nnode *parent, int iip)
{
	struct ubifs_nbranch *branch;
	struct ubifs_pnode *pnode = NULL;
	void *buf = c->lpt_nod_buf;
	int err, lnum, offs;

	branch = &parent->nbranch[iip];
	lnum = branch->lnum;
	offs = branch->offs;
	pnode = kzalloc(sizeof(struct ubifs_pnode), GFP_NOFS);
	if (!pnode)
		return -ENOMEM;

	if (lnum == 0) {
		/*
		 * This pnode was not written which just means that the LEB
		 * properties in it describe empty LEBs. We make the pnode as
		 * though we had read it.
		 */
		int i;

		if (c->big_lpt)
			pnode->num = calc_pnode_num_from_parent(c, parent, iip);
		for (i = 0; i < UBIFS_LPT_FANOUT; i++) {
			struct ubifs_lprops * const lprops = &pnode->lprops[i];

			lprops->free = c->leb_size;
			lprops->flags = ubifs_categorize_lprops(c, lprops);
		}
	} else {
		err = ubifs_leb_read(c, lnum, buf, offs, c->pnode_sz, 1);
		if (err)
			goto out;
		err = unpack_pnode(c, buf, pnode);
		if (err)
			goto out;
	}
	err = validate_pnode(c, pnode, parent, iip);
	if (err)
		goto out;
	/* Small model does not store node numbers, so compute it here */
	if (!c->big_lpt)
		pnode->num = calc_pnode_num_from_parent(c, parent, iip);
	branch->pnode = pnode;
	pnode->parent = parent;
	pnode->iip = iip;
	set_pnode_lnum(c, pnode);
	c->pnodes_have += 1;
	return 0;

out:
	ubifs_err("error %d reading pnode at %d:%d", err, lnum, offs);
	ubifs_dump_pnode(c, pnode, parent, iip);
	dump_stack();
	ubifs_err("calc num: %d", calc_pnode_num_from_parent(c, parent, iip));
	kfree(pnode);
	return err;
}
1318 
1325 static int read_ltab(struct ubifs_info *c)
1326 {
1327  int err;
1328  void *buf;
1329 
1330  buf = vmalloc(c->ltab_sz);
1331  if (!buf)
1332  return -ENOMEM;
1333  err = ubifs_leb_read(c, c->ltab_lnum, buf, c->ltab_offs, c->ltab_sz, 1);
1334  if (err)
1335  goto out;
1336  err = unpack_ltab(c, buf);
1337 out:
1338  vfree(buf);
1339  return err;
1340 }
1341 
/**
 * read_lsave - read LPT's save table.
 * @c: UBIFS file-system description object
 *
 * Reads and unpacks the lsave table, then looks up each saved LEB so that
 * the corresponding pnodes are brought into memory.
 *
 * This function returns %0 on success and a negative error code on failure.
 */
static int read_lsave(struct ubifs_info *c)
{
	int err, i;
	void *buf;

	buf = vmalloc(c->lsave_sz);
	if (!buf)
		return -ENOMEM;
	err = ubifs_leb_read(c, c->lsave_lnum, buf, c->lsave_offs,
			     c->lsave_sz, 1);
	if (err)
		goto out;
	err = unpack_lsave(c, buf);
	if (err)
		goto out;
	for (i = 0; i < c->lsave_cnt; i++) {
		int lnum = c->lsave[i];
		struct ubifs_lprops *lprops;

		/*
		 * Due to automatic resizing, the values in the lsave table
		 * could be beyond the volume size - just ignore them.
		 */
		if (lnum >= c->leb_cnt)
			continue;
		lprops = ubifs_lpt_lookup(c, lnum);
		if (IS_ERR(lprops)) {
			err = PTR_ERR(lprops);
			goto out;
		}
	}
out:
	vfree(buf);
	return err;
}
1383 
				    struct ubifs_nnode *parent, int iip)
{
	struct ubifs_nbranch *branch;
	struct ubifs_nnode *nnode;
	int err;

	/* If this nnode is already in memory, just return it */
	branch = &parent->nbranch[iip];
	nnode = branch->nnode;
	if (nnode)
		return nnode;
	/* Otherwise read it in; ubifs_read_nnode() fills in branch->nnode */
	err = ubifs_read_nnode(c, parent, iip);
	if (err)
		return ERR_PTR(err);
	return branch->nnode;
}
1409 
				    struct ubifs_nnode *parent, int iip)
{
	struct ubifs_nbranch *branch;
	struct ubifs_pnode *pnode;
	int err;

	/* If this pnode is already in memory, just return it */
	branch = &parent->nbranch[iip];
	pnode = branch->pnode;
	if (pnode)
		return pnode;
	/* Otherwise read it in; read_pnode() fills in branch->pnode */
	err = read_pnode(c, parent, iip);
	if (err)
		return ERR_PTR(err);
	/* The freshly read lprops must be added to the category heaps/lists */
	update_cats(c, branch->pnode);
	return branch->pnode;
}
1436 
1445 struct ubifs_lprops *ubifs_lpt_lookup(struct ubifs_info *c, int lnum)
1446 {
1447  int err, i, h, iip, shft;
1448  struct ubifs_nnode *nnode;
1449  struct ubifs_pnode *pnode;
1450 
1451  if (!c->nroot) {
1452  err = ubifs_read_nnode(c, NULL, 0);
1453  if (err)
1454  return ERR_PTR(err);
1455  }
1456  nnode = c->nroot;
1457  i = lnum - c->main_first;
1458  shft = c->lpt_hght * UBIFS_LPT_FANOUT_SHIFT;
1459  for (h = 1; h < c->lpt_hght; h++) {
1460  iip = ((i >> shft) & (UBIFS_LPT_FANOUT - 1));
1461  shft -= UBIFS_LPT_FANOUT_SHIFT;
1462  nnode = ubifs_get_nnode(c, nnode, iip);
1463  if (IS_ERR(nnode))
1464  return ERR_CAST(nnode);
1465  }
1466  iip = ((i >> shft) & (UBIFS_LPT_FANOUT - 1));
1467  shft -= UBIFS_LPT_FANOUT_SHIFT;
1468  pnode = ubifs_get_pnode(c, nnode, iip);
1469  if (IS_ERR(pnode))
1470  return ERR_CAST(pnode);
1471  iip = (i & (UBIFS_LPT_FANOUT - 1));
1472  dbg_lp("LEB %d, free %d, dirty %d, flags %d", lnum,
1473  pnode->lprops[iip].free, pnode->lprops[iip].dirty,
1474  pnode->lprops[iip].flags);
1475  return &pnode->lprops[iip];
1476 }
1477 
/**
 * dirty_cow_nnode - ensure a nnode is dirty and not being committed.
 * @c: UBIFS file-system description object
 * @nnode: nnode to check
 *
 * If @nnode is not part of the current commit it is simply marked dirty and
 * returned.  If it is being committed (%COW_CNODE set), a copy is made, hooked
 * into the tree in its place, and the copy is returned; the original is left
 * to the commit and marked obsolete.
 *
 * Returns the dirtied nnode on success or a negative error code on failure.
 */
static struct ubifs_nnode *dirty_cow_nnode(struct ubifs_info *c,
					   struct ubifs_nnode *nnode)
{
	struct ubifs_nnode *n;
	int i;

	if (!test_bit(COW_CNODE, &nnode->flags)) {
		/* nnode is not being committed */
		if (!test_and_set_bit(DIRTY_CNODE, &nnode->flags)) {
			/* Only count/account dirt on the 0 -> 1 transition */
			c->dirty_nn_cnt += 1;
			ubifs_add_nnode_dirt(c, nnode);
		}
		return nnode;
	}

	/* nnode is being committed, so copy it */
	n = kmalloc(sizeof(struct ubifs_nnode), GFP_NOFS);
	if (unlikely(!n))
		return ERR_PTR(-ENOMEM);

	memcpy(n, nnode, sizeof(struct ubifs_nnode));
	n->cnext = NULL;
	__set_bit(DIRTY_CNODE, &n->flags);
	__clear_bit(COW_CNODE, &n->flags);

	/* The children now have new parent */
	for (i = 0; i < UBIFS_LPT_FANOUT; i++) {
		struct ubifs_nbranch *branch = &n->nbranch[i];

		if (branch->cnode)
			branch->cnode->parent = n;
	}

	/* The original stays with the commit and becomes obsolete */
	__set_bit(OBSOLETE_CNODE, &nnode->flags);

	c->dirty_nn_cnt += 1;
	ubifs_add_nnode_dirt(c, nnode);
	/* Replace the original with the copy in its parent (or as the root) */
	if (nnode->parent)
		nnode->parent->nbranch[n->iip].nnode = n;
	else
		c->nroot = n;
	return n;
}
1529 
/**
 * dirty_cow_pnode - ensure a pnode is dirty and not being committed.
 * @c: UBIFS file-system description object
 * @pnode: pnode to check
 *
 * If @pnode is not part of the current commit it is simply marked dirty and
 * returned.  If it is being committed (%COW_CNODE set), a copy is made, hooked
 * into the tree in its place, and the copy is returned; the original is left
 * to the commit and marked obsolete.
 *
 * Returns the dirtied pnode on success or a negative error code on failure.
 */
static struct ubifs_pnode *dirty_cow_pnode(struct ubifs_info *c,
					   struct ubifs_pnode *pnode)
{
	struct ubifs_pnode *p;

	if (!test_bit(COW_CNODE, &pnode->flags)) {
		/* pnode is not being committed */
		if (!test_and_set_bit(DIRTY_CNODE, &pnode->flags)) {
			/* Only count/account dirt on the 0 -> 1 transition */
			c->dirty_pn_cnt += 1;
			add_pnode_dirt(c, pnode);
		}
		return pnode;
	}

	/* pnode is being committed, so copy it */
	p = kmalloc(sizeof(struct ubifs_pnode), GFP_NOFS);
	if (unlikely(!p))
		return ERR_PTR(-ENOMEM);

	memcpy(p, pnode, sizeof(struct ubifs_pnode));
	p->cnext = NULL;
	__set_bit(DIRTY_CNODE, &p->flags);
	__clear_bit(COW_CNODE, &p->flags);
	/* Category heaps/lists must now reference the copy's lprops */
	replace_cats(c, pnode, p);

	/* The original stays with the commit and becomes obsolete */
	__set_bit(OBSOLETE_CNODE, &pnode->flags);

	c->dirty_pn_cnt += 1;
	add_pnode_dirt(c, pnode);
	/* Unlike dirty_cow_nnode(), no parentless (root) case is handled */
	pnode->parent->nbranch[p->iip].pnode = p;
	return p;
}
1570 
/**
 * ubifs_lpt_lookup_dirty - lookup LEB properties in the LPT, dirtying the path.
 * @c: UBIFS file-system description object
 * @lnum: LEB number to look up
 *
 * Like ubifs_lpt_lookup(), but every node from the root down to the pnode is
 * dirtied (with copy-on-write if it is being committed).
 *
 * This function returns a pointer to the LEB properties on success or a
 * negative error code on failure.
 */
struct ubifs_lprops *ubifs_lpt_lookup_dirty(struct ubifs_info *c, int lnum)
{
	int err, i, h, iip, shft;
	struct ubifs_nnode *nnode;
	struct ubifs_pnode *pnode;

	/* Make sure the root nnode is in memory */
	if (!c->nroot) {
		err = ubifs_read_nnode(c, NULL, 0);
		if (err)
			return ERR_PTR(err);
	}
	nnode = c->nroot;
	/* Dirty (CoW) the root first */
	nnode = dirty_cow_nnode(c, nnode);
	if (IS_ERR(nnode))
		return ERR_CAST(nnode);
	i = lnum - c->main_first;
	shft = c->lpt_hght * UBIFS_LPT_FANOUT_SHIFT;
	for (h = 1; h < c->lpt_hght; h++) {
		iip = ((i >> shft) & (UBIFS_LPT_FANOUT - 1));
		shft -= UBIFS_LPT_FANOUT_SHIFT;
		nnode = ubifs_get_nnode(c, nnode, iip);
		if (IS_ERR(nnode))
			return ERR_CAST(nnode);
		/* Dirty (CoW) each nnode on the way down */
		nnode = dirty_cow_nnode(c, nnode);
		if (IS_ERR(nnode))
			return ERR_CAST(nnode);
	}
	iip = ((i >> shft) & (UBIFS_LPT_FANOUT - 1));
	shft -= UBIFS_LPT_FANOUT_SHIFT;
	pnode = ubifs_get_pnode(c, nnode, iip);
	if (IS_ERR(pnode))
		return ERR_CAST(pnode);
	/* Finally dirty (CoW) the pnode that holds the target lprops */
	pnode = dirty_cow_pnode(c, pnode);
	if (IS_ERR(pnode))
		return ERR_CAST(pnode);
	iip = (i & (UBIFS_LPT_FANOUT - 1));
	dbg_lp("LEB %d, free %d, dirty %d, flags %d", lnum,
	       pnode->lprops[iip].free, pnode->lprops[iip].dirty,
	       pnode->lprops[iip].flags);
	return &pnode->lprops[iip];
}
1621 
1628 static int lpt_init_rd(struct ubifs_info *c)
1629 {
1630  int err, i;
1631 
1632  c->ltab = vmalloc(sizeof(struct ubifs_lpt_lprops) * c->lpt_lebs);
1633  if (!c->ltab)
1634  return -ENOMEM;
1635 
1636  i = max_t(int, c->nnode_sz, c->pnode_sz);
1637  c->lpt_nod_buf = kmalloc(i, GFP_KERNEL);
1638  if (!c->lpt_nod_buf)
1639  return -ENOMEM;
1640 
1641  for (i = 0; i < LPROPS_HEAP_CNT; i++) {
1642  c->lpt_heap[i].arr = kmalloc(sizeof(void *) * LPT_HEAP_SZ,
1643  GFP_KERNEL);
1644  if (!c->lpt_heap[i].arr)
1645  return -ENOMEM;
1646  c->lpt_heap[i].cnt = 0;
1647  c->lpt_heap[i].max_cnt = LPT_HEAP_SZ;
1648  }
1649 
1650  c->dirty_idx.arr = kmalloc(sizeof(void *) * LPT_HEAP_SZ, GFP_KERNEL);
1651  if (!c->dirty_idx.arr)
1652  return -ENOMEM;
1653  c->dirty_idx.cnt = 0;
1654  c->dirty_idx.max_cnt = LPT_HEAP_SZ;
1655 
1656  err = read_ltab(c);
1657  if (err)
1658  return err;
1659 
1660  dbg_lp("space_bits %d", c->space_bits);
1661  dbg_lp("lpt_lnum_bits %d", c->lpt_lnum_bits);
1662  dbg_lp("lpt_offs_bits %d", c->lpt_offs_bits);
1663  dbg_lp("lpt_spc_bits %d", c->lpt_spc_bits);
1664  dbg_lp("pcnt_bits %d", c->pcnt_bits);
1665  dbg_lp("lnum_bits %d", c->lnum_bits);
1666  dbg_lp("pnode_sz %d", c->pnode_sz);
1667  dbg_lp("nnode_sz %d", c->nnode_sz);
1668  dbg_lp("ltab_sz %d", c->ltab_sz);
1669  dbg_lp("lsave_sz %d", c->lsave_sz);
1670  dbg_lp("lsave_cnt %d", c->lsave_cnt);
1671  dbg_lp("lpt_hght %d", c->lpt_hght);
1672  dbg_lp("big_lpt %d", c->big_lpt);
1673  dbg_lp("LPT root is at %d:%d", c->lpt_lnum, c->lpt_offs);
1674  dbg_lp("LPT head is at %d:%d", c->nhead_lnum, c->nhead_offs);
1675  dbg_lp("LPT ltab is at %d:%d", c->ltab_lnum, c->ltab_offs);
1676  if (c->big_lpt)
1677  dbg_lp("LPT lsave is at %d:%d", c->lsave_lnum, c->lsave_offs);
1678 
1679  return 0;
1680 }
1681 
1690 static int lpt_init_wr(struct ubifs_info *c)
1691 {
1692  int err, i;
1693 
1694  c->ltab_cmt = vmalloc(sizeof(struct ubifs_lpt_lprops) * c->lpt_lebs);
1695  if (!c->ltab_cmt)
1696  return -ENOMEM;
1697 
1698  c->lpt_buf = vmalloc(c->leb_size);
1699  if (!c->lpt_buf)
1700  return -ENOMEM;
1701 
1702  if (c->big_lpt) {
1703  c->lsave = kmalloc(sizeof(int) * c->lsave_cnt, GFP_NOFS);
1704  if (!c->lsave)
1705  return -ENOMEM;
1706  err = read_lsave(c);
1707  if (err)
1708  return err;
1709  }
1710 
1711  for (i = 0; i < c->lpt_lebs; i++)
1712  if (c->ltab[i].free == c->leb_size) {
1713  err = ubifs_leb_unmap(c, i + c->lpt_first);
1714  if (err)
1715  return err;
1716  }
1717 
1718  return 0;
1719 }
1720 
/**
 * ubifs_lpt_init - initialize the LPT.
 * @c: UBIFS file-system description object
 * @rd: whether to initialize the LPT for reading
 * @wr: whether to initialize the LPT for writing
 *
 * This function returns %0 on success and a negative error code on failure.
 */
int ubifs_lpt_init(struct ubifs_info *c, int rd, int wr)
{
	int err = 0;

	if (rd)
		err = lpt_init_rd(c);
	if (!err && wr)
		err = lpt_init_wr(c);
	if (!err)
		return 0;

	/* Undo whatever was set up before the failure */
	if (wr)
		ubifs_lpt_free(c, 1);
	if (rd)
		ubifs_lpt_free(c, 0);
	return err;
}
1758 
1771  union {
1772  struct ubifs_nnode nnode;
1773  struct ubifs_pnode pnode;
1775  };
1776  int in_tree;
1777  union {
1781  } ptr;
1782 };
1783 
/**
 * scan_get_nnode - for the scan, get a nnode from either the tree or flash.
 * @c: UBIFS file-system description object
 * @path: scan path element in which to build the nnode if it is not in memory
 * @parent: parent nnode
 * @iip: index in parent of the nnode
 *
 * Unlike ubifs_get_nnode(), a node that is not already in the in-memory tree
 * is built in @path's scratch space instead of being allocated, and
 * @path->in_tree records which case occurred.
 *
 * This function returns a pointer to the nnode on success or a negative error
 * code on failure.
 */
static struct ubifs_nnode *scan_get_nnode(struct ubifs_info *c,
					  struct lpt_scan_node *path,
					  struct ubifs_nnode *parent, int iip)
{
	struct ubifs_nbranch *branch;
	struct ubifs_nnode *nnode;
	void *buf = c->lpt_nod_buf;
	int err;

	branch = &parent->nbranch[iip];
	nnode = branch->nnode;
	if (nnode) {
		/* Already in the in-memory tree - just note that fact */
		path->in_tree = 1;
		path->ptr.nnode = nnode;
		return nnode;
	}
	/* Not in the tree: build it in the scan path's scratch space */
	nnode = &path->nnode;
	path->in_tree = 0;
	path->ptr.nnode = nnode;
	memset(nnode, 0, sizeof(struct ubifs_nnode));
	if (branch->lnum == 0) {
		/*
		 * This nnode was not written which just means that the LEB
		 * properties in the subtree below it describe empty LEBs. We
		 * make the nnode as though we had read it, which in fact means
		 * doing almost nothing.
		 */
		if (c->big_lpt)
			nnode->num = calc_nnode_num_from_parent(c, parent, iip);
	} else {
		err = ubifs_leb_read(c, branch->lnum, buf, branch->offs,
				     c->nnode_sz, 1);
		if (err)
			return ERR_PTR(err);
		err = ubifs_unpack_nnode(c, buf, nnode);
		if (err)
			return ERR_PTR(err);
	}
	err = validate_nnode(c, nnode, parent, iip);
	if (err)
		return ERR_PTR(err);
	/* In the small model node numbers are not stored, so compute it now */
	if (!c->big_lpt)
		nnode->num = calc_nnode_num_from_parent(c, parent, iip);
	nnode->level = parent->level - 1;
	nnode->parent = parent;
	nnode->iip = iip;
	return nnode;
}
1842 
/**
 * scan_get_pnode - for the scan, get a pnode from either the tree or flash.
 * @c: UBIFS file-system description object
 * @path: scan path element in which to build the pnode if it is not in memory
 * @parent: parent nnode
 * @iip: index in parent of the pnode
 *
 * Unlike ubifs_get_pnode(), a node that is not already in the in-memory tree
 * is built in @path's scratch space instead of being allocated, and
 * @path->in_tree records which case occurred.
 *
 * This function returns a pointer to the pnode on success or a negative error
 * code on failure.
 */
static struct ubifs_pnode *scan_get_pnode(struct ubifs_info *c,
					  struct lpt_scan_node *path,
					  struct ubifs_nnode *parent, int iip)
{
	struct ubifs_nbranch *branch;
	struct ubifs_pnode *pnode;
	void *buf = c->lpt_nod_buf;
	int err;

	branch = &parent->nbranch[iip];
	pnode = branch->pnode;
	if (pnode) {
		/* Already in the in-memory tree - just note that fact */
		path->in_tree = 1;
		path->ptr.pnode = pnode;
		return pnode;
	}
	/* Not in the tree: build it in the scan path's scratch space */
	pnode = &path->pnode;
	path->in_tree = 0;
	path->ptr.pnode = pnode;
	memset(pnode, 0, sizeof(struct ubifs_pnode));
	if (branch->lnum == 0) {
		/*
		 * This pnode was not written which just means that the LEB
		 * properties in it describe empty LEBs. We make the pnode as
		 * though we had read it.
		 */
		int i;

		if (c->big_lpt)
			pnode->num = calc_pnode_num_from_parent(c, parent, iip);
		for (i = 0; i < UBIFS_LPT_FANOUT; i++) {
			struct ubifs_lprops * const lprops = &pnode->lprops[i];

			/* Empty LEB: entirely free, categorized accordingly */
			lprops->free = c->leb_size;
			lprops->flags = ubifs_categorize_lprops(c, lprops);
		}
	} else {
		ubifs_assert(branch->lnum >= c->lpt_first &&
			     branch->lnum <= c->lpt_last);
		ubifs_assert(branch->offs >= 0 && branch->offs < c->leb_size);
		err = ubifs_leb_read(c, branch->lnum, buf, branch->offs,
				     c->pnode_sz, 1);
		if (err)
			return ERR_PTR(err);
		err = unpack_pnode(c, buf, pnode);
		if (err)
			return ERR_PTR(err);
	}
	err = validate_pnode(c, pnode, parent, iip);
	if (err)
		return ERR_PTR(err);
	/* In the small model node numbers are not stored, so compute it now */
	if (!c->big_lpt)
		pnode->num = calc_pnode_num_from_parent(c, parent, iip);
	pnode->parent = parent;
	pnode->iip = iip;
	set_pnode_lnum(c, pnode);
	return pnode;
}
1911 
/**
 * ubifs_lpt_scan_nolock - scan the LPT.
 * @c: UBIFS file-system description object
 * @start_lnum: LEB number from which to start scanning (or -1 to continue
 *              after @end_lnum, wrapping to the start of the main area)
 * @end_lnum: LEB number at which scanning stops (inclusive)
 * @scan_cb: callback called for each lprops; its return value may include
 *           %LPT_SCAN_ADD (add the scanned path to the in-memory tree) and/or
 *           %LPT_SCAN_STOP (stop scanning), or be negative on error
 * @data: data passed through to the callback
 *
 * This function returns %0 on success and a negative error code on failure.
 * If the scan reaches @end_lnum without the callback requesting a stop,
 * %-ENOSPC is returned.
 */
int ubifs_lpt_scan_nolock(struct ubifs_info *c, int start_lnum, int end_lnum,
			  ubifs_lpt_scan_callback scan_cb, void *data)
{
	int err = 0, i, h, iip, shft;
	struct ubifs_nnode *nnode;
	struct ubifs_pnode *pnode;
	struct lpt_scan_node *path;

	if (start_lnum == -1) {
		/* Continue from just after end_lnum, wrapping if needed */
		start_lnum = end_lnum + 1;
		if (start_lnum >= c->leb_cnt)
			start_lnum = c->main_first;
	}

	ubifs_assert(start_lnum >= c->main_first && start_lnum < c->leb_cnt);
	ubifs_assert(end_lnum >= c->main_first && end_lnum < c->leb_cnt);

	if (!c->nroot) {
		err = ubifs_read_nnode(c, NULL, 0);
		if (err)
			return err;
	}

	/* One scratch path element per tree level (plus the root at [0]) */
	path = kmalloc(sizeof(struct lpt_scan_node) * (c->lpt_hght + 1),
		       GFP_NOFS);
	if (!path)
		return -ENOMEM;

	path[0].ptr.nnode = c->nroot;
	path[0].in_tree = 1;
again:
	/* Descend to the pnode containing start_lnum */
	nnode = c->nroot;
	i = start_lnum - c->main_first;
	shft = c->lpt_hght * UBIFS_LPT_FANOUT_SHIFT;
	for (h = 1; h < c->lpt_hght; h++) {
		iip = ((i >> shft) & (UBIFS_LPT_FANOUT - 1));
		shft -= UBIFS_LPT_FANOUT_SHIFT;
		nnode = scan_get_nnode(c, path + h, nnode, iip);
		if (IS_ERR(nnode)) {
			err = PTR_ERR(nnode);
			goto out;
		}
	}
	iip = ((i >> shft) & (UBIFS_LPT_FANOUT - 1));
	shft -= UBIFS_LPT_FANOUT_SHIFT;
	pnode = scan_get_pnode(c, path + h, nnode, iip);
	if (IS_ERR(pnode)) {
		err = PTR_ERR(pnode);
		goto out;
	}
	iip = (i & (UBIFS_LPT_FANOUT - 1));

	/* Loop for each lprops */
	while (1) {
		struct ubifs_lprops *lprops = &pnode->lprops[iip];
		int ret, lnum = lprops->lnum;

		ret = scan_cb(c, lprops, path[h].in_tree, data);
		if (ret < 0) {
			err = ret;
			goto out;
		}
		if (ret & LPT_SCAN_ADD) {
			/* Add all the nodes in path to the tree in memory */
			for (h = 1; h < c->lpt_hght; h++) {
				const size_t sz = sizeof(struct ubifs_nnode);
				struct ubifs_nnode *parent;

				if (path[h].in_tree)
					continue;
				/* Copy the scratch nnode into the real tree */
				nnode = kmemdup(&path[h].nnode, sz, GFP_NOFS);
				if (!nnode) {
					err = -ENOMEM;
					goto out;
				}
				parent = nnode->parent;
				parent->nbranch[nnode->iip].nnode = nnode;
				path[h].ptr.nnode = nnode;
				path[h].in_tree = 1;
				/* Re-point the child level at the new copy */
				path[h + 1].cnode.parent = nnode;
			}
			if (path[h].in_tree)
				ubifs_ensure_cat(c, lprops);
			else {
				const size_t sz = sizeof(struct ubifs_pnode);
				struct ubifs_nnode *parent;

				/* Copy the scratch pnode into the real tree */
				pnode = kmemdup(&path[h].pnode, sz, GFP_NOFS);
				if (!pnode) {
					err = -ENOMEM;
					goto out;
				}
				parent = pnode->parent;
				parent->nbranch[pnode->iip].pnode = pnode;
				path[h].ptr.pnode = pnode;
				path[h].in_tree = 1;
				update_cats(c, pnode);
				c->pnodes_have += 1;
			}
			err = dbg_check_lpt_nodes(c, (struct ubifs_cnode *)
						  c->nroot, 0, 0);
			if (err)
				goto out;
			err = dbg_check_cats(c);
			if (err)
				goto out;
		}
		if (ret & LPT_SCAN_STOP) {
			err = 0;
			break;
		}
		/* Get the next lprops */
		if (lnum == end_lnum) {
			/*
			 * We got to the end without finding what we were
			 * looking for
			 */
			err = -ENOSPC;
			goto out;
		}
		if (lnum + 1 >= c->leb_cnt) {
			/* Wrap-around to the beginning */
			start_lnum = c->main_first;
			goto again;
		}
		if (iip + 1 < UBIFS_LPT_FANOUT) {
			/* Next lprops is in the same pnode */
			iip += 1;
			continue;
		}
		/* We need to get the next pnode. Go up until we can go right */
		iip = pnode->iip;
		while (1) {
			h -= 1;
			ubifs_assert(h >= 0);
			nnode = path[h].ptr.nnode;
			if (iip + 1 < UBIFS_LPT_FANOUT)
				break;
			iip = nnode->iip;
		}
		/* Go right */
		iip += 1;
		/* Descend to the pnode */
		h += 1;
		for (; h < c->lpt_hght; h++) {
			nnode = scan_get_nnode(c, path + h, nnode, iip);
			if (IS_ERR(nnode)) {
				err = PTR_ERR(nnode);
				goto out;
			}
			/* After the first step down, always take branch 0 */
			iip = 0;
		}
		pnode = scan_get_pnode(c, path + h, nnode, iip);
		if (IS_ERR(pnode)) {
			err = PTR_ERR(pnode);
			goto out;
		}
		iip = 0;
	}
out:
	kfree(path);
	return err;
}
2086 
/**
 * dbg_chk_pnode - debug-check a pnode and its lprops.
 * @c: UBIFS file-system description object
 * @pnode: pnode to check
 * @col: expected pnode number (derived from its position in the tree)
 *
 * Checks the pnode number, each lprops' LEB number, the consistency of the
 * category in the flags with the TAKEN/INDEX flags, that each lprops is
 * present in its category heap or list, and the free/dirty invariants of the
 * EMPTY/FREEABLE/FRDI_IDX categories.
 *
 * Returns %0 if the pnode is consistent and %-EINVAL otherwise.
 */
static int dbg_chk_pnode(struct ubifs_info *c, struct ubifs_pnode *pnode,
			 int col)
{
	int i;

	if (pnode->num != col) {
		ubifs_err("pnode num %d expected %d parent num %d iip %d",
			  pnode->num, col, pnode->parent->num, pnode->iip);
		return -EINVAL;
	}
	for (i = 0; i < UBIFS_LPT_FANOUT; i++) {
		struct ubifs_lprops *lp, *lprops = &pnode->lprops[i];
		/* LEB number implied by the pnode's position in the tree */
		int lnum = (pnode->num << UBIFS_LPT_FANOUT_SHIFT) + i +
			   c->main_first;
		int found, cat = lprops->flags & LPROPS_CAT_MASK;
		struct ubifs_lpt_heap *heap;
		struct list_head *list = NULL;

		/* Entries beyond the volume size are not meaningful */
		if (lnum >= c->leb_cnt)
			continue;
		if (lprops->lnum != lnum) {
			ubifs_err("bad LEB number %d expected %d",
				  lprops->lnum, lnum);
			return -EINVAL;
		}
		if (lprops->flags & LPROPS_TAKEN) {
			/* Taken LEBs must be uncategorized */
			if (cat != LPROPS_UNCAT) {
				ubifs_err("LEB %d taken but not uncat %d",
					  lprops->lnum, cat);
				return -EINVAL;
			}
			continue;
		}
		if (lprops->flags & LPROPS_INDEX) {
			/* Only these categories are valid for index LEBs */
			switch (cat) {
			case LPROPS_UNCAT:
			case LPROPS_DIRTY_IDX:
			case LPROPS_FRDI_IDX:
				break;
			default:
				ubifs_err("LEB %d index but cat %d",
					  lprops->lnum, cat);
				return -EINVAL;
			}
		} else {
			/* Only these categories are valid for non-index LEBs */
			switch (cat) {
			case LPROPS_UNCAT:
			case LPROPS_DIRTY:
			case LPROPS_FREE:
			case LPROPS_EMPTY:
			case LPROPS_FREEABLE:
				break;
			default:
				ubifs_err("LEB %d not index but cat %d",
					  lprops->lnum, cat);
				return -EINVAL;
			}
		}
		/* Pick the list that list-categorized lprops should be on */
		switch (cat) {
		case LPROPS_UNCAT:
			list = &c->uncat_list;
			break;
		case LPROPS_EMPTY:
			list = &c->empty_list;
			break;
		case LPROPS_FREEABLE:
			list = &c->freeable_list;
			break;
		case LPROPS_FRDI_IDX:
			list = &c->frdi_idx_list;
			break;
		}
		found = 0;
		switch (cat) {
		case LPROPS_DIRTY:
		case LPROPS_DIRTY_IDX:
		case LPROPS_FREE:
			/* Heap-categorized: verify heap position matches */
			heap = &c->lpt_heap[cat - 1];
			if (lprops->hpos < heap->cnt &&
			    heap->arr[lprops->hpos] == lprops)
				found = 1;
			break;
		case LPROPS_UNCAT:
		case LPROPS_EMPTY:
		case LPROPS_FREEABLE:
		case LPROPS_FRDI_IDX:
			/*
			 * NOTE(review): 'lp' is not assigned anywhere before
			 * this comparison in the code as shown - a walk over
			 * 'list' (e.g. list_for_each_entry) appears to be
			 * expected here; confirm against the upstream source.
			 */
			if (lprops == lp) {
				found = 1;
				break;
			}
			break;
		}
		if (!found) {
			ubifs_err("LEB %d cat %d not found in cat heap/list",
				  lprops->lnum, cat);
			return -EINVAL;
		}
		switch (cat) {
		case LPROPS_EMPTY:
			if (lprops->free != c->leb_size) {
				ubifs_err("LEB %d cat %d free %d dirty %d",
					  lprops->lnum, cat, lprops->free,
					  lprops->dirty);
				return -EINVAL;
			}
			/* fall through - empty LEBs must also pass this check */
		case LPROPS_FREEABLE:
		case LPROPS_FRDI_IDX:
			if (lprops->free + lprops->dirty != c->leb_size) {
				ubifs_err("LEB %d cat %d free %d dirty %d",
					  lprops->lnum, cat, lprops->free,
					  lprops->dirty);
				return -EINVAL;
			}
		}
	}
	return 0;
}
2213 
/**
 * dbg_check_lpt_nodes - debug-check the in-memory LPT nodes.
 * @c: UBIFS file-system description object
 * @cnode: next cnode (nnode or pnode) to check (start with the root)
 * @row: row of @cnode (root is zero)
 * @col: column of @cnode (leftmost is zero)
 *
 * Walks the in-memory LPT iteratively (no recursion), checking each nnode's
 * number against the number computed from its (row, col) position, and each
 * pnode via dbg_chk_pnode().  Does nothing unless lprops checking is enabled.
 *
 * This function returns %0 on success and a negative error code on failure.
 */
int dbg_check_lpt_nodes(struct ubifs_info *c, struct ubifs_cnode *cnode,
			int row, int col)
{
	struct ubifs_nnode *nnode, *nn;
	struct ubifs_cnode *cn;
	int num, iip = 0, err;

	if (!dbg_is_chk_lprops(c))
		return 0;

	while (cnode) {
		ubifs_assert(row >= 0);
		nnode = cnode->parent;
		if (cnode->level) {
			/* cnode is a nnode */
			num = calc_nnode_num(row, col);
			if (cnode->num != num) {
				ubifs_err("nnode num %d expected %d parent num %d iip %d",
					  cnode->num, num,
					  (nnode ? nnode->num : 0), cnode->iip);
				return -EINVAL;
			}
			nn = (struct ubifs_nnode *)cnode;
			/* Find the first in-memory child to descend into */
			while (iip < UBIFS_LPT_FANOUT) {
				cn = nn->nbranch[iip].cnode;
				if (cn) {
					/* Go down */
					row += 1;
					col <<= UBIFS_LPT_FANOUT_SHIFT;
					col += iip;
					iip = 0;
					cnode = cn;
					break;
				}
				/* Go right */
				iip += 1;
			}
			/* If we descended, restart the loop at the child */
			if (iip < UBIFS_LPT_FANOUT)
				continue;
		} else {
			struct ubifs_pnode *pnode;

			/* cnode is a pnode */
			pnode = (struct ubifs_pnode *)cnode;
			err = dbg_chk_pnode(c, pnode, col);
			if (err)
				return err;
		}
		/* Go up and to the right */
		row -= 1;
		col >>= UBIFS_LPT_FANOUT_SHIFT;
		iip = cnode->iip + 1;
		cnode = (struct ubifs_cnode *)nnode;
	}
	return 0;
}