Linux Kernel  3.7.1
tnc.c
1 /*
2  * This file is part of UBIFS.
3  *
4  * Copyright (C) 2006-2008 Nokia Corporation.
5  *
6  * This program is free software; you can redistribute it and/or modify it
7  * under the terms of the GNU General Public License version 2 as published by
8  * the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but WITHOUT
11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12  * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13  * more details.
14  *
15  * You should have received a copy of the GNU General Public License along with
16  * this program; if not, write to the Free Software Foundation, Inc., 51
17  * Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
18  *
19  * Authors: Adrian Hunter
20  * Artem Bityutskiy (Битюцкий Артём)
21  */
22 
23 /*
24  * This file implements TNC (Tree Node Cache) which caches indexing nodes of
25  * the UBIFS B-tree.
26  *
27  * At the moment the locking rules of the TNC tree are quite simple and
28  * straightforward. We just have a mutex and lock it when we traverse the
29  * tree. If a znode is not in memory, we read it from flash while still having
30  * the mutex locked.
31  */
32 
33 #include <linux/crc32.h>
34 #include <linux/slab.h>
35 #include "ubifs.h"
36 
37 /*
38  * Returned codes of 'matches_name()' and 'fallible_matches_name()' functions.
39  * @NAME_LESS: name corresponding to the first argument is less than second
40  * @NAME_MATCHES: names match
41  * @NAME_GREATER: name corresponding to the first argument is greater than
42  * second
43  * @NOT_ON_MEDIA: node referred by zbranch does not exist on the media
44  *
45  * These constants were introduced to improve readability.
46  */
47 enum {
48  NAME_LESS = 0,
49  NAME_MATCHES = 1,
50  NAME_GREATER = 2,
51  NOT_ON_MEDIA = 3,
52 };
53 
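/*
 * insert_old_idx() records the on-flash position (@lnum:@offs) of an index
 * node that has been obsoleted since the commit started. The positions are
 * kept in the c->old_idx RB-tree, sorted by LEB number and offset, so that
 * the commit and garbage collection code can still treat them as part of
 * the last committed ("old") index on the media. Returns %0 on success (a
 * duplicate insertion is reported but not treated as an error) and %-ENOMEM
 * if allocation fails.
 */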
77 static int insert_old_idx(struct ubifs_info *c, int lnum, int offs)
78 {
79  struct ubifs_old_idx *old_idx, *o;
80  struct rb_node **p, *parent = NULL;
81 
82  old_idx = kmalloc(sizeof(struct ubifs_old_idx), GFP_NOFS);
83  if (unlikely(!old_idx))
84  return -ENOMEM;
85  old_idx->lnum = lnum;
86  old_idx->offs = offs;
87 
88  p = &c->old_idx.rb_node;
89  while (*p) {
90  parent = *p;
91  o = rb_entry(parent, struct ubifs_old_idx, rb);
92  if (lnum < o->lnum)
93  p = &(*p)->rb_left;
94  else if (lnum > o->lnum)
95  p = &(*p)->rb_right;
96  else if (offs < o->offs)
97  p = &(*p)->rb_left;
98  else if (offs > o->offs)
99  p = &(*p)->rb_right;
100  else {
101  ubifs_err("old idx added twice!");
102  kfree(old_idx);
103  return 0;
104  }
105  }
106  rb_link_node(&old_idx->rb, parent, p);
107  rb_insert_color(&old_idx->rb, &c->old_idx);
108  return 0;
109 }
110 
118 int insert_old_idx_znode(struct ubifs_info *c, struct ubifs_znode *znode)
119 {
120  if (znode->parent) {
121  struct ubifs_zbranch *zbr;
122 
123  zbr = &znode->parent->zbranch[znode->iip];
124  if (zbr->len)
125  return insert_old_idx(c, zbr->lnum, zbr->offs);
126  } else
127  if (c->zroot.len)
128  return insert_old_idx(c, c->zroot.lnum,
129  c->zroot.offs);
130  return 0;
131 }
132 
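/*
 * ins_clr_old_idx_znode() is like 'insert_old_idx_znode()', but after
 * recording the znode's old position it also zeroes the position (LEB
 * number, offset and length) stored in the parent zbranch, or in c->zroot
 * for the root znode, because the znode is going to be written to a new
 * location.
 */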
140 static int ins_clr_old_idx_znode(struct ubifs_info *c,
141  struct ubifs_znode *znode)
142 {
143  int err;
144 
145  if (znode->parent) {
146  struct ubifs_zbranch *zbr;
147 
148  zbr = &znode->parent->zbranch[znode->iip];
149  if (zbr->len) {
150  err = insert_old_idx(c, zbr->lnum, zbr->offs);
151  if (err)
152  return err;
153  zbr->lnum = 0;
154  zbr->offs = 0;
155  zbr->len = 0;
156  }
157  } else
158  if (c->zroot.len) {
159  err = insert_old_idx(c, c->zroot.lnum, c->zroot.offs);
160  if (err)
161  return err;
162  c->zroot.lnum = 0;
163  c->zroot.offs = 0;
164  c->zroot.len = 0;
165  }
166  return 0;
167 }
168 
179 void destroy_old_idx(struct ubifs_info *c)
180 {
181  struct rb_node *this = c->old_idx.rb_node;
182  struct ubifs_old_idx *old_idx;
183 
184  while (this) {
185  if (this->rb_left) {
186  this = this->rb_left;
187  continue;
188  } else if (this->rb_right) {
189  this = this->rb_right;
190  continue;
191  }
192  old_idx = rb_entry(this, struct ubifs_old_idx, rb);
193  this = rb_parent(this);
194  if (this) {
195  if (this->rb_left == &old_idx->rb)
196  this->rb_left = NULL;
197  else
198  this->rb_right = NULL;
199  }
200  kfree(old_idx);
201  }
202  c->old_idx = RB_ROOT;
203 }
204 
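/*
 * copy_znode() makes an in-memory copy of @znode for copy-on-write: the
 * copy is marked dirty and not COW, the original is marked obsolete, and
 * for non-leaf znodes the children are re-parented to the copy. Returns the
 * copy or an ERR_PTR() on allocation failure.
 */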
212 static struct ubifs_znode *copy_znode(struct ubifs_info *c,
213  struct ubifs_znode *znode)
214 {
215  struct ubifs_znode *zn;
216 
217  zn = kmalloc(c->max_znode_sz, GFP_NOFS);
218  if (unlikely(!zn))
219  return ERR_PTR(-ENOMEM);
220 
221  memcpy(zn, znode, c->max_znode_sz);
222  zn->cnext = NULL;
223  __set_bit(DIRTY_ZNODE, &zn->flags);
224  __clear_bit(COW_ZNODE, &zn->flags);
225 
226  ubifs_assert(!ubifs_zn_obsolete(znode));
227  __set_bit(OBSOLETE_ZNODE, &znode->flags);
228 
229  if (znode->level != 0) {
230  int i;
231  const int n = zn->child_cnt;
232 
233  /* The children now have new parent */
234  for (i = 0; i < n; i++) {
235  struct ubifs_zbranch *zbr = &zn->zbranch[i];
236 
237  if (zbr->znode)
238  zbr->znode->parent = zn;
239  }
240  }
241 
242  atomic_long_inc(&c->dirty_zn_cnt);
243  return zn;
244 }
245 
254 static int add_idx_dirt(struct ubifs_info *c, int lnum, int dirt)
255 {
256  c->calc_idx_sz -= ALIGN(dirt, 8);
257  return ubifs_add_dirt(c, lnum, dirt);
258 }
259 
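/*
 * dirty_cow_znode() is the copy-on-write primitive of the TNC. If the znode
 * referenced by @zbr is not taking part in a commit, it is simply marked
 * dirty in place (accounting its old on-flash length as index dirt) and
 * returned. Otherwise it is copied with 'copy_znode()', its old position is
 * recorded in the old-index tree, and @zbr is re-pointed at the copy with a
 * zeroed position.
 */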
267 static struct ubifs_znode *dirty_cow_znode(struct ubifs_info *c,
268  struct ubifs_zbranch *zbr)
269 {
270  struct ubifs_znode *znode = zbr->znode;
271  struct ubifs_znode *zn;
272  int err;
273 
274  if (!ubifs_zn_cow(znode)) {
275  /* znode is not being committed */
276  if (!test_and_set_bit(DIRTY_ZNODE, &znode->flags)) {
277  atomic_long_inc(&c->dirty_zn_cnt);
278  atomic_long_dec(&c->clean_zn_cnt);
279  atomic_long_dec(&ubifs_clean_zn_cnt);
280  err = add_idx_dirt(c, zbr->lnum, zbr->len);
281  if (unlikely(err))
282  return ERR_PTR(err);
283  }
284  return znode;
285  }
286 
287  zn = copy_znode(c, znode);
288  if (IS_ERR(zn))
289  return zn;
290 
291  if (zbr->len) {
292  err = insert_old_idx(c, zbr->lnum, zbr->offs);
293  if (unlikely(err))
294  return ERR_PTR(err);
295  err = add_idx_dirt(c, zbr->lnum, zbr->len);
296  } else
297  err = 0;
298 
299  zbr->znode = zn;
300  zbr->lnum = 0;
301  zbr->offs = 0;
302  zbr->len = 0;
303 
304  if (unlikely(err))
305  return ERR_PTR(err);
306  return zn;
307 }
308 
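/*
 * lnc_add() adds a directory entry node to the "leaf node cache": the node
 * is validated, duplicated with kmemdup(), and attached to @zbr->leaf. Only
 * nodes with hashed keys (directory and extended attribute entries) are
 * cached, and a failed allocation is not an error because the cache is only
 * an optimization.
 */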
329 static int lnc_add(struct ubifs_info *c, struct ubifs_zbranch *zbr,
330  const void *node)
331 {
332  int err;
333  void *lnc_node;
334  const struct ubifs_dent_node *dent = node;
335 
336  ubifs_assert(!zbr->leaf);
337  ubifs_assert(zbr->len != 0);
338  ubifs_assert(is_hash_key(c, &zbr->key));
339 
340  err = ubifs_validate_entry(c, dent);
341  if (err) {
342  dump_stack();
343  ubifs_dump_node(c, dent);
344  return err;
345  }
346 
347  lnc_node = kmemdup(node, zbr->len, GFP_NOFS);
348  if (!lnc_node)
349  /* We don't have to have the cache, so no error */
350  return 0;
351 
352  zbr->leaf = lnc_node;
353  return 0;
354 }
355 
365 static int lnc_add_directly(struct ubifs_info *c, struct ubifs_zbranch *zbr,
366  void *node)
367 {
368  int err;
369 
370  ubifs_assert(!zbr->leaf);
371  ubifs_assert(zbr->len != 0);
372 
373  err = ubifs_validate_entry(c, node);
374  if (err) {
375  dump_stack();
376  ubifs_dump_node(c, node);
377  return err;
378  }
379 
380  zbr->leaf = node;
381  return 0;
382 }
383 
389 static void lnc_free(struct ubifs_zbranch *zbr)
390 {
391  if (!zbr->leaf)
392  return;
393  kfree(zbr->leaf);
394  zbr->leaf = NULL;
395 }
396 
408 static int tnc_read_node_nm(struct ubifs_info *c, struct ubifs_zbranch *zbr,
409  void *node)
410 {
411  int err;
412 
413  ubifs_assert(is_hash_key(c, &zbr->key));
414 
415  if (zbr->leaf) {
416  /* Read from the leaf node cache */
417  ubifs_assert(zbr->len != 0);
418  memcpy(node, zbr->leaf, zbr->len);
419  return 0;
420  }
421 
422  err = ubifs_tnc_read_node(c, zbr, node);
423  if (err)
424  return err;
425 
426  /* Add the node to the leaf node cache */
427  err = lnc_add(c, zbr, node);
428  return err;
429 }
430 
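/*
 * try_read_node() reads a node without reporting integrity problems: it
 * returns %1 if a node with the expected magic, type, length and CRC was
 * read, %0 if the data at @lnum:@offs does not look like the wanted node,
 * and a negative error code only if the read itself failed. Data node CRCs
 * are skipped when 'no_chk_data_crc' is set, except while mounting or
 * re-mounting read-write.
 */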
455 static int try_read_node(const struct ubifs_info *c, void *buf, int type,
456  int len, int lnum, int offs)
457 {
458  int err, node_len;
459  struct ubifs_ch *ch = buf;
460  uint32_t crc, node_crc;
461 
462  dbg_io("LEB %d:%d, %s, length %d", lnum, offs, dbg_ntype(type), len);
463 
464  err = ubifs_leb_read(c, lnum, buf, offs, len, 1);
465  if (err) {
466  ubifs_err("cannot read node type %d from LEB %d:%d, error %d",
467  type, lnum, offs, err);
468  return err;
469  }
470 
471  if (le32_to_cpu(ch->magic) != UBIFS_NODE_MAGIC)
472  return 0;
473 
474  if (ch->node_type != type)
475  return 0;
476 
477  node_len = le32_to_cpu(ch->len);
478  if (node_len != len)
479  return 0;
480 
481  if (type == UBIFS_DATA_NODE && c->no_chk_data_crc && !c->mounting &&
482  !c->remounting_rw)
483  return 1;
484 
485  crc = crc32(UBIFS_CRC32_INIT, buf + 8, node_len - 8);
486  node_crc = le32_to_cpu(ch->crc);
487  if (crc != node_crc)
488  return 0;
489 
490  return 1;
491 }
492 
503 static int fallible_read_node(struct ubifs_info *c, const union ubifs_key *key,
504  struct ubifs_zbranch *zbr, void *node)
505 {
506  int ret;
507 
508  dbg_tnck(key, "LEB %d:%d, key ", zbr->lnum, zbr->offs);
509 
510  ret = try_read_node(c, node, key_type(c, key), zbr->len, zbr->lnum,
511  zbr->offs);
512  if (ret == 1) {
513  union ubifs_key node_key;
514  struct ubifs_dent_node *dent = node;
515 
516  /* All nodes have key in the same place */
517  key_read(c, &dent->key, &node_key);
518  if (keys_cmp(c, key, &node_key) != 0)
519  ret = 0;
520  }
521  if (ret == 0 && c->replaying)
522  dbg_mntk(key, "dangling branch LEB %d:%d len %d, key ",
523  zbr->lnum, zbr->offs, zbr->len);
524  return ret;
525 }
526 
538 static int matches_name(struct ubifs_info *c, struct ubifs_zbranch *zbr,
539  const struct qstr *nm)
540 {
541  struct ubifs_dent_node *dent;
542  int nlen, err;
543 
544  /* If possible, match against the dent in the leaf node cache */
545  if (!zbr->leaf) {
546  dent = kmalloc(zbr->len, GFP_NOFS);
547  if (!dent)
548  return -ENOMEM;
549 
550  err = ubifs_tnc_read_node(c, zbr, dent);
551  if (err)
552  goto out_free;
553 
554  /* Add the node to the leaf node cache */
555  err = lnc_add_directly(c, zbr, dent);
556  if (err)
557  goto out_free;
558  } else
559  dent = zbr->leaf;
560 
561  nlen = le16_to_cpu(dent->nlen);
562  err = memcmp(dent->name, nm->name, min_t(int, nlen, nm->len));
563  if (err == 0) {
564  if (nlen == nm->len)
565  return NAME_MATCHES;
566  else if (nlen < nm->len)
567  return NAME_LESS;
568  else
569  return NAME_GREATER;
570  } else if (err < 0)
571  return NAME_LESS;
572  else
573  return NAME_GREATER;
574 
575 out_free:
576  kfree(dent);
577  return err;
578 }
579 
588 static struct ubifs_znode *get_znode(struct ubifs_info *c,
589  struct ubifs_znode *znode, int n)
590 {
591  struct ubifs_zbranch *zbr;
592 
593  zbr = &znode->zbranch[n];
594  if (zbr->znode)
595  znode = zbr->znode;
596  else
597  znode = ubifs_load_znode(c, zbr, znode, n);
598  return znode;
599 }
600 
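/*
 * tnc_next() steps to the next element of the TNC in key order: it advances
 * within the current znode if possible, otherwise it climbs towards the
 * root until a right sibling exists and then descends to that subtree's
 * leftmost level-0 znode. Returns %-ENOENT at the end of the index.
 * 'tnc_prev()' below is the mirror image.
 */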
610 static int tnc_next(struct ubifs_info *c, struct ubifs_znode **zn, int *n)
611 {
612  struct ubifs_znode *znode = *zn;
613  int nn = *n;
614 
615  nn += 1;
616  if (nn < znode->child_cnt) {
617  *n = nn;
618  return 0;
619  }
620  while (1) {
621  struct ubifs_znode *zp;
622 
623  zp = znode->parent;
624  if (!zp)
625  return -ENOENT;
626  nn = znode->iip + 1;
627  znode = zp;
628  if (nn < znode->child_cnt) {
629  znode = get_znode(c, znode, nn);
630  if (IS_ERR(znode))
631  return PTR_ERR(znode);
632  while (znode->level != 0) {
633  znode = get_znode(c, znode, 0);
634  if (IS_ERR(znode))
635  return PTR_ERR(znode);
636  }
637  nn = 0;
638  break;
639  }
640  }
641  *zn = znode;
642  *n = nn;
643  return 0;
644 }
645 
655 static int tnc_prev(struct ubifs_info *c, struct ubifs_znode **zn, int *n)
656 {
657  struct ubifs_znode *znode = *zn;
658  int nn = *n;
659 
660  if (nn > 0) {
661  *n = nn - 1;
662  return 0;
663  }
664  while (1) {
665  struct ubifs_znode *zp;
666 
667  zp = znode->parent;
668  if (!zp)
669  return -ENOENT;
670  nn = znode->iip - 1;
671  znode = zp;
672  if (nn >= 0) {
673  znode = get_znode(c, znode, nn);
674  if (IS_ERR(znode))
675  return PTR_ERR(znode);
676  while (znode->level != 0) {
677  nn = znode->child_cnt - 1;
678  znode = get_znode(c, znode, nn);
679  if (IS_ERR(znode))
680  return PTR_ERR(znode);
681  }
682  nn = znode->child_cnt - 1;
683  break;
684  }
685  }
686  *zn = znode;
687  *n = nn;
688  return 0;
689 }
690 
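/*
 * resolve_collision() deals with hashed keys, where several directory
 * entries may share the same key. Starting from the zbranch found by the
 * lookup, it compares names and walks left or right through the colliding
 * branches. Returns %1 and updates @zn/@n if the entry named @nm is found,
 * %0 if it is not found (with @n left at the slot after which the entry
 * would have to be inserted), or a negative error code.
 */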
707 static int resolve_collision(struct ubifs_info *c, const union ubifs_key *key,
708  struct ubifs_znode **zn, int *n,
709  const struct qstr *nm)
710 {
711  int err;
712 
713  err = matches_name(c, &(*zn)->zbranch[*n], nm);
714  if (unlikely(err < 0))
715  return err;
716  if (err == NAME_MATCHES)
717  return 1;
718 
719  if (err == NAME_GREATER) {
720  /* Look left */
721  while (1) {
722  err = tnc_prev(c, zn, n);
723  if (err == -ENOENT) {
724  ubifs_assert(*n == 0);
725  *n = -1;
726  return 0;
727  }
728  if (err < 0)
729  return err;
730  if (keys_cmp(c, &(*zn)->zbranch[*n].key, key)) {
731  /*
732  * We have found the branch after which we would
733  * like to insert, but inserting in this znode
734  * may still be wrong. Consider the following 3
735  * znodes, in the case where we are resolving a
736  * collision with Key2.
737  *
738  *                    znode zp
739  *            ----------------------
740  * level 1     |  Key0  |  Key1  |
741  *            -----------------------
742  *                 |            |
743  *       znode za  |            |  znode zb
744  *          ------------      ------------
745  * level 0  |  Key0  |        |  Key2  |
746  *          ------------      ------------
747  *
748  * The lookup finds Key2 in znode zb. Lets say
749  * there is no match and the name is greater so
750  * we look left. When we find Key0, we end up
751  * here. If we return now, we will insert into
752  * znode za at slot n = 1. But that is invalid
753  * according to the parent's keys. Key2 must
754  * be inserted into znode zb.
755  *
756  * Note, this problem is not relevant for the
757  * case when we go right, because
758  * 'tnc_insert()' would correct the parent key.
759  */
760  if (*n == (*zn)->child_cnt - 1) {
761  err = tnc_next(c, zn, n);
762  if (err) {
763  /* Should be impossible */
764  ubifs_assert(0);
765  if (err == -ENOENT)
766  err = -EINVAL;
767  return err;
768  }
769  ubifs_assert(*n == 0);
770  *n = -1;
771  }
772  return 0;
773  }
774  err = matches_name(c, &(*zn)->zbranch[*n], nm);
775  if (err < 0)
776  return err;
777  if (err == NAME_LESS)
778  return 0;
779  if (err == NAME_MATCHES)
780  return 1;
781  ubifs_assert(err == NAME_GREATER);
782  }
783  } else {
784  int nn = *n;
785  struct ubifs_znode *znode = *zn;
786 
787  /* Look right */
788  while (1) {
789  err = tnc_next(c, &znode, &nn);
790  if (err == -ENOENT)
791  return 0;
792  if (err < 0)
793  return err;
794  if (keys_cmp(c, &znode->zbranch[nn].key, key))
795  return 0;
796  err = matches_name(c, &znode->zbranch[nn], nm);
797  if (err < 0)
798  return err;
799  if (err == NAME_GREATER)
800  return 0;
801  *zn = znode;
802  *n = nn;
803  if (err == NAME_MATCHES)
804  return 1;
805  ubifs_assert(err == NAME_LESS);
806  }
807  }
808 }
809 
825 static int fallible_matches_name(struct ubifs_info *c,
826  struct ubifs_zbranch *zbr,
827  const struct qstr *nm)
828 {
829  struct ubifs_dent_node *dent;
830  int nlen, err;
831 
832  /* If possible, match against the dent in the leaf node cache */
833  if (!zbr->leaf) {
834  dent = kmalloc(zbr->len, GFP_NOFS);
835  if (!dent)
836  return -ENOMEM;
837 
838  err = fallible_read_node(c, &zbr->key, zbr, dent);
839  if (err < 0)
840  goto out_free;
841  if (err == 0) {
842  /* The node was not present */
843  err = NOT_ON_MEDIA;
844  goto out_free;
845  }
846  ubifs_assert(err == 1);
847 
848  err = lnc_add_directly(c, zbr, dent);
849  if (err)
850  goto out_free;
851  } else
852  dent = zbr->leaf;
853 
854  nlen = le16_to_cpu(dent->nlen);
855  err = memcmp(dent->name, nm->name, min_t(int, nlen, nm->len));
856  if (err == 0) {
857  if (nlen == nm->len)
858  return NAME_MATCHES;
859  else if (nlen < nm->len)
860  return NAME_LESS;
861  else
862  return NAME_GREATER;
863  } else if (err < 0)
864  return NAME_LESS;
865  else
866  return NAME_GREATER;
867 
868 out_free:
869  kfree(dent);
870  return err;
871 }
872 
895 static int fallible_resolve_collision(struct ubifs_info *c,
896  const union ubifs_key *key,
897  struct ubifs_znode **zn, int *n,
898  const struct qstr *nm, int adding)
899 {
900  struct ubifs_znode *o_znode = NULL, *znode = *zn;
901  int uninitialized_var(o_n), err, cmp, unsure = 0, nn = *n;
902 
903  cmp = fallible_matches_name(c, &znode->zbranch[nn], nm);
904  if (unlikely(cmp < 0))
905  return cmp;
906  if (cmp == NAME_MATCHES)
907  return 1;
908  if (cmp == NOT_ON_MEDIA) {
909  o_znode = znode;
910  o_n = nn;
911  /*
912  * We are unlucky and hit a dangling branch straight away.
913  * Now we do not really know where to go to find the needed
914  * branch - to the left or to the right. Well, let's try left.
915  */
916  unsure = 1;
917  } else if (!adding)
918  unsure = 1; /* Remove a dangling branch wherever it is */
919 
920  if (cmp == NAME_GREATER || unsure) {
921  /* Look left */
922  while (1) {
923  err = tnc_prev(c, zn, n);
924  if (err == -ENOENT) {
925  ubifs_assert(*n == 0);
926  *n = -1;
927  break;
928  }
929  if (err < 0)
930  return err;
931  if (keys_cmp(c, &(*zn)->zbranch[*n].key, key)) {
932  /* See comments in 'resolve_collision()' */
933  if (*n == (*zn)->child_cnt - 1) {
934  err = tnc_next(c, zn, n);
935  if (err) {
936  /* Should be impossible */
937  ubifs_assert(0);
938  if (err == -ENOENT)
939  err = -EINVAL;
940  return err;
941  }
942  ubifs_assert(*n == 0);
943  *n = -1;
944  }
945  break;
946  }
947  err = fallible_matches_name(c, &(*zn)->zbranch[*n], nm);
948  if (err < 0)
949  return err;
950  if (err == NAME_MATCHES)
951  return 1;
952  if (err == NOT_ON_MEDIA) {
953  o_znode = *zn;
954  o_n = *n;
955  continue;
956  }
957  if (!adding)
958  continue;
959  if (err == NAME_LESS)
960  break;
961  else
962  unsure = 0;
963  }
964  }
965 
966  if (cmp == NAME_LESS || unsure) {
967  /* Look right */
968  *zn = znode;
969  *n = nn;
970  while (1) {
971  err = tnc_next(c, &znode, &nn);
972  if (err == -ENOENT)
973  break;
974  if (err < 0)
975  return err;
976  if (keys_cmp(c, &znode->zbranch[nn].key, key))
977  break;
978  err = fallible_matches_name(c, &znode->zbranch[nn], nm);
979  if (err < 0)
980  return err;
981  if (err == NAME_GREATER)
982  break;
983  *zn = znode;
984  *n = nn;
985  if (err == NAME_MATCHES)
986  return 1;
987  if (err == NOT_ON_MEDIA) {
988  o_znode = znode;
989  o_n = nn;
990  }
991  }
992  }
993 
994  /* Never match a dangling branch when adding */
995  if (adding || !o_znode)
996  return 0;
997 
998  dbg_mntk(key, "dangling match LEB %d:%d len %d key ",
999  o_znode->zbranch[o_n].lnum, o_znode->zbranch[o_n].offs,
1000  o_znode->zbranch[o_n].len);
1001  *zn = o_znode;
1002  *n = o_n;
1003  return 1;
1004 }
1005 
1014 static int matches_position(struct ubifs_zbranch *zbr, int lnum, int offs)
1015 {
1016  if (zbr->lnum == lnum && zbr->offs == offs)
1017  return 1;
1018  else
1019  return 0;
1020 }
1021 
1039 static int resolve_collision_directly(struct ubifs_info *c,
1040  const union ubifs_key *key,
1041  struct ubifs_znode **zn, int *n,
1042  int lnum, int offs)
1043 {
1044  struct ubifs_znode *znode;
1045  int nn, err;
1046 
1047  znode = *zn;
1048  nn = *n;
1049  if (matches_position(&znode->zbranch[nn], lnum, offs))
1050  return 1;
1051 
1052  /* Look left */
1053  while (1) {
1054  err = tnc_prev(c, &znode, &nn);
1055  if (err == -ENOENT)
1056  break;
1057  if (err < 0)
1058  return err;
1059  if (keys_cmp(c, &znode->zbranch[nn].key, key))
1060  break;
1061  if (matches_position(&znode->zbranch[nn], lnum, offs)) {
1062  *zn = znode;
1063  *n = nn;
1064  return 1;
1065  }
1066  }
1067 
1068  /* Look right */
1069  znode = *zn;
1070  nn = *n;
1071  while (1) {
1072  err = tnc_next(c, &znode, &nn);
1073  if (err == -ENOENT)
1074  return 0;
1075  if (err < 0)
1076  return err;
1077  if (keys_cmp(c, &znode->zbranch[nn].key, key))
1078  return 0;
1079  *zn = znode;
1080  *n = nn;
1081  if (matches_position(&znode->zbranch[nn], lnum, offs))
1082  return 1;
1083  }
1084 }
1085 
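/*
 * dirty_cow_bottom_up() dirties @znode and all of its ancestors using
 * copy-on-write. The path from @znode up towards the root is recorded in
 * c->bottom_up_buf until an already-dirty subtree that is not being
 * committed is reached, and then 'dirty_cow_znode()' is applied back down
 * along the recorded path so that every parent points at the dirtied copy
 * of its child. Returns the dirtied @znode or an ERR_PTR().
 */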
1096 static struct ubifs_znode *dirty_cow_bottom_up(struct ubifs_info *c,
1097  struct ubifs_znode *znode)
1098 {
1099  struct ubifs_znode *zp;
1100  int *path = c->bottom_up_buf, p = 0;
1101 
1102  ubifs_assert(c->zroot.znode);
1103  ubifs_assert(znode);
1104  if (c->zroot.znode->level > BOTTOM_UP_HEIGHT) {
1105  kfree(c->bottom_up_buf);
1106  c->bottom_up_buf = kmalloc(c->zroot.znode->level * sizeof(int),
1107  GFP_NOFS);
1108  if (!c->bottom_up_buf)
1109  return ERR_PTR(-ENOMEM);
1110  path = c->bottom_up_buf;
1111  }
1112  if (c->zroot.znode->level) {
1113  /* Go up until parent is dirty */
1114  while (1) {
1115  int n;
1116 
1117  zp = znode->parent;
1118  if (!zp)
1119  break;
1120  n = znode->iip;
1121  ubifs_assert(p < c->zroot.znode->level);
1122  path[p++] = n;
1123  if (!zp->cnext && ubifs_zn_dirty(znode))
1124  break;
1125  znode = zp;
1126  }
1127  }
1128 
1129  /* Come back down, dirtying as we go */
1130  while (1) {
1131  struct ubifs_zbranch *zbr;
1132 
1133  zp = znode->parent;
1134  if (zp) {
1135  ubifs_assert(path[p - 1] >= 0);
1136  ubifs_assert(path[p - 1] < zp->child_cnt);
1137  zbr = &zp->zbranch[path[--p]];
1138  znode = dirty_cow_znode(c, zbr);
1139  } else {
1140  ubifs_assert(znode == c->zroot.znode);
1141  znode = dirty_cow_znode(c, &c->zroot);
1142  }
1143  if (IS_ERR(znode) || !p)
1144  break;
1145  ubifs_assert(path[p - 1] >= 0);
1146  ubifs_assert(path[p - 1] < znode->child_cnt);
1147  znode = znode->zbranch[path[p - 1]].znode;
1148  }
1149 
1150  return znode;
1151 }
1152 
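/*
 * ubifs_lookup_level0() looks @key up and descends to level 0. On return
 * @zn points to the level-0 znode and @n to a zbranch slot: the function
 * returns %1 for an exact match, %0 if the key was not found (@n is then
 * the slot of the closest smaller key, or -1), and a negative error code on
 * failure. For hashed keys which were not found and fall before the first
 * slot, the previous element is also examined, because colliding keys may
 * continue in the znode to the left (see the long comment below).
 */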
1175 int ubifs_lookup_level0(struct ubifs_info *c, const union ubifs_key *key,
1176  struct ubifs_znode **zn, int *n)
1177 {
1178  int err, exact;
1179  struct ubifs_znode *znode;
1180  unsigned long time = get_seconds();
1181 
1182  dbg_tnck(key, "search key ");
1183  ubifs_assert(key_type(c, key) < UBIFS_INVALID_KEY);
1184 
1185  znode = c->zroot.znode;
1186  if (unlikely(!znode)) {
1187  znode = ubifs_load_znode(c, &c->zroot, NULL, 0);
1188  if (IS_ERR(znode))
1189  return PTR_ERR(znode);
1190  }
1191 
1192  znode->time = time;
1193 
1194  while (1) {
1195  struct ubifs_zbranch *zbr;
1196 
1197  exact = ubifs_search_zbranch(c, znode, key, n);
1198 
1199  if (znode->level == 0)
1200  break;
1201 
1202  if (*n < 0)
1203  *n = 0;
1204  zbr = &znode->zbranch[*n];
1205 
1206  if (zbr->znode) {
1207  znode->time = time;
1208  znode = zbr->znode;
1209  continue;
1210  }
1211 
1212  /* znode is not in TNC cache, load it from the media */
1213  znode = ubifs_load_znode(c, zbr, znode, *n);
1214  if (IS_ERR(znode))
1215  return PTR_ERR(znode);
1216  }
1217 
1218  *zn = znode;
1219  if (exact || !is_hash_key(c, key) || *n != -1) {
1220  dbg_tnc("found %d, lvl %d, n %d", exact, znode->level, *n);
1221  return exact;
1222  }
1223 
1224  /*
1225  * Here is a tricky place. We have not found the key and this is a
1226  * "hashed" key, which may collide. The rest of the code deals with
1227  * situations like this:
1228  *
1229  *        | 3 | 5 |
1230  *        /       \
1231  *  | 3 | 5 |     | 6 | 7 | (x)
1232  *
1233  * Or a more complex example:
1234  *
1235  *        | 1 | 5 |
1236  *        /       \
1237  *  | 1 | 3 |     | 5 | 8 |
1238  *          \     /
1239  *    | 5 | 5 | | 6 | 7 | (x)
1240  *
1241  * In the examples, if we are looking for key "5", we may reach nodes
1242  * marked with "(x)". In this case what we have to do is look to the
1243  * left and see if there is a "5" key there. If there is, we have to
1244  * return it.
1245  *
1246  * Note, this whole situation is possible because we allow the children
1247  * of a znode to contain elements whose keys are equal to the next key
1248  * in the parent. For example, this happens if we split a
1249  * znode like this: | 3 | 5 | 5 | 6 | 7 |, which results in something
1250  * like this:
1251  *        | 3 | 5 |
1252  *        /       \
1253  *  | 3 | 5 |     | 5 | 6 | 7 |
1254  *                  ^
1255  * After the key "5" marked with "^" is removed, this becomes the first
1256  * "picture" above. What could be done is we could prohibit
1257  * splitting in the middle of the colliding sequence. Also, when
1258  * removing the leftmost key, we would have to correct the key of the
1259  * parent node, which would introduce additional complications. Namely,
1260  * if we changed the leftmost key of the parent znode, the garbage
1261  * collector would be unable to find it (GC is doing this when GC'ing
1262  * indexing LEBs). We do already have an additional RB-tree where we
1263  * save such changed znodes (see 'ins_clr_old_idx_znode()') until after
1264  * the commit, but even so this does not look easy to implement, so we
1265  * did not try it.
1266  */
1267  err = tnc_prev(c, &znode, n);
1268  if (err == -ENOENT) {
1269  dbg_tnc("found 0, lvl %d, n -1", znode->level);
1270  *n = -1;
1271  return 0;
1272  }
1273  if (unlikely(err < 0))
1274  return err;
1275  if (keys_cmp(c, key, &znode->zbranch[*n].key)) {
1276  dbg_tnc("found 0, lvl %d, n -1", znode->level);
1277  *n = -1;
1278  return 0;
1279  }
1280 
1281  dbg_tnc("found 1, lvl %d, n %d", znode->level, *n);
1282  *zn = znode;
1283  return 1;
1284 }
1285 
1311 static int lookup_level0_dirty(struct ubifs_info *c, const union ubifs_key *key,
1312  struct ubifs_znode **zn, int *n)
1313 {
1314  int err, exact;
1315  struct ubifs_znode *znode;
1316  unsigned long time = get_seconds();
1317 
1318  dbg_tnck(key, "search and dirty key ");
1319 
1320  znode = c->zroot.znode;
1321  if (unlikely(!znode)) {
1322  znode = ubifs_load_znode(c, &c->zroot, NULL, 0);
1323  if (IS_ERR(znode))
1324  return PTR_ERR(znode);
1325  }
1326 
1327  znode = dirty_cow_znode(c, &c->zroot);
1328  if (IS_ERR(znode))
1329  return PTR_ERR(znode);
1330 
1331  znode->time = time;
1332 
1333  while (1) {
1334  struct ubifs_zbranch *zbr;
1335 
1336  exact = ubifs_search_zbranch(c, znode, key, n);
1337 
1338  if (znode->level == 0)
1339  break;
1340 
1341  if (*n < 0)
1342  *n = 0;
1343  zbr = &znode->zbranch[*n];
1344 
1345  if (zbr->znode) {
1346  znode->time = time;
1347  znode = dirty_cow_znode(c, zbr);
1348  if (IS_ERR(znode))
1349  return PTR_ERR(znode);
1350  continue;
1351  }
1352 
1353  /* znode is not in TNC cache, load it from the media */
1354  znode = ubifs_load_znode(c, zbr, znode, *n);
1355  if (IS_ERR(znode))
1356  return PTR_ERR(znode);
1357  znode = dirty_cow_znode(c, zbr);
1358  if (IS_ERR(znode))
1359  return PTR_ERR(znode);
1360  }
1361 
1362  *zn = znode;
1363  if (exact || !is_hash_key(c, key) || *n != -1) {
1364  dbg_tnc("found %d, lvl %d, n %d", exact, znode->level, *n);
1365  return exact;
1366  }
1367 
1368  /*
1369  * See the huge comment in 'ubifs_lookup_level0()' which explains what
1370  * the rest of this code does.
1371  */
1372  err = tnc_prev(c, &znode, n);
1373  if (err == -ENOENT) {
1374  *n = -1;
1375  dbg_tnc("found 0, lvl %d, n -1", znode->level);
1376  return 0;
1377  }
1378  if (unlikely(err < 0))
1379  return err;
1380  if (keys_cmp(c, key, &znode->zbranch[*n].key)) {
1381  *n = -1;
1382  dbg_tnc("found 0, lvl %d, n -1", znode->level);
1383  return 0;
1384  }
1385 
1386  if (znode->cnext || !ubifs_zn_dirty(znode)) {
1387  znode = dirty_cow_bottom_up(c, znode);
1388  if (IS_ERR(znode))
1389  return PTR_ERR(znode);
1390  }
1391 
1392  dbg_tnc("found 1, lvl %d, n %d", znode->level, *n);
1393  *zn = znode;
1394  return 1;
1395 }
1396 
1407 static int maybe_leb_gced(struct ubifs_info *c, int lnum, int gc_seq1)
1408 {
1409  int gc_seq2, gced_lnum;
1410 
1411  gced_lnum = c->gced_lnum;
1412  smp_rmb();
1413  gc_seq2 = c->gc_seq;
1414  /* Same seq means no GC */
1415  if (gc_seq1 == gc_seq2)
1416  return 0;
1417  /* Different by more than 1 means we don't know */
1418  if (gc_seq1 + 1 != gc_seq2)
1419  return 1;
1420  /*
1421  * We have seen the sequence number has increased by 1. Now we need to
1422  * be sure we read the right LEB number, so read it again.
1423  */
1424  smp_rmb();
1425  if (gced_lnum != c->gced_lnum)
1426  return 1;
1427  /* Finally we can check lnum */
1428  if (gced_lnum == lnum)
1429  return 1;
1430  return 0;
1431 }
1432 
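/*
 * ubifs_tnc_locate() looks @key up and reads the corresponding leaf node
 * into @node, optionally returning its position in @lnum/@offs. Nodes with
 * hashed keys are read under the TNC mutex through the leaf node cache. For
 * other keys the mutex is dropped before reading; journal heads are never
 * garbage collected so those reads are safe, while for all other LEBs
 * 'maybe_leb_gced()' detects a race with GC and the lookup is retried with
 * the mutex held ("safely").
 */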
1446 int ubifs_tnc_locate(struct ubifs_info *c, const union ubifs_key *key,
1447  void *node, int *lnum, int *offs)
1448 {
1449  int found, n, err, safely = 0, gc_seq1;
1450  struct ubifs_znode *znode;
1451  struct ubifs_zbranch zbr, *zt;
1452 
1453 again:
1454  mutex_lock(&c->tnc_mutex);
1455  found = ubifs_lookup_level0(c, key, &znode, &n);
1456  if (!found) {
1457  err = -ENOENT;
1458  goto out;
1459  } else if (found < 0) {
1460  err = found;
1461  goto out;
1462  }
1463  zt = &znode->zbranch[n];
1464  if (lnum) {
1465  *lnum = zt->lnum;
1466  *offs = zt->offs;
1467  }
1468  if (is_hash_key(c, key)) {
1469  /*
1470  * In this case the leaf node cache gets used, so we pass the
1471  * address of the zbranch and keep the mutex locked
1472  */
1473  err = tnc_read_node_nm(c, zt, node);
1474  goto out;
1475  }
1476  if (safely) {
1477  err = ubifs_tnc_read_node(c, zt, node);
1478  goto out;
1479  }
1480  /* Drop the TNC mutex prematurely and race with garbage collection */
1481  zbr = znode->zbranch[n];
1482  gc_seq1 = c->gc_seq;
1483  mutex_unlock(&c->tnc_mutex);
1484 
1485  if (ubifs_get_wbuf(c, zbr.lnum)) {
1486  /* We do not GC journal heads */
1487  err = ubifs_tnc_read_node(c, &zbr, node);
1488  return err;
1489  }
1490 
1491  err = fallible_read_node(c, key, &zbr, node);
1492  if (err <= 0 || maybe_leb_gced(c, zbr.lnum, gc_seq1)) {
1493  /*
1494  * The node may have been GC'ed out from under us so try again
1495  * while keeping the TNC mutex locked.
1496  */
1497  safely = 1;
1498  goto again;
1499  }
1500  return 0;
1501 
1502 out:
1503  mutex_unlock(&c->tnc_mutex);
1504  return err;
1505 }
1506 
1520 int ubifs_tnc_get_bu_keys(struct ubifs_info *c, struct bu_info *bu)
1521 {
1522  int n, err = 0, lnum = -1, uninitialized_var(offs);
1523  int uninitialized_var(len);
1524  unsigned int block = key_block(c, &bu->key);
1525  struct ubifs_znode *znode;
1526 
1527  bu->cnt = 0;
1528  bu->blk_cnt = 0;
1529  bu->eof = 0;
1530 
1531  mutex_lock(&c->tnc_mutex);
1532  /* Find first key */
1533  err = ubifs_lookup_level0(c, &bu->key, &znode, &n);
1534  if (err < 0)
1535  goto out;
1536  if (err) {
1537  /* Key found */
1538  len = znode->zbranch[n].len;
1539  /* The buffer must be big enough for at least 1 node */
1540  if (len > bu->buf_len) {
1541  err = -EINVAL;
1542  goto out;
1543  }
1544  /* Add this key */
1545  bu->zbranch[bu->cnt++] = znode->zbranch[n];
1546  bu->blk_cnt += 1;
1547  lnum = znode->zbranch[n].lnum;
1548  offs = ALIGN(znode->zbranch[n].offs + len, 8);
1549  }
1550  while (1) {
1551  struct ubifs_zbranch *zbr;
1552  union ubifs_key *key;
1553  unsigned int next_block;
1554 
1555  /* Find next key */
1556  err = tnc_next(c, &znode, &n);
1557  if (err)
1558  goto out;
1559  zbr = &znode->zbranch[n];
1560  key = &zbr->key;
1561  /* See if there is another data key for this file */
1562  if (key_inum(c, key) != key_inum(c, &bu->key) ||
1563  key_type(c, key) != UBIFS_DATA_KEY) {
1564  err = -ENOENT;
1565  goto out;
1566  }
1567  if (lnum < 0) {
1568  /* First key found */
1569  lnum = zbr->lnum;
1570  offs = ALIGN(zbr->offs + zbr->len, 8);
1571  len = zbr->len;
1572  if (len > bu->buf_len) {
1573  err = -EINVAL;
1574  goto out;
1575  }
1576  } else {
1577  /*
1578  * The data nodes must be in consecutive positions in
1579  * the same LEB.
1580  */
1581  if (zbr->lnum != lnum || zbr->offs != offs)
1582  goto out;
1583  offs += ALIGN(zbr->len, 8);
1584  len = ALIGN(len, 8) + zbr->len;
1585  /* Must not exceed buffer length */
1586  if (len > bu->buf_len)
1587  goto out;
1588  }
1589  /* Allow for holes */
1590  next_block = key_block(c, key);
1591  bu->blk_cnt += (next_block - block - 1);
1592  if (bu->blk_cnt >= UBIFS_MAX_BULK_READ)
1593  goto out;
1594  block = next_block;
1595  /* Add this key */
1596  bu->zbranch[bu->cnt++] = *zbr;
1597  bu->blk_cnt += 1;
1598  /* See if we have room for more */
1599  if (bu->cnt >= UBIFS_MAX_BULK_READ)
1600  goto out;
1601  if (bu->blk_cnt >= UBIFS_MAX_BULK_READ)
1602  goto out;
1603  }
1604 out:
1605  if (err == -ENOENT) {
1606  bu->eof = 1;
1607  err = 0;
1608  }
1609  bu->gc_seq = c->gc_seq;
1610  mutex_unlock(&c->tnc_mutex);
1611  if (err)
1612  return err;
1613  /*
1614  * An enormous hole could cause bulk-read to encompass too many
1615  * page cache pages, so limit the number here.
1616  */
1617  if (bu->blk_cnt > UBIFS_MAX_BULK_READ)
1618  bu->blk_cnt = UBIFS_MAX_BULK_READ;
1619  /*
1620  * Ensure that bulk-read covers a whole number of page cache
1621  * pages.
1622  */
1623  if (UBIFS_BLOCKS_PER_PAGE == 1 ||
1624  !(bu->blk_cnt & (UBIFS_BLOCKS_PER_PAGE - 1)))
1625  return 0;
1626  if (bu->eof) {
1627  /* At the end of file we can round up */
1628  bu->blk_cnt += UBIFS_BLOCKS_PER_PAGE - 1;
1629  return 0;
1630  }
1631  /* Exclude data nodes that do not make up a whole page cache page */
1632  block = key_block(c, &bu->key) + bu->blk_cnt;
1633  block &= ~(UBIFS_BLOCKS_PER_PAGE - 1);
1634  while (bu->cnt) {
1635  if (key_block(c, &bu->zbranch[bu->cnt - 1].key) < block)
1636  break;
1637  bu->cnt -= 1;
1638  }
1639  return 0;
1640 }
1641 
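/*
 * read_wbuf() reads @len bytes from @lnum:@offs when the tail of that LEB
 * may still be sitting in a journal head's write-buffer: the part that
 * overlaps the write-buffer is copied from memory under wbuf->lock, and
 * only the data preceding the buffer is read from the flash.
 */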
1652 static int read_wbuf(struct ubifs_wbuf *wbuf, void *buf, int len, int lnum,
1653  int offs)
1654 {
1655  const struct ubifs_info *c = wbuf->c;
1656  int rlen, overlap;
1657 
1658  dbg_io("LEB %d:%d, length %d", lnum, offs, len);
1659  ubifs_assert(wbuf && lnum >= 0 && lnum < c->leb_cnt && offs >= 0);
1660  ubifs_assert(!(offs & 7) && offs < c->leb_size);
1661  ubifs_assert(offs + len <= c->leb_size);
1662 
1663  spin_lock(&wbuf->lock);
1664  overlap = (lnum == wbuf->lnum && offs + len > wbuf->offs);
1665  if (!overlap) {
1666  /* We may safely unlock the write-buffer and read the data */
1667  spin_unlock(&wbuf->lock);
1668  return ubifs_leb_read(c, lnum, buf, offs, len, 0);
1669  }
1670 
1671  /* Don't read under wbuf */
1672  rlen = wbuf->offs - offs;
1673  if (rlen < 0)
1674  rlen = 0;
1675 
1676  /* Copy the rest from the write-buffer */
1677  memcpy(buf + rlen, wbuf->buf + offs + rlen - wbuf->offs, len - rlen);
1678  spin_unlock(&wbuf->lock);
1679 
1680  if (rlen > 0)
1681  /* Read everything that goes before write-buffer */
1682  return ubifs_leb_read(c, lnum, buf, offs, rlen, 0);
1683 
1684  return 0;
1685 }
1686 
1695 static int validate_data_node(struct ubifs_info *c, void *buf,
1696  struct ubifs_zbranch *zbr)
1697 {
1698  union ubifs_key key1;
1699  struct ubifs_ch *ch = buf;
1700  int err, len;
1701 
1702  if (ch->node_type != UBIFS_DATA_NODE) {
1703  ubifs_err("bad node type (%d but expected %d)",
1704  ch->node_type, UBIFS_DATA_NODE);
1705  goto out_err;
1706  }
1707 
1708  err = ubifs_check_node(c, buf, zbr->lnum, zbr->offs, 0, 0);
1709  if (err) {
1710  ubifs_err("expected node type %d", UBIFS_DATA_NODE);
1711  goto out;
1712  }
1713 
1714  len = le32_to_cpu(ch->len);
1715  if (len != zbr->len) {
1716  ubifs_err("bad node length %d, expected %d", len, zbr->len);
1717  goto out_err;
1718  }
1719 
1720  /* Make sure the key of the read node is correct */
1721  key_read(c, buf + UBIFS_KEY_OFFSET, &key1);
1722  if (!keys_eq(c, &zbr->key, &key1)) {
1723  ubifs_err("bad key in node at LEB %d:%d",
1724  zbr->lnum, zbr->offs);
1725  dbg_tnck(&zbr->key, "looked for key ");
1726  dbg_tnck(&key1, "found node's key ");
1727  goto out_err;
1728  }
1729 
1730  return 0;
1731 
1732 out_err:
1733  err = -EINVAL;
1734 out:
1735  ubifs_err("bad node at LEB %d:%d", zbr->lnum, zbr->offs);
1736  ubifs_dump_node(c, buf);
1737  dump_stack();
1738  return err;
1739 }
1740 
1751 int ubifs_tnc_bulk_read(struct ubifs_info *c, struct bu_info *bu)
1752 {
1753  int lnum = bu->zbranch[0].lnum, offs = bu->zbranch[0].offs, len, err, i;
1754  struct ubifs_wbuf *wbuf;
1755  void *buf;
1756 
1757  len = bu->zbranch[bu->cnt - 1].offs;
1758  len += bu->zbranch[bu->cnt - 1].len - offs;
1759  if (len > bu->buf_len) {
1760  ubifs_err("buffer too small %d vs %d", bu->buf_len, len);
1761  return -EINVAL;
1762  }
1763 
1764  /* Do the read */
1765  wbuf = ubifs_get_wbuf(c, lnum);
1766  if (wbuf)
1767  err = read_wbuf(wbuf, bu->buf, len, lnum, offs);
1768  else
1769  err = ubifs_leb_read(c, lnum, bu->buf, offs, len, 0);
1770 
1771  /* Check for a race with GC */
1772  if (maybe_leb_gced(c, lnum, bu->gc_seq))
1773  return -EAGAIN;
1774 
1775  if (err && err != -EBADMSG) {
1776  ubifs_err("failed to read from LEB %d:%d, error %d",
1777  lnum, offs, err);
1778  dump_stack();
1779  dbg_tnck(&bu->key, "key ");
1780  return err;
1781  }
1782 
1783  /* Validate the nodes read */
1784  buf = bu->buf;
1785  for (i = 0; i < bu->cnt; i++) {
1786  err = validate_data_node(c, buf, &bu->zbranch[i]);
1787  if (err)
1788  return err;
1789  buf = buf + ALIGN(bu->zbranch[i].len, 8);
1790  }
1791 
1792  return 0;
1793 }
1794 
1808 static int do_lookup_nm(struct ubifs_info *c, const union ubifs_key *key,
1809  void *node, const struct qstr *nm)
1810 {
1811  int found, n, err;
1812  struct ubifs_znode *znode;
1813 
1814  dbg_tnck(key, "name '%.*s' key ", nm->len, nm->name);
1815  mutex_lock(&c->tnc_mutex);
1816  found = ubifs_lookup_level0(c, key, &znode, &n);
1817  if (!found) {
1818  err = -ENOENT;
1819  goto out_unlock;
1820  } else if (found < 0) {
1821  err = found;
1822  goto out_unlock;
1823  }
1824 
1825  ubifs_assert(n >= 0);
1826 
1827  err = resolve_collision(c, key, &znode, &n, nm);
1828  dbg_tnc("rc returned %d, znode %p, n %d", err, znode, n);
1829  if (unlikely(err < 0))
1830  goto out_unlock;
1831  if (err == 0) {
1832  err = -ENOENT;
1833  goto out_unlock;
1834  }
1835 
1836  err = tnc_read_node_nm(c, &znode->zbranch[n], node);
1837 
1838 out_unlock:
1839  mutex_unlock(&c->tnc_mutex);
1840  return err;
1841 }
1842 
1856 int ubifs_tnc_lookup_nm(struct ubifs_info *c, const union ubifs_key *key,
1857  void *node, const struct qstr *nm)
1858 {
1859  int err, len;
1860  const struct ubifs_dent_node *dent = node;
1861 
1862  /*
1863  * We assume that in most of the cases there are no name collisions and
1864  * 'ubifs_tnc_lookup()' returns us the right direntry.
1865  */
1866  err = ubifs_tnc_lookup(c, key, node);
1867  if (err)
1868  return err;
1869 
1870  len = le16_to_cpu(dent->nlen);
1871  if (nm->len == len && !memcmp(dent->name, nm->name, len))
1872  return 0;
1873 
1874  /*
1875  * Unluckily, there are hash collisions and we have to iterate over them,
1876  * looking at each direntry with a colliding name hash sequentially.
1877  */
1878  return do_lookup_nm(c, key, node, nm);
1879 }
1880 
1890 static void correct_parent_keys(const struct ubifs_info *c,
1891  struct ubifs_znode *znode)
1892 {
1893  union ubifs_key *key, *key1;
1894 
1895  ubifs_assert(znode->parent);
1896  ubifs_assert(znode->iip == 0);
1897 
1898  key = &znode->zbranch[0].key;
1899  key1 = &znode->parent->zbranch[0].key;
1900 
1901  while (keys_cmp(c, key, key1) < 0) {
1902  key_copy(c, key, key1);
1903  znode = znode->parent;
1904  znode->alt = 1;
1905  if (!znode->parent || znode->iip)
1906  break;
1907  key1 = &znode->parent->zbranch[0].key;
1908  }
1909 }
1910 
1922 static void insert_zbranch(struct ubifs_znode *znode,
1923  const struct ubifs_zbranch *zbr, int n)
1924 {
1925  int i;
1926 
1927  ubifs_assert(ubifs_zn_dirty(znode));
1928 
1929  if (znode->level) {
1930  for (i = znode->child_cnt; i > n; i--) {
1931  znode->zbranch[i] = znode->zbranch[i - 1];
1932  if (znode->zbranch[i].znode)
1933  znode->zbranch[i].znode->iip = i;
1934  }
1935  if (zbr->znode)
1936  zbr->znode->iip = n;
1937  } else
1938  for (i = znode->child_cnt; i > n; i--)
1939  znode->zbranch[i] = znode->zbranch[i - 1];
1940 
1941  znode->zbranch[n] = *zbr;
1942  znode->child_cnt += 1;
1943 
1944  /*
1945  * After inserting at slot zero, the lower bound of the key range of
1946  * this znode may have changed. If this znode is subsequently split
1947  * then the upper bound of the key range may change, and furthermore
1948  * it could change to be lower than the original lower bound. If that
1949  * happens, then it will no longer be possible to find this znode in the
1950  * TNC using the key from the index node on flash. That is bad because
1951  * if it is not found, we will assume it is obsolete and may overwrite
1952  * it. Then if there is an unclean unmount, we will start using the
1953  * old index which will be broken.
1954  *
1955  * So we first mark znodes that have insertions at slot zero, and then
1956  * if they are split we add their lnum/offs to the old_idx tree.
1957  */
1958  if (n == 0)
1959  znode->alt = 1;
1960 }
1961 
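/*
 * tnc_insert() inserts zbranch @zbr into @znode at slot @n. If the znode is
 * full it is split, roughly in half in the general case, but consecutive
 * data keys of the same inode are kept together, and the new right-hand
 * znode is then inserted into the parent (iterating via the "again" label).
 * Splitting the root creates a new root and grows the tree by one level.
 * Returns %0 on success and %-ENOMEM on allocation failure.
 */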
1974 static int tnc_insert(struct ubifs_info *c, struct ubifs_znode *znode,
1975  struct ubifs_zbranch *zbr, int n)
1976 {
1977  struct ubifs_znode *zn, *zi, *zp;
1978  int i, keep, move, appending = 0;
1979  union ubifs_key *key = &zbr->key, *key1;
1980 
1981  ubifs_assert(n >= 0 && n <= c->fanout);
1982 
1983  /* Implement naive insert for now */
1984 again:
1985  zp = znode->parent;
1986  if (znode->child_cnt < c->fanout) {
1987  ubifs_assert(n != c->fanout);
1988  dbg_tnck(key, "inserted at %d level %d, key ", n, znode->level);
1989 
1990  insert_zbranch(znode, zbr, n);
1991 
1992  /* Ensure parent's key is correct */
1993  if (n == 0 && zp && znode->iip == 0)
1994  correct_parent_keys(c, znode);
1995 
1996  return 0;
1997  }
1998 
1999  /*
2000  * Unfortunately, @znode does not have more empty slots and we have to
2001  * split it.
2002  */
2003  dbg_tnck(key, "splitting level %d, key ", znode->level);
2004 
2005  if (znode->alt)
2006  /*
2007  * We can no longer be sure of finding this znode by key, so we
2008  * record it in the old_idx tree.
2009  */
2010  ins_clr_old_idx_znode(c, znode);
2011 
2012  zn = kzalloc(c->max_znode_sz, GFP_NOFS);
2013  if (!zn)
2014  return -ENOMEM;
2015  zn->parent = zp;
2016  zn->level = znode->level;
2017 
2018  /* Decide where to split */
2019  if (znode->level == 0 && key_type(c, key) == UBIFS_DATA_KEY) {
2020  /* Try not to split consecutive data keys */
2021  if (n == c->fanout) {
2022  key1 = &znode->zbranch[n - 1].key;
2023  if (key_inum(c, key1) == key_inum(c, key) &&
2024  key_type(c, key1) == UBIFS_DATA_KEY)
2025  appending = 1;
2026  } else
2027  goto check_split;
2028  } else if (appending && n != c->fanout) {
2029  /* Try not to split consecutive data keys */
2030  appending = 0;
2031 check_split:
2032  if (n >= (c->fanout + 1) / 2) {
2033  key1 = &znode->zbranch[0].key;
2034  if (key_inum(c, key1) == key_inum(c, key) &&
2035  key_type(c, key1) == UBIFS_DATA_KEY) {
2036  key1 = &znode->zbranch[n].key;
2037  if (key_inum(c, key1) != key_inum(c, key) ||
2038  key_type(c, key1) != UBIFS_DATA_KEY) {
2039  keep = n;
2040  move = c->fanout - keep;
2041  zi = znode;
2042  goto do_split;
2043  }
2044  }
2045  }
2046  }
2047 
2048  if (appending) {
2049  keep = c->fanout;
2050  move = 0;
2051  } else {
2052  keep = (c->fanout + 1) / 2;
2053  move = c->fanout - keep;
2054  }
2055 
2056  /*
2057  * Although we don't at present, we could look at the neighbors and see
2058  * if we can move some zbranches there.
2059  */
2060 
2061  if (n < keep) {
2062  /* Insert into existing znode */
2063  zi = znode;
2064  move += 1;
2065  keep -= 1;
2066  } else {
2067  /* Insert into new znode */
2068  zi = zn;
2069  n -= keep;
2070  /* Re-parent */
2071  if (zn->level != 0)
2072  zbr->znode->parent = zn;
2073  }
2074 
2075 do_split:
2076 
2077  __set_bit(DIRTY_ZNODE, &zn->flags);
2078  atomic_long_inc(&c->dirty_zn_cnt);
2079 
2080  zn->child_cnt = move;
2081  znode->child_cnt = keep;
2082 
2083  dbg_tnc("moving %d, keeping %d", move, keep);
2084 
2085  /* Move zbranch */
2086  for (i = 0; i < move; i++) {
2087  zn->zbranch[i] = znode->zbranch[keep + i];
2088  /* Re-parent */
2089  if (zn->level != 0)
2090  if (zn->zbranch[i].znode) {
2091  zn->zbranch[i].znode->parent = zn;
2092  zn->zbranch[i].znode->iip = i;
2093  }
2094  }
2095 
2096  /* Insert new key and branch */
2097  dbg_tnck(key, "inserting at %d level %d, key ", n, zn->level);
2098 
2099  insert_zbranch(zi, zbr, n);
2100 
2101  /* Insert new znode (produced by splitting) into the parent */
2102  if (zp) {
2103  if (n == 0 && zi == znode && znode->iip == 0)
2104  correct_parent_keys(c, znode);
2105 
2106  /* Locate insertion point */
2107  n = znode->iip + 1;
2108 
2109  /* Tail recursion */
2110  zbr->key = zn->zbranch[0].key;
2111  zbr->znode = zn;
2112  zbr->lnum = 0;
2113  zbr->offs = 0;
2114  zbr->len = 0;
2115  znode = zp;
2116 
2117  goto again;
2118  }
2119 
2120  /* We have to split root znode */
2121  dbg_tnc("creating new zroot at level %d", znode->level + 1);
2122 
2123  zi = kzalloc(c->max_znode_sz, GFP_NOFS);
2124  if (!zi)
2125  return -ENOMEM;
2126 
2127  zi->child_cnt = 2;
2128  zi->level = znode->level + 1;
2129 
2130  __set_bit(DIRTY_ZNODE, &zi->flags);
2131  atomic_long_inc(&c->dirty_zn_cnt);
2132 
2133  zi->zbranch[0].key = znode->zbranch[0].key;
2134  zi->zbranch[0].znode = znode;
2135  zi->zbranch[0].lnum = c->zroot.lnum;
2136  zi->zbranch[0].offs = c->zroot.offs;
2137  zi->zbranch[0].len = c->zroot.len;
2138  zi->zbranch[1].key = zn->zbranch[0].key;
2139  zi->zbranch[1].znode = zn;
2140 
2141  c->zroot.lnum = 0;
2142  c->zroot.offs = 0;
2143  c->zroot.len = 0;
2144  c->zroot.znode = zi;
2145 
2146  zn->parent = zi;
2147  zn->iip = 1;
2148  znode->parent = zi;
2149  znode->iip = 0;
2150 
2151  return 0;
2152 }
2153 
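/*
 * ubifs_tnc_add() adds node position information to the TNC. If @key
 * already exists, its recorded LEB number, offset and length are simply
 * replaced (any cached leaf is freed and the old node is accounted as
 * dirt); otherwise a new zbranch is inserted after the slot returned by the
 * lookup.
 */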
2166 int ubifs_tnc_add(struct ubifs_info *c, const union ubifs_key *key, int lnum,
2167  int offs, int len)
2168 {
2169  int found, n, err = 0;
2170  struct ubifs_znode *znode;
2171 
2172  mutex_lock(&c->tnc_mutex);
2173  dbg_tnck(key, "%d:%d, len %d, key ", lnum, offs, len);
2174  found = lookup_level0_dirty(c, key, &znode, &n);
2175  if (!found) {
2176  struct ubifs_zbranch zbr;
2177 
2178  zbr.znode = NULL;
2179  zbr.lnum = lnum;
2180  zbr.offs = offs;
2181  zbr.len = len;
2182  key_copy(c, key, &zbr.key);
2183  err = tnc_insert(c, znode, &zbr, n + 1);
2184  } else if (found == 1) {
2185  struct ubifs_zbranch *zbr = &znode->zbranch[n];
2186 
2187  lnc_free(zbr);
2188  err = ubifs_add_dirt(c, zbr->lnum, zbr->len);
2189  zbr->lnum = lnum;
2190  zbr->offs = offs;
2191  zbr->len = len;
2192  } else
2193  err = found;
2194  if (!err)
2195  err = dbg_check_tnc(c, 0);
2196  mutex_unlock(&c->tnc_mutex);
2197 
2198  return err;
2199 }
2200 
2215 int ubifs_tnc_replace(struct ubifs_info *c, const union ubifs_key *key,
2216  int old_lnum, int old_offs, int lnum, int offs, int len)
2217 {
2218  int found, n, err = 0;
2219  struct ubifs_znode *znode;
2220 
2221  mutex_lock(&c->tnc_mutex);
2222  dbg_tnck(key, "old LEB %d:%d, new LEB %d:%d, len %d, key ", old_lnum,
2223  old_offs, lnum, offs, len);
2224  found = lookup_level0_dirty(c, key, &znode, &n);
2225  if (found < 0) {
2226  err = found;
2227  goto out_unlock;
2228  }
2229 
2230  if (found == 1) {
2231  struct ubifs_zbranch *zbr = &znode->zbranch[n];
2232 
2233  found = 0;
2234  if (zbr->lnum == old_lnum && zbr->offs == old_offs) {
2235  lnc_free(zbr);
2236  err = ubifs_add_dirt(c, zbr->lnum, zbr->len);
2237  if (err)
2238  goto out_unlock;
2239  zbr->lnum = lnum;
2240  zbr->offs = offs;
2241  zbr->len = len;
2242  found = 1;
2243  } else if (is_hash_key(c, key)) {
2244  found = resolve_collision_directly(c, key, &znode, &n,
2245  old_lnum, old_offs);
2246  dbg_tnc("rc returned %d, znode %p, n %d, LEB %d:%d",
2247  found, znode, n, old_lnum, old_offs);
2248  if (found < 0) {
2249  err = found;
2250  goto out_unlock;
2251  }
2252 
2253  if (found) {
2254  /* Ensure the znode is dirtied */
2255  if (znode->cnext || !ubifs_zn_dirty(znode)) {
2256  znode = dirty_cow_bottom_up(c, znode);
2257  if (IS_ERR(znode)) {
2258  err = PTR_ERR(znode);
2259  goto out_unlock;
2260  }
2261  }
2262  zbr = &znode->zbranch[n];
2263  lnc_free(zbr);
2264  err = ubifs_add_dirt(c, zbr->lnum,
2265  zbr->len);
2266  if (err)
2267  goto out_unlock;
2268  zbr->lnum = lnum;
2269  zbr->offs = offs;
2270  zbr->len = len;
2271  }
2272  }
2273  }
2274 
2275  if (!found)
2276  err = ubifs_add_dirt(c, lnum, len);
2277 
2278  if (!err)
2279  err = dbg_check_tnc(c, 0);
2280 
2281 out_unlock:
2282  mutex_unlock(&c->tnc_mutex);
2283  return err;
2284 }
2285 
2298 int ubifs_tnc_add_nm(struct ubifs_info *c, const union ubifs_key *key,
2299  int lnum, int offs, int len, const struct qstr *nm)
2300 {
2301  int found, n, err = 0;
2302  struct ubifs_znode *znode;
2303 
2304  mutex_lock(&c->tnc_mutex);
2305  dbg_tnck(key, "LEB %d:%d, name '%.*s', key ",
2306  lnum, offs, nm->len, nm->name);
2307  found = lookup_level0_dirty(c, key, &znode, &n);
2308  if (found < 0) {
2309  err = found;
2310  goto out_unlock;
2311  }
2312 
2313  if (found == 1) {
2314  if (c->replaying)
2315  found = fallible_resolve_collision(c, key, &znode, &n,
2316  nm, 1);
2317  else
2318  found = resolve_collision(c, key, &znode, &n, nm);
2319  dbg_tnc("rc returned %d, znode %p, n %d", found, znode, n);
2320  if (found < 0) {
2321  err = found;
2322  goto out_unlock;
2323  }
2324 
2325  /* Ensure the znode is dirtied */
2326  if (znode->cnext || !ubifs_zn_dirty(znode)) {
2327  znode = dirty_cow_bottom_up(c, znode);
2328  if (IS_ERR(znode)) {
2329  err = PTR_ERR(znode);
2330  goto out_unlock;
2331  }
2332  }
2333 
2334  if (found == 1) {
2335  struct ubifs_zbranch *zbr = &znode->zbranch[n];
2336 
2337  lnc_free(zbr);
2338  err = ubifs_add_dirt(c, zbr->lnum, zbr->len);
2339  zbr->lnum = lnum;
2340  zbr->offs = offs;
2341  zbr->len = len;
2342  goto out_unlock;
2343  }
2344  }
2345 
2346  if (!found) {
2347  struct ubifs_zbranch zbr;
2348 
2349  zbr.znode = NULL;
2350  zbr.lnum = lnum;
2351  zbr.offs = offs;
2352  zbr.len = len;
2353  key_copy(c, key, &zbr.key);
2354  err = tnc_insert(c, znode, &zbr, n + 1);
2355  if (err)
2356  goto out_unlock;
2357  if (c->replaying) {
2358  /*
2359  * We did not find it in the index so there may be a
2360  * dangling branch still in the index. So we remove it
2361  * by passing 'ubifs_tnc_remove_nm()' the same key but
2362  * an unmatchable name.
2363  */
2364  struct qstr noname = { .name = "" };
2365 
2366  err = dbg_check_tnc(c, 0);
2367  mutex_unlock(&c->tnc_mutex);
2368  if (err)
2369  return err;
2370  return ubifs_tnc_remove_nm(c, key, &noname);
2371  }
2372  }
2373 
2374 out_unlock:
2375  if (!err)
2376  err = dbg_check_tnc(c, 0);
2377  mutex_unlock(&c->tnc_mutex);
2378  return err;
2379 }
2380 
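/*
 * tnc_delete() deletes zbranch @n from level-0 @znode. Znodes which become
 * empty are removed from their parents (their old index positions are
 * recorded first), and while the root is a non-leaf znode with a single
 * child, that child becomes the new root, reducing the tree height.
 */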
2390 static int tnc_delete(struct ubifs_info *c, struct ubifs_znode *znode, int n)
2391 {
2392  struct ubifs_zbranch *zbr;
2393  struct ubifs_znode *zp;
2394  int i, err;
2395 
2396  /* Delete without merge for now */
2397  ubifs_assert(znode->level == 0);
2398  ubifs_assert(n >= 0 && n < c->fanout);
2399  dbg_tnck(&znode->zbranch[n].key, "deleting key ");
2400 
2401  zbr = &znode->zbranch[n];
2402  lnc_free(zbr);
2403 
2404  err = ubifs_add_dirt(c, zbr->lnum, zbr->len);
2405  if (err) {
2406  ubifs_dump_znode(c, znode);
2407  return err;
2408  }
2409 
2410  /* We do not "gap" zbranch slots */
2411  for (i = n; i < znode->child_cnt - 1; i++)
2412  znode->zbranch[i] = znode->zbranch[i + 1];
2413  znode->child_cnt -= 1;
2414 
2415  if (znode->child_cnt > 0)
2416  return 0;
2417 
2418  /*
2419  * This was the last zbranch, we have to delete this znode from the
2420  * parent.
2421  */
2422 
2423  do {
2424  ubifs_assert(!ubifs_zn_obsolete(znode));
2425  ubifs_assert(ubifs_zn_dirty(znode));
2426 
2427  zp = znode->parent;
2428  n = znode->iip;
2429 
2430  atomic_long_dec(&c->dirty_zn_cnt);
2431 
2432  err = insert_old_idx_znode(c, znode);
2433  if (err)
2434  return err;
2435 
2436  if (znode->cnext) {
2437  __set_bit(OBSOLETE_ZNODE, &znode->flags);
2438  atomic_long_inc(&c->clean_zn_cnt);
2439  atomic_long_inc(&ubifs_clean_zn_cnt);
2440  } else
2441  kfree(znode);
2442  znode = zp;
2443  } while (znode->child_cnt == 1); /* while removing last child */
2444 
2445  /* Remove from znode, entry n - 1 */
2446  znode->child_cnt -= 1;
2447  ubifs_assert(znode->level != 0);
2448  for (i = n; i < znode->child_cnt; i++) {
2449  znode->zbranch[i] = znode->zbranch[i + 1];
2450  if (znode->zbranch[i].znode)
2451  znode->zbranch[i].znode->iip = i;
2452  }
2453 
2454  /*
2455  * If this is the root and it has only 1 child then
2456  * collapse the tree.
2457  */
2458  if (!znode->parent) {
2459  while (znode->child_cnt == 1 && znode->level != 0) {
2460  zp = znode;
2461  zbr = &znode->zbranch[0];
2462  znode = get_znode(c, znode, 0);
2463  if (IS_ERR(znode))
2464  return PTR_ERR(znode);
2465  znode = dirty_cow_znode(c, zbr);
2466  if (IS_ERR(znode))
2467  return PTR_ERR(znode);
2468  znode->parent = NULL;
2469  znode->iip = 0;
2470  if (c->zroot.len) {
2471  err = insert_old_idx(c, c->zroot.lnum,
2472  c->zroot.offs);
2473  if (err)
2474  return err;
2475  }
2476  c->zroot.lnum = zbr->lnum;
2477  c->zroot.offs = zbr->offs;
2478  c->zroot.len = zbr->len;
2479  c->zroot.znode = znode;
2480  ubifs_assert(!ubifs_zn_obsolete(zp));
2481  ubifs_assert(ubifs_zn_dirty(zp));
2482  atomic_long_dec(&c->dirty_zn_cnt);
2483 
2484  if (zp->cnext) {
2485  __set_bit(OBSOLETE_ZNODE, &zp->flags);
2486  atomic_long_inc(&c->clean_zn_cnt);
2487  atomic_long_inc(&ubifs_clean_zn_cnt);
2488  } else
2489  kfree(zp);
2490  }
2491  }
2492 
2493  return 0;
2494 }
2495 
2503 int ubifs_tnc_remove(struct ubifs_info *c, const union ubifs_key *key)
2504 {
2505  int found, n, err = 0;
2506  struct ubifs_znode *znode;
2507 
2508  mutex_lock(&c->tnc_mutex);
2509  dbg_tnck(key, "key ");
2510  found = lookup_level0_dirty(c, key, &znode, &n);
2511  if (found < 0) {
2512  err = found;
2513  goto out_unlock;
2514  }
2515  if (found == 1)
2516  err = tnc_delete(c, znode, n);
2517  if (!err)
2518  err = dbg_check_tnc(c, 0);
2519 
2520 out_unlock:
2521  mutex_unlock(&c->tnc_mutex);
2522  return err;
2523 }
2524 
2533 int ubifs_tnc_remove_nm(struct ubifs_info *c, const union ubifs_key *key,
2534  const struct qstr *nm)
2535 {
2536  int n, err;
2537  struct ubifs_znode *znode;
2538 
2539  mutex_lock(&c->tnc_mutex);
2540  dbg_tnck(key, "%.*s, key ", nm->len, nm->name);
2541  err = lookup_level0_dirty(c, key, &znode, &n);
2542  if (err < 0)
2543  goto out_unlock;
2544 
2545  if (err) {
2546  if (c->replaying)
2547  err = fallible_resolve_collision(c, key, &znode, &n,
2548  nm, 0);
2549  else
2550  err = resolve_collision(c, key, &znode, &n, nm);
2551  dbg_tnc("rc returned %d, znode %p, n %d", err, znode, n);
2552  if (err < 0)
2553  goto out_unlock;
2554  if (err) {
2555  /* Ensure the znode is dirtied */
2556  if (znode->cnext || !ubifs_zn_dirty(znode)) {
2557  znode = dirty_cow_bottom_up(c, znode);
2558  if (IS_ERR(znode)) {
2559  err = PTR_ERR(znode);
2560  goto out_unlock;
2561  }
2562  }
2563  err = tnc_delete(c, znode, n);
2564  }
2565  }
2566 
2567 out_unlock:
2568  if (!err)
2569  err = dbg_check_tnc(c, 0);
2570  mutex_unlock(&c->tnc_mutex);
2571  return err;
2572 }
2573 
2583 static int key_in_range(struct ubifs_info *c, union ubifs_key *key,
2584  union ubifs_key *from_key, union ubifs_key *to_key)
2585 {
2586  if (keys_cmp(c, key, from_key) < 0)
2587  return 0;
2588  if (keys_cmp(c, key, to_key) > 0)
2589  return 0;
2590  return 1;
2591 }
2592 
2603 int ubifs_tnc_remove_range(struct ubifs_info *c, union ubifs_key *from_key,
2604  union ubifs_key *to_key)
2605 {
2606  int i, n, k, err = 0;
2607  struct ubifs_znode *znode;
2608  union ubifs_key *key;
2609 
2610  mutex_lock(&c->tnc_mutex);
2611  while (1) {
2612  /* Find first level 0 znode that contains keys to remove */
2613  err = ubifs_lookup_level0(c, from_key, &znode, &n);
2614  if (err < 0)
2615  goto out_unlock;
2616 
2617  if (err)
2618  key = from_key;
2619  else {
2620  err = tnc_next(c, &znode, &n);
2621  if (err == -ENOENT) {
2622  err = 0;
2623  goto out_unlock;
2624  }
2625  if (err < 0)
2626  goto out_unlock;
2627  key = &znode->zbranch[n].key;
2628  if (!key_in_range(c, key, from_key, to_key)) {
2629  err = 0;
2630  goto out_unlock;
2631  }
2632  }
2633 
2634  /* Ensure the znode is dirtied */
2635  if (znode->cnext || !ubifs_zn_dirty(znode)) {
2636  znode = dirty_cow_bottom_up(c, znode);
2637  if (IS_ERR(znode)) {
2638  err = PTR_ERR(znode);
2639  goto out_unlock;
2640  }
2641  }
2642 
2643  /* Remove all keys in range except the first */
2644  for (i = n + 1, k = 0; i < znode->child_cnt; i++, k++) {
2645  key = &znode->zbranch[i].key;
2646  if (!key_in_range(c, key, from_key, to_key))
2647  break;
2648  lnc_free(&znode->zbranch[i]);
2649  err = ubifs_add_dirt(c, znode->zbranch[i].lnum,
2650  znode->zbranch[i].len);
2651  if (err) {
2652  ubifs_dump_znode(c, znode);
2653  goto out_unlock;
2654  }
2655  dbg_tnck(key, "removing key ");
2656  }
2657  if (k) {
2658  for (i = n + 1 + k; i < znode->child_cnt; i++)
2659  znode->zbranch[i - k] = znode->zbranch[i];
2660  znode->child_cnt -= k;
2661  }
2662 
2663  /* Now delete the first */
2664  err = tnc_delete(c, znode, n);
2665  if (err)
2666  goto out_unlock;
2667  }
2668 
2669 out_unlock:
2670  if (!err)
2671  err = dbg_check_tnc(c, 0);
2672  mutex_unlock(&c->tnc_mutex);
2673  return err;
2674 }
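/*
 * Illustrative sketch, not part of tnc.c: a caller wanting to drop every
 * data node of inode @inum from block @blk onwards could build the range
 * with the key helpers used by dbg_check_inode_size() below and hand it
 * to ubifs_tnc_remove_range(). The wrapper itself is hypothetical.
 */
static int example_remove_data_tail(struct ubifs_info *c, ino_t inum,
				    unsigned int blk)
{
	union ubifs_key from_key, to_key;

	data_key_init(c, &from_key, inum, blk); /* first data key to drop */
	highest_data_key(c, &to_key, inum);     /* last possible data key */
	return ubifs_tnc_remove_range(c, &from_key, &to_key);
}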
2675 
2685 int ubifs_tnc_remove_ino(struct ubifs_info *c, ino_t inum)
2686 {
2687  union ubifs_key key1, key2;
2688  struct ubifs_dent_node *xent, *pxent = NULL;
2689  struct qstr nm = { .name = NULL };
2690 
2691  dbg_tnc("ino %lu", (unsigned long)inum);
2692 
2693  /*
2694  * Walk all extended attribute entries and remove them together with
2695  * corresponding extended attribute inodes.
2696  */
2697  lowest_xent_key(c, &key1, inum);
2698  while (1) {
2699  ino_t xattr_inum;
2700  int err;
2701 
2702  xent = ubifs_tnc_next_ent(c, &key1, &nm);
2703  if (IS_ERR(xent)) {
2704  err = PTR_ERR(xent);
2705  if (err == -ENOENT)
2706  break;
2707  return err;
2708  }
2709 
2710  xattr_inum = le64_to_cpu(xent->inum);
2711  dbg_tnc("xent '%s', ino %lu", xent->name,
2712  (unsigned long)xattr_inum);
2713 
2714  nm.name = xent->name;
2715  nm.len = le16_to_cpu(xent->nlen);
2716  err = ubifs_tnc_remove_nm(c, &key1, &nm);
2717  if (err) {
2718  kfree(xent);
2719  return err;
2720  }
2721 
2722  lowest_ino_key(c, &key1, xattr_inum);
2723  highest_ino_key(c, &key2, xattr_inum);
2724  err = ubifs_tnc_remove_range(c, &key1, &key2);
2725  if (err) {
2726  kfree(xent);
2727  return err;
2728  }
2729 
2730  kfree(pxent);
2731  pxent = xent;
2732  key_read(c, &xent->key, &key1);
2733  }
2734 
2735  kfree(pxent);
2736  lowest_ino_key(c, &key1, inum);
2737  highest_ino_key(c, &key2, inum);
2738 
2739  return ubifs_tnc_remove_range(c, &key1, &key2);
2740 }
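/*
 * Illustrative sketch, not part of tnc.c: unlinking a file combines the
 * removal primitives above - ubifs_tnc_remove_nm() drops the (hashed)
 * directory-entry key and ubifs_tnc_remove_ino() drops the inode together
 * with its data and extended attributes. The helper below is hypothetical;
 * the real deletion paths elsewhere in UBIFS chain these calls similarly.
 */
static int example_forget_inode(struct ubifs_info *c,
				const union ubifs_key *dent_key,
				const struct qstr *nm, ino_t inum)
{
	int err;

	err = ubifs_tnc_remove_nm(c, dent_key, nm);	/* remove the dent */
	if (err)
		return err;
	return ubifs_tnc_remove_ino(c, inum);		/* remove the inode */
}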
2741 
2765 struct ubifs_dent_node *ubifs_tnc_next_ent(struct ubifs_info *c,
2766  union ubifs_key *key,
2767  const struct qstr *nm)
2768 {
2769  int n, err, type = key_type(c, key);
2770  struct ubifs_znode *znode;
2771  struct ubifs_dent_node *dent;
2772  struct ubifs_zbranch *zbr;
2773  union ubifs_key *dkey;
2774 
2775  dbg_tnck(key, "%s ", nm->name ? (char *)nm->name : "(lowest)");
2776  ubifs_assert(is_hash_key(c, key));
2777 
2778  mutex_lock(&c->tnc_mutex);
2779  err = ubifs_lookup_level0(c, key, &znode, &n);
2780  if (unlikely(err < 0))
2781  goto out_unlock;
2782 
2783  if (nm->name) {
2784  if (err) {
2785  /* Handle collisions */
2786  err = resolve_collision(c, key, &znode, &n, nm);
2787  dbg_tnc("rc returned %d, znode %p, n %d",
2788  err, znode, n);
2789  if (unlikely(err < 0))
2790  goto out_unlock;
2791  }
2792 
2793  /* Now find next entry */
2794  err = tnc_next(c, &znode, &n);
2795  if (unlikely(err))
2796  goto out_unlock;
2797  } else {
2798  /*
2799  * The full name of the entry was not given. In this case the
2800  * behavior of this function is a little different: it returns the
2801  * current entry, not the next one.
2802  */
2803  if (!err) {
2804  /*
2805  * However, the given key does not exist in the TNC
2806  * tree and @znode/@n variables contain the closest
2807  * "preceding" element. Switch to the next one.
2808  */
2809  err = tnc_next(c, &znode, &n);
2810  if (err)
2811  goto out_unlock;
2812  }
2813  }
2814 
2815  zbr = &znode->zbranch[n];
2816  dent = kmalloc(zbr->len, GFP_NOFS);
2817  if (unlikely(!dent)) {
2818  err = -ENOMEM;
2819  goto out_unlock;
2820  }
2821 
2822  /*
2823  * The above 'tnc_next()' call could lead us to the next inode, check
2824  * this.
2825  */
2826  dkey = &zbr->key;
2827  if (key_inum(c, dkey) != key_inum(c, key) ||
2828  key_type(c, dkey) != type) {
2829  err = -ENOENT;
2830  goto out_free;
2831  }
2832 
2833  err = tnc_read_node_nm(c, zbr, dent);
2834  if (unlikely(err))
2835  goto out_free;
2836 
2837  mutex_unlock(&c->tnc_mutex);
2838  return dent;
2839 
2840 out_free:
2841  kfree(dent);
2842 out_unlock:
2843  mutex_unlock(&c->tnc_mutex);
2844  return ERR_PTR(err);
2845 }
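/*
 * Illustrative sketch, not part of tnc.c: iterating all entries of a
 * directory follows the same pattern as the xattr walk in
 * ubifs_tnc_remove_ino() above - start with an empty name so the lowest
 * entry comes back first, then feed each returned name in again to get
 * the one after it, until -ENOENT ends the walk. example_walk_entries()
 * is hypothetical, and lowest_dent_key() (the directory analogue of the
 * lowest_xent_key() helper used above) is not defined in this file.
 */
static int example_walk_entries(struct ubifs_info *c, ino_t dir_inum)
{
	union ubifs_key key;
	struct ubifs_dent_node *dent, *pdent = NULL;
	struct qstr nm = { .name = NULL };

	lowest_dent_key(c, &key, dir_inum);
	while (1) {
		dent = ubifs_tnc_next_ent(c, &key, &nm);
		if (IS_ERR(dent)) {
			int err = PTR_ERR(dent);

			kfree(pdent);
			return err == -ENOENT ? 0 : err;
		}
		/* @nm must keep pointing into @dent until the next call */
		nm.name = dent->name;
		nm.len = le16_to_cpu(dent->nlen);
		key_read(c, &dent->key, &key);
		kfree(pdent);
		pdent = dent;
	}
}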
2846 
2853 static void tnc_destroy_cnext(struct ubifs_info *c)
2854 {
2855  struct ubifs_znode *cnext;
2856 
2857  if (!c->cnext)
2858  return;
2860  cnext = c->cnext;
2861  do {
2862  struct ubifs_znode *znode = cnext;
2863 
2864  cnext = cnext->cnext;
2865  if (ubifs_zn_obsolete(znode))
2866  kfree(znode);
2867  } while (cnext && cnext != c->cnext);
2868 }
2869 
2874 void ubifs_tnc_close(struct ubifs_info *c)
2875 {
2876  tnc_destroy_cnext(c);
2877  if (c->zroot.znode) {
2878  long n;
2879 
2880  ubifs_destroy_tnc_subtree(c->zroot.znode);
2881  n = atomic_long_read(&c->clean_zn_cnt);
2882  atomic_long_sub(n, &ubifs_clean_zn_cnt);
2883  }
2884  kfree(c->gap_lebs);
2885  kfree(c->ilebs);
2886  destroy_old_idx(c);
2887 }
2888 
2897 static struct ubifs_znode *left_znode(struct ubifs_info *c,
2898  struct ubifs_znode *znode)
2899 {
2900  int level = znode->level;
2901 
2902  while (1) {
2903  int n = znode->iip - 1;
2904 
2905  /* Go up until we can go left */
2906  znode = znode->parent;
2907  if (!znode)
2908  return NULL;
2909  if (n >= 0) {
2910  /* Now go down the rightmost branch to 'level' */
2911  znode = get_znode(c, znode, n);
2912  if (IS_ERR(znode))
2913  return znode;
2914  while (znode->level != level) {
2915  n = znode->child_cnt - 1;
2916  znode = get_znode(c, znode, n);
2917  if (IS_ERR(znode))
2918  return znode;
2919  }
2920  break;
2921  }
2922  }
2923  return znode;
2924 }
2925 
2934 static struct ubifs_znode *right_znode(struct ubifs_info *c,
2935  struct ubifs_znode *znode)
2936 {
2937  int level = znode->level;
2938 
2939  while (1) {
2940  int n = znode->iip + 1;
2941 
2942  /* Go up until we can go right */
2943  znode = znode->parent;
2944  if (!znode)
2945  return NULL;
2946  if (n < znode->child_cnt) {
2947  /* Now go down the leftmost branch to 'level' */
2948  znode = get_znode(c, znode, n);
2949  if (IS_ERR(znode))
2950  return znode;
2951  while (znode->level != level) {
2952  znode = get_znode(c, znode, 0);
2953  if (IS_ERR(znode))
2954  return znode;
2955  }
2956  break;
2957  }
2958  }
2959  return znode;
2960 }
2961 
2987 static struct ubifs_znode *lookup_znode(struct ubifs_info *c,
2988  union ubifs_key *key, int level,
2989  int lnum, int offs)
2990 {
2991  struct ubifs_znode *znode, *zn;
2992  int n, nn;
2993 
2994  ubifs_assert(key_type(c, key) < UBIFS_INVALID_KEY);
2995 
2996  /*
2997  * The arguments have probably been read off flash, so don't assume
2998  * they are valid.
2999  */
3000  if (level < 0)
3001  return ERR_PTR(-EINVAL);
3002 
3003  /* Get the root znode */
3004  znode = c->zroot.znode;
3005  if (!znode) {
3006  znode = ubifs_load_znode(c, &c->zroot, NULL, 0);
3007  if (IS_ERR(znode))
3008  return znode;
3009  }
3010  /* Check if it is the one we are looking for */
3011  if (c->zroot.lnum == lnum && c->zroot.offs == offs)
3012  return znode;
3013  /* Descend to the parent level i.e. (level + 1) */
3014  if (level >= znode->level)
3015  return NULL;
3016  while (1) {
3017  ubifs_search_zbranch(c, znode, key, &n);
3018  if (n < 0) {
3019  /*
3020  * We reached a znode where the leftmost key is greater
3021  * than the key we are searching for. This is the same
3022  * situation as the one described in a huge comment at
3023  * the end of the 'ubifs_lookup_level0()' function. And
3024  * for exactly the same reasons we have to try to look
3025  * left before giving up.
3026  */
3027  znode = left_znode(c, znode);
3028  if (!znode)
3029  return NULL;
3030  if (IS_ERR(znode))
3031  return znode;
3032  ubifs_search_zbranch(c, znode, key, &n);
3033  ubifs_assert(n >= 0);
3034  }
3035  if (znode->level == level + 1)
3036  break;
3037  znode = get_znode(c, znode, n);
3038  if (IS_ERR(znode))
3039  return znode;
3040  }
3041  /* Check if the child is the one we are looking for */
3042  if (znode->zbranch[n].lnum == lnum && znode->zbranch[n].offs == offs)
3043  return get_znode(c, znode, n);
3044  /* If the key is unique, there is nowhere else to look */
3045  if (!is_hash_key(c, key))
3046  return NULL;
3047  /*
3048  * The key is not unique and so may also be in the znodes to either
3049  * side.
3050  */
3051  zn = znode;
3052  nn = n;
3053  /* Look left */
3054  while (1) {
3055  /* Move one branch to the left */
3056  if (n)
3057  n -= 1;
3058  else {
3059  znode = left_znode(c, znode);
3060  if (!znode)
3061  break;
3062  if (IS_ERR(znode))
3063  return znode;
3064  n = znode->child_cnt - 1;
3065  }
3066  /* Check it */
3067  if (znode->zbranch[n].lnum == lnum &&
3068  znode->zbranch[n].offs == offs)
3069  return get_znode(c, znode, n);
3070  /* Stop if the key is less than the one we are looking for */
3071  if (keys_cmp(c, &znode->zbranch[n].key, key) < 0)
3072  break;
3073  }
3074  /* Back to the middle */
3075  znode = zn;
3076  n = nn;
3077  /* Look right */
3078  while (1) {
3079  /* Move one branch to the right */
3080  if (++n >= znode->child_cnt) {
3081  znode = right_znode(c, znode);
3082  if (!znode)
3083  break;
3084  if (IS_ERR(znode))
3085  return znode;
3086  n = 0;
3087  }
3088  /* Check it */
3089  if (znode->zbranch[n].lnum == lnum &&
3090  znode->zbranch[n].offs == offs)
3091  return get_znode(c, znode, n);
3092  /* Stop if the key is greater than the one we are looking for */
3093  if (keys_cmp(c, &znode->zbranch[n].key, key) > 0)
3094  break;
3095  }
3096  return NULL;
3097 }
3098 
3116 int is_idx_node_in_tnc(struct ubifs_info *c, union ubifs_key *key, int level,
3117  int lnum, int offs)
3118 {
3119  struct ubifs_znode *znode;
3120 
3121  znode = lookup_znode(c, key, level, lnum, offs);
3122  if (!znode)
3123  return 0;
3124  if (IS_ERR(znode))
3125  return PTR_ERR(znode);
3126 
3127  return ubifs_zn_dirty(znode) ? 1 : 2;
3128 }
3129 
3143 static int is_leaf_node_in_tnc(struct ubifs_info *c, union ubifs_key *key,
3144  int lnum, int offs)
3145 {
3146  struct ubifs_zbranch *zbr;
3147  struct ubifs_znode *znode, *zn;
3148  int n, found, err, nn;
3149  const int unique = !is_hash_key(c, key);
3150 
3151  found = ubifs_lookup_level0(c, key, &znode, &n);
3152  if (found < 0)
3153  return found; /* Error code */
3154  if (!found)
3155  return 0;
3156  zbr = &znode->zbranch[n];
3157  if (lnum == zbr->lnum && offs == zbr->offs)
3158  return 1; /* Found it */
3159  if (unique)
3160  return 0;
3161  /*
3162  * Because the key is not unique, we have to look left
3163  * and right as well
3164  */
3165  zn = znode;
3166  nn = n;
3167  /* Look left */
3168  while (1) {
3169  err = tnc_prev(c, &znode, &n);
3170  if (err == -ENOENT)
3171  break;
3172  if (err)
3173  return err;
3174  if (keys_cmp(c, key, &znode->zbranch[n].key))
3175  break;
3176  zbr = &znode->zbranch[n];
3177  if (lnum == zbr->lnum && offs == zbr->offs)
3178  return 1; /* Found it */
3179  }
3180  /* Look right */
3181  znode = zn;
3182  n = nn;
3183  while (1) {
3184  err = tnc_next(c, &znode, &n);
3185  if (err) {
3186  if (err == -ENOENT)
3187  return 0;
3188  return err;
3189  }
3190  if (keys_cmp(c, key, &znode->zbranch[n].key))
3191  break;
3192  zbr = &znode->zbranch[n];
3193  if (lnum == zbr->lnum && offs == zbr->offs)
3194  return 1; /* Found it */
3195  }
3196  return 0;
3197 }
3198 
3213 int ubifs_tnc_has_node(struct ubifs_info *c, union ubifs_key *key, int level,
3214  int lnum, int offs, int is_idx)
3215 {
3216  int err;
3217 
3218  mutex_lock(&c->tnc_mutex);
3219  if (is_idx) {
3220  err = is_idx_node_in_tnc(c, key, level, lnum, offs);
3221  if (err < 0)
3222  goto out_unlock;
3223  if (err == 1)
3224  /* The index node was found but it was dirty */
3225  err = 0;
3226  else if (err == 2)
3227  /* The index node was found and it was clean */
3228  err = 1;
3229  else
3230  BUG_ON(err != 0);
3231  } else
3232  err = is_leaf_node_in_tnc(c, key, lnum, offs);
3233 
3234 out_unlock:
3235  mutex_unlock(&c->tnc_mutex);
3236  return err;
3237 }
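/*
 * Illustrative sketch, not part of tnc.c: a LEB-scanning caller (garbage
 * collection works along these lines) may treat a node found at
 * @lnum:@offs as safe to discard whenever ubifs_tnc_has_node() answers 0,
 * because the index either no longer points at that location or, for
 * index nodes, holds a dirty in-memory copy that will be written out
 * again anyway. The wrapper below is hypothetical.
 */
static int example_node_is_obsolete(struct ubifs_info *c,
				    union ubifs_key *key, int level,
				    int lnum, int offs, int is_idx)
{
	int ret = ubifs_tnc_has_node(c, key, level, lnum, offs, is_idx);

	if (ret < 0)
		return ret;	/* propagate lookup errors */
	return !ret;		/* 1: obsolete, 0: still referenced */
}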
3238 
3253 int ubifs_dirty_idx_node(struct ubifs_info *c, union ubifs_key *key, int level,
3254  int lnum, int offs)
3255 {
3256  struct ubifs_znode *znode;
3257  int err = 0;
3258 
3259  mutex_lock(&c->tnc_mutex);
3260  znode = lookup_znode(c, key, level, lnum, offs);
3261  if (!znode)
3262  goto out_unlock;
3263  if (IS_ERR(znode)) {
3264  err = PTR_ERR(znode);
3265  goto out_unlock;
3266  }
3267  znode = dirty_cow_bottom_up(c, znode);
3268  if (IS_ERR(znode)) {
3269  err = PTR_ERR(znode);
3270  goto out_unlock;
3271  }
3272 
3273 out_unlock:
3274  mutex_unlock(&c->tnc_mutex);
3275  return err;
3276 }
3277 
3289 int dbg_check_inode_size(struct ubifs_info *c, const struct inode *inode,
3290  loff_t size)
3291 {
3292  int err, n;
3293  union ubifs_key from_key, to_key, *key;
3294  struct ubifs_znode *znode;
3295  unsigned int block;
3296 
3297  if (!S_ISREG(inode->i_mode))
3298  return 0;
3299  if (!dbg_is_chk_gen(c))
3300  return 0;
3301 
3302  block = (size + UBIFS_BLOCK_SIZE - 1) >> UBIFS_BLOCK_SHIFT;
3303  data_key_init(c, &from_key, inode->i_ino, block);
3304  highest_data_key(c, &to_key, inode->i_ino);
3305 
3306  mutex_lock(&c->tnc_mutex);
3307  err = ubifs_lookup_level0(c, &from_key, &znode, &n);
3308  if (err < 0)
3309  goto out_unlock;
3310 
3311  if (err) {
3312  err = -EINVAL;
3313  key = &from_key;
3314  goto out_dump;
3315  }
3316 
3317  err = tnc_next(c, &znode, &n);
3318  if (err == -ENOENT) {
3319  err = 0;
3320  goto out_unlock;
3321  }
3322  if (err < 0)
3323  goto out_unlock;
3324 
3325  ubifs_assert(err == 0);
3326  key = &znode->zbranch[n].key;
3327  if (!key_in_range(c, key, &from_key, &to_key))
3328  goto out_unlock;
3329 
3330 out_dump:
3331  block = key_block(c, key);
3332  ubifs_err("inode %lu has size %lld, but there are data at offset %lld",
3333  (unsigned long)inode->i_ino, size,
3334  ((loff_t)block) << UBIFS_BLOCK_SHIFT);
3335  mutex_unlock(&c->tnc_mutex);
3336  ubifs_dump_inode(c, inode);
3337  dump_stack();
3338  return -EINVAL;
3339 
3340 out_unlock:
3341  mutex_unlock(&c->tnc_mutex);
3342  return err;
3343 }
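/*
 * Illustrative usage, not part of tnc.c: dbg_check_inode_size() is meant
 * to run after an operation that shrinks a file, once the data keys
 * beyond the new size have been removed from the TNC. The wrapper below
 * is hypothetical.
 */
static int example_check_after_shrink(struct ubifs_info *c,
				      struct inode *inode, loff_t new_size)
{
	/* Returns -EINVAL and dumps the inode if stray data nodes remain */
	return dbg_check_inode_size(c, inode, new_size);
}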