Linux Kernel  3.7.1
debug.c
1 /*
2  * This file is part of UBIFS.
3  *
4  * Copyright (C) 2006-2008 Nokia Corporation
5  *
6  * This program is free software; you can redistribute it and/or modify it
7  * under the terms of the GNU General Public License version 2 as published by
8  * the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but WITHOUT
11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12  * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13  * more details.
14  *
15  * You should have received a copy of the GNU General Public License along with
16  * this program; if not, write to the Free Software Foundation, Inc., 51
17  * Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
18  *
19  * Authors: Artem Bityutskiy (Битюцкий Артём)
20  * Adrian Hunter
21  */
22 
23 /*
24  * This file implements most of the debugging stuff which is compiled in only
25  * when it is enabled. But some debugging check functions are implemented in
26  * corresponding subsystem, just because they are closely related and utilize
27  * various local functions of those subsystems.
28  */
29 
30 #include <linux/module.h>
31 #include <linux/debugfs.h>
32 #include <linux/math64.h>
33 #include <linux/uaccess.h>
34 #include <linux/random.h>
35 #include "ubifs.h"
36 
37 static DEFINE_SPINLOCK(dbg_lock);
38 
39 static const char *get_key_fmt(int fmt)
40 {
41  switch (fmt) {
 42  case UBIFS_SIMPLE_KEY_FMT:
 43  return "simple";
44  default:
45  return "unknown/invalid format";
46  }
47 }
48 
49 static const char *get_key_hash(int hash)
50 {
51  switch (hash) {
52  case UBIFS_KEY_HASH_R5:
53  return "R5";
 54  case UBIFS_KEY_HASH_TEST:
 55  return "test";
56  default:
57  return "unknown/invalid name hash";
58  }
59 }
60 
61 static const char *get_key_type(int type)
62 {
63  switch (type) {
64  case UBIFS_INO_KEY:
65  return "inode";
66  case UBIFS_DENT_KEY:
67  return "direntry";
68  case UBIFS_XENT_KEY:
69  return "xentry";
70  case UBIFS_DATA_KEY:
71  return "data";
72  case UBIFS_TRUN_KEY:
73  return "truncate";
74  default:
75  return "unknown/invalid key";
76  }
77 }
78 
79 static const char *get_dent_type(int type)
80 {
81  switch (type) {
82  case UBIFS_ITYPE_REG:
83  return "file";
84  case UBIFS_ITYPE_DIR:
85  return "dir";
86  case UBIFS_ITYPE_LNK:
87  return "symlink";
88  case UBIFS_ITYPE_BLK:
89  return "blkdev";
90  case UBIFS_ITYPE_CHR:
91  return "char dev";
92  case UBIFS_ITYPE_FIFO:
93  return "fifo";
94  case UBIFS_ITYPE_SOCK:
95  return "socket";
96  default:
97  return "unknown/invalid type";
98  }
99 }
100 
101 const char *dbg_snprintf_key(const struct ubifs_info *c,
102  const union ubifs_key *key, char *buffer, int len)
103 {
104  char *p = buffer;
105  int type = key_type(c, key);
106 
107  if (c->key_fmt == UBIFS_SIMPLE_KEY_FMT) {
108  switch (type) {
109  case UBIFS_INO_KEY:
110  len -= snprintf(p, len, "(%lu, %s)",
111  (unsigned long)key_inum(c, key),
112  get_key_type(type));
113  break;
114  case UBIFS_DENT_KEY:
115  case UBIFS_XENT_KEY:
116  len -= snprintf(p, len, "(%lu, %s, %#08x)",
117  (unsigned long)key_inum(c, key),
118  get_key_type(type), key_hash(c, key));
119  break;
120  case UBIFS_DATA_KEY:
121  len -= snprintf(p, len, "(%lu, %s, %u)",
122  (unsigned long)key_inum(c, key),
123  get_key_type(type), key_block(c, key));
124  break;
125  case UBIFS_TRUN_KEY:
126  len -= snprintf(p, len, "(%lu, %s)",
127  (unsigned long)key_inum(c, key),
128  get_key_type(type));
129  break;
130  default:
131  len -= snprintf(p, len, "(bad key type: %#08x, %#08x)",
132  key->u32[0], key->u32[1]);
133  }
134  } else
135  len -= snprintf(p, len, "bad key format %d", c->key_fmt);
136  ubifs_assert(len > 0);
137  return p;
138 }
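/*
 * Usage sketch for dbg_snprintf_key() above (illustrative only; it mirrors
 * the pattern used later in this file, e.g. in ubifs_dump_node()): callers
 * format a key into a stack buffer of DBG_KEY_BUF_LEN bytes and print the
 * returned pointer:
 *
 *	char key_buf[DBG_KEY_BUF_LEN];
 *	union ubifs_key key;
 *
 *	key_read(c, &ino->key, &key);
 *	pr_err("\tkey %s\n",
 *	       dbg_snprintf_key(c, &key, key_buf, DBG_KEY_BUF_LEN));
 */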
139 
140 const char *dbg_ntype(int type)
141 {
142  switch (type) {
143  case UBIFS_PAD_NODE:
144  return "padding node";
145  case UBIFS_SB_NODE:
146  return "superblock node";
147  case UBIFS_MST_NODE:
148  return "master node";
149  case UBIFS_REF_NODE:
150  return "reference node";
151  case UBIFS_INO_NODE:
152  return "inode node";
153  case UBIFS_DENT_NODE:
154  return "direntry node";
155  case UBIFS_XENT_NODE:
156  return "xentry node";
157  case UBIFS_DATA_NODE:
158  return "data node";
159  case UBIFS_TRUN_NODE:
160  return "truncate node";
161  case UBIFS_IDX_NODE:
162  return "indexing node";
163  case UBIFS_CS_NODE:
164  return "commit start node";
165  case UBIFS_ORPH_NODE:
166  return "orphan node";
167  default:
168  return "unknown node";
169  }
170 }
171 
172 static const char *dbg_gtype(int type)
173 {
174  switch (type) {
175  case UBIFS_NO_NODE_GROUP:
176  return "no node group";
177  case UBIFS_IN_NODE_GROUP:
178  return "in node group";
 179  case UBIFS_LAST_OF_NODE_GROUP:
 180  return "last of node group";
181  default:
182  return "unknown";
183  }
184 }
185 
186 const char *dbg_cstate(int cmt_state)
187 {
188  switch (cmt_state) {
189  case COMMIT_RESTING:
190  return "commit resting";
191  case COMMIT_BACKGROUND:
192  return "background commit requested";
193  case COMMIT_REQUIRED:
194  return "commit required";
 195  case COMMIT_RUNNING_BACKGROUND:
 196  return "BACKGROUND commit running";
 197  case COMMIT_RUNNING_REQUIRED:
 198  return "commit running and required";
199  case COMMIT_BROKEN:
200  return "broken commit";
201  default:
202  return "unknown commit state";
203  }
204 }
205 
206 const char *dbg_jhead(int jhead)
207 {
208  switch (jhead) {
209  case GCHD:
210  return "0 (GC)";
211  case BASEHD:
212  return "1 (base)";
213  case DATAHD:
214  return "2 (data)";
215  default:
216  return "unknown journal head";
217  }
218 }
219 
220 static void dump_ch(const struct ubifs_ch *ch)
221 {
222  pr_err("\tmagic %#x\n", le32_to_cpu(ch->magic));
223  pr_err("\tcrc %#x\n", le32_to_cpu(ch->crc));
224  pr_err("\tnode_type %d (%s)\n", ch->node_type,
225  dbg_ntype(ch->node_type));
226  pr_err("\tgroup_type %d (%s)\n", ch->group_type,
227  dbg_gtype(ch->group_type));
228  pr_err("\tsqnum %llu\n",
229  (unsigned long long)le64_to_cpu(ch->sqnum));
230  pr_err("\tlen %u\n", le32_to_cpu(ch->len));
231 }
232 
233 void ubifs_dump_inode(struct ubifs_info *c, const struct inode *inode)
234 {
235  const struct ubifs_inode *ui = ubifs_inode(inode);
236  struct qstr nm = { .name = NULL };
237  union ubifs_key key;
238  struct ubifs_dent_node *dent, *pdent = NULL;
239  int count = 2;
240 
241  pr_err("Dump in-memory inode:");
242  pr_err("\tinode %lu\n", inode->i_ino);
243  pr_err("\tsize %llu\n",
244  (unsigned long long)i_size_read(inode));
245  pr_err("\tnlink %u\n", inode->i_nlink);
246  pr_err("\tuid %u\n", (unsigned int)i_uid_read(inode));
247  pr_err("\tgid %u\n", (unsigned int)i_gid_read(inode));
248  pr_err("\tatime %u.%u\n",
249  (unsigned int)inode->i_atime.tv_sec,
250  (unsigned int)inode->i_atime.tv_nsec);
251  pr_err("\tmtime %u.%u\n",
252  (unsigned int)inode->i_mtime.tv_sec,
253  (unsigned int)inode->i_mtime.tv_nsec);
254  pr_err("\tctime %u.%u\n",
255  (unsigned int)inode->i_ctime.tv_sec,
256  (unsigned int)inode->i_ctime.tv_nsec);
257  pr_err("\tcreat_sqnum %llu\n", ui->creat_sqnum);
258  pr_err("\txattr_size %u\n", ui->xattr_size);
259  pr_err("\txattr_cnt %u\n", ui->xattr_cnt);
260  pr_err("\txattr_names %u\n", ui->xattr_names);
261  pr_err("\tdirty %u\n", ui->dirty);
262  pr_err("\txattr %u\n", ui->xattr);
 263  pr_err("\tbulk_read %u\n", ui->bulk_read);
264  pr_err("\tsynced_i_size %llu\n",
265  (unsigned long long)ui->synced_i_size);
266  pr_err("\tui_size %llu\n",
267  (unsigned long long)ui->ui_size);
268  pr_err("\tflags %d\n", ui->flags);
269  pr_err("\tcompr_type %d\n", ui->compr_type);
270  pr_err("\tlast_page_read %lu\n", ui->last_page_read);
271  pr_err("\tread_in_a_row %lu\n", ui->read_in_a_row);
272  pr_err("\tdata_len %d\n", ui->data_len);
273 
274  if (!S_ISDIR(inode->i_mode))
275  return;
276 
277  pr_err("List of directory entries:\n");
278  ubifs_assert(!mutex_is_locked(&c->tnc_mutex));
279 
280  lowest_dent_key(c, &key, inode->i_ino);
281  while (1) {
282  dent = ubifs_tnc_next_ent(c, &key, &nm);
283  if (IS_ERR(dent)) {
284  if (PTR_ERR(dent) != -ENOENT)
285  pr_err("error %ld\n", PTR_ERR(dent));
286  break;
287  }
288 
289  pr_err("\t%d: %s (%s)\n",
290  count++, dent->name, get_dent_type(dent->type));
291 
292  nm.name = dent->name;
293  nm.len = le16_to_cpu(dent->nlen);
294  kfree(pdent);
295  pdent = dent;
296  key_read(c, &dent->key, &key);
297  }
298  kfree(pdent);
299 }
300 
301 void ubifs_dump_node(const struct ubifs_info *c, const void *node)
302 {
303  int i, n;
304  union ubifs_key key;
305  const struct ubifs_ch *ch = node;
306  char key_buf[DBG_KEY_BUF_LEN];
307 
308  /* If the magic is incorrect, just hexdump the first bytes */
309  if (le32_to_cpu(ch->magic) != UBIFS_NODE_MAGIC) {
310  pr_err("Not a node, first %zu bytes:", UBIFS_CH_SZ);
311  print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 32, 1,
312  (void *)node, UBIFS_CH_SZ, 1);
313  return;
314  }
315 
316  spin_lock(&dbg_lock);
317  dump_ch(node);
318 
319  switch (ch->node_type) {
320  case UBIFS_PAD_NODE:
321  {
322  const struct ubifs_pad_node *pad = node;
323 
324  pr_err("\tpad_len %u\n", le32_to_cpu(pad->pad_len));
325  break;
326  }
327  case UBIFS_SB_NODE:
328  {
329  const struct ubifs_sb_node *sup = node;
330  unsigned int sup_flags = le32_to_cpu(sup->flags);
331 
332  pr_err("\tkey_hash %d (%s)\n",
333  (int)sup->key_hash, get_key_hash(sup->key_hash));
334  pr_err("\tkey_fmt %d (%s)\n",
335  (int)sup->key_fmt, get_key_fmt(sup->key_fmt));
336  pr_err("\tflags %#x\n", sup_flags);
337  pr_err("\t big_lpt %u\n",
338  !!(sup_flags & UBIFS_FLG_BIGLPT));
339  pr_err("\t space_fixup %u\n",
340  !!(sup_flags & UBIFS_FLG_SPACE_FIXUP));
341  pr_err("\tmin_io_size %u\n", le32_to_cpu(sup->min_io_size));
342  pr_err("\tleb_size %u\n", le32_to_cpu(sup->leb_size));
343  pr_err("\tleb_cnt %u\n", le32_to_cpu(sup->leb_cnt));
344  pr_err("\tmax_leb_cnt %u\n", le32_to_cpu(sup->max_leb_cnt));
345  pr_err("\tmax_bud_bytes %llu\n",
346  (unsigned long long)le64_to_cpu(sup->max_bud_bytes));
347  pr_err("\tlog_lebs %u\n", le32_to_cpu(sup->log_lebs));
348  pr_err("\tlpt_lebs %u\n", le32_to_cpu(sup->lpt_lebs));
349  pr_err("\torph_lebs %u\n", le32_to_cpu(sup->orph_lebs));
350  pr_err("\tjhead_cnt %u\n", le32_to_cpu(sup->jhead_cnt));
351  pr_err("\tfanout %u\n", le32_to_cpu(sup->fanout));
352  pr_err("\tlsave_cnt %u\n", le32_to_cpu(sup->lsave_cnt));
353  pr_err("\tdefault_compr %u\n",
354  (int)le16_to_cpu(sup->default_compr));
355  pr_err("\trp_size %llu\n",
356  (unsigned long long)le64_to_cpu(sup->rp_size));
357  pr_err("\trp_uid %u\n", le32_to_cpu(sup->rp_uid));
358  pr_err("\trp_gid %u\n", le32_to_cpu(sup->rp_gid));
359  pr_err("\tfmt_version %u\n", le32_to_cpu(sup->fmt_version));
360  pr_err("\ttime_gran %u\n", le32_to_cpu(sup->time_gran));
361  pr_err("\tUUID %pUB\n", sup->uuid);
362  break;
363  }
364  case UBIFS_MST_NODE:
365  {
366  const struct ubifs_mst_node *mst = node;
367 
368  pr_err("\thighest_inum %llu\n",
369  (unsigned long long)le64_to_cpu(mst->highest_inum));
370  pr_err("\tcommit number %llu\n",
371  (unsigned long long)le64_to_cpu(mst->cmt_no));
372  pr_err("\tflags %#x\n", le32_to_cpu(mst->flags));
373  pr_err("\tlog_lnum %u\n", le32_to_cpu(mst->log_lnum));
374  pr_err("\troot_lnum %u\n", le32_to_cpu(mst->root_lnum));
375  pr_err("\troot_offs %u\n", le32_to_cpu(mst->root_offs));
376  pr_err("\troot_len %u\n", le32_to_cpu(mst->root_len));
377  pr_err("\tgc_lnum %u\n", le32_to_cpu(mst->gc_lnum));
378  pr_err("\tihead_lnum %u\n", le32_to_cpu(mst->ihead_lnum));
379  pr_err("\tihead_offs %u\n", le32_to_cpu(mst->ihead_offs));
380  pr_err("\tindex_size %llu\n",
381  (unsigned long long)le64_to_cpu(mst->index_size));
382  pr_err("\tlpt_lnum %u\n", le32_to_cpu(mst->lpt_lnum));
383  pr_err("\tlpt_offs %u\n", le32_to_cpu(mst->lpt_offs));
384  pr_err("\tnhead_lnum %u\n", le32_to_cpu(mst->nhead_lnum));
385  pr_err("\tnhead_offs %u\n", le32_to_cpu(mst->nhead_offs));
386  pr_err("\tltab_lnum %u\n", le32_to_cpu(mst->ltab_lnum));
387  pr_err("\tltab_offs %u\n", le32_to_cpu(mst->ltab_offs));
388  pr_err("\tlsave_lnum %u\n", le32_to_cpu(mst->lsave_lnum));
389  pr_err("\tlsave_offs %u\n", le32_to_cpu(mst->lsave_offs));
390  pr_err("\tlscan_lnum %u\n", le32_to_cpu(mst->lscan_lnum));
391  pr_err("\tleb_cnt %u\n", le32_to_cpu(mst->leb_cnt));
392  pr_err("\tempty_lebs %u\n", le32_to_cpu(mst->empty_lebs));
393  pr_err("\tidx_lebs %u\n", le32_to_cpu(mst->idx_lebs));
394  pr_err("\ttotal_free %llu\n",
395  (unsigned long long)le64_to_cpu(mst->total_free));
396  pr_err("\ttotal_dirty %llu\n",
397  (unsigned long long)le64_to_cpu(mst->total_dirty));
398  pr_err("\ttotal_used %llu\n",
399  (unsigned long long)le64_to_cpu(mst->total_used));
400  pr_err("\ttotal_dead %llu\n",
401  (unsigned long long)le64_to_cpu(mst->total_dead));
402  pr_err("\ttotal_dark %llu\n",
403  (unsigned long long)le64_to_cpu(mst->total_dark));
404  break;
405  }
406  case UBIFS_REF_NODE:
407  {
408  const struct ubifs_ref_node *ref = node;
409 
410  pr_err("\tlnum %u\n", le32_to_cpu(ref->lnum));
411  pr_err("\toffs %u\n", le32_to_cpu(ref->offs));
412  pr_err("\tjhead %u\n", le32_to_cpu(ref->jhead));
413  break;
414  }
415  case UBIFS_INO_NODE:
416  {
417  const struct ubifs_ino_node *ino = node;
418 
419  key_read(c, &ino->key, &key);
420  pr_err("\tkey %s\n",
421  dbg_snprintf_key(c, &key, key_buf, DBG_KEY_BUF_LEN));
422  pr_err("\tcreat_sqnum %llu\n",
423  (unsigned long long)le64_to_cpu(ino->creat_sqnum));
424  pr_err("\tsize %llu\n",
425  (unsigned long long)le64_to_cpu(ino->size));
426  pr_err("\tnlink %u\n", le32_to_cpu(ino->nlink));
427  pr_err("\tatime %lld.%u\n",
428  (long long)le64_to_cpu(ino->atime_sec),
429  le32_to_cpu(ino->atime_nsec));
430  pr_err("\tmtime %lld.%u\n",
431  (long long)le64_to_cpu(ino->mtime_sec),
432  le32_to_cpu(ino->mtime_nsec));
433  pr_err("\tctime %lld.%u\n",
434  (long long)le64_to_cpu(ino->ctime_sec),
435  le32_to_cpu(ino->ctime_nsec));
436  pr_err("\tuid %u\n", le32_to_cpu(ino->uid));
437  pr_err("\tgid %u\n", le32_to_cpu(ino->gid));
438  pr_err("\tmode %u\n", le32_to_cpu(ino->mode));
439  pr_err("\tflags %#x\n", le32_to_cpu(ino->flags));
440  pr_err("\txattr_cnt %u\n", le32_to_cpu(ino->xattr_cnt));
441  pr_err("\txattr_size %u\n", le32_to_cpu(ino->xattr_size));
442  pr_err("\txattr_names %u\n", le32_to_cpu(ino->xattr_names));
443  pr_err("\tcompr_type %#x\n",
444  (int)le16_to_cpu(ino->compr_type));
445  pr_err("\tdata len %u\n", le32_to_cpu(ino->data_len));
446  break;
447  }
448  case UBIFS_DENT_NODE:
449  case UBIFS_XENT_NODE:
450  {
451  const struct ubifs_dent_node *dent = node;
452  int nlen = le16_to_cpu(dent->nlen);
453 
454  key_read(c, &dent->key, &key);
455  pr_err("\tkey %s\n",
456  dbg_snprintf_key(c, &key, key_buf, DBG_KEY_BUF_LEN));
457  pr_err("\tinum %llu\n",
458  (unsigned long long)le64_to_cpu(dent->inum));
459  pr_err("\ttype %d\n", (int)dent->type);
460  pr_err("\tnlen %d\n", nlen);
461  pr_err("\tname ");
462 
463  if (nlen > UBIFS_MAX_NLEN)
464  pr_err("(bad name length, not printing, bad or corrupted node)");
465  else {
466  for (i = 0; i < nlen && dent->name[i]; i++)
467  pr_cont("%c", dent->name[i]);
468  }
469  pr_cont("\n");
470 
471  break;
472  }
473  case UBIFS_DATA_NODE:
474  {
475  const struct ubifs_data_node *dn = node;
476  int dlen = le32_to_cpu(ch->len) - UBIFS_DATA_NODE_SZ;
477 
478  key_read(c, &dn->key, &key);
479  pr_err("\tkey %s\n",
480  dbg_snprintf_key(c, &key, key_buf, DBG_KEY_BUF_LEN));
481  pr_err("\tsize %u\n", le32_to_cpu(dn->size));
482  pr_err("\tcompr_typ %d\n",
483  (int)le16_to_cpu(dn->compr_type));
484  pr_err("\tdata size %d\n", dlen);
485  pr_err("\tdata:\n");
486  print_hex_dump(KERN_ERR, "\t", DUMP_PREFIX_OFFSET, 32, 1,
487  (void *)&dn->data, dlen, 0);
488  break;
489  }
490  case UBIFS_TRUN_NODE:
491  {
492  const struct ubifs_trun_node *trun = node;
493 
494  pr_err("\tinum %u\n", le32_to_cpu(trun->inum));
495  pr_err("\told_size %llu\n",
496  (unsigned long long)le64_to_cpu(trun->old_size));
497  pr_err("\tnew_size %llu\n",
498  (unsigned long long)le64_to_cpu(trun->new_size));
499  break;
500  }
501  case UBIFS_IDX_NODE:
502  {
503  const struct ubifs_idx_node *idx = node;
504 
505  n = le16_to_cpu(idx->child_cnt);
506  pr_err("\tchild_cnt %d\n", n);
507  pr_err("\tlevel %d\n", (int)le16_to_cpu(idx->level));
508  pr_err("\tBranches:\n");
509 
510  for (i = 0; i < n && i < c->fanout - 1; i++) {
511  const struct ubifs_branch *br;
512 
513  br = ubifs_idx_branch(c, idx, i);
514  key_read(c, &br->key, &key);
515  pr_err("\t%d: LEB %d:%d len %d key %s\n",
516  i, le32_to_cpu(br->lnum), le32_to_cpu(br->offs),
517  le32_to_cpu(br->len),
518  dbg_snprintf_key(c, &key, key_buf,
519  DBG_KEY_BUF_LEN));
520  }
521  break;
522  }
523  case UBIFS_CS_NODE:
524  break;
525  case UBIFS_ORPH_NODE:
526  {
527  const struct ubifs_orph_node *orph = node;
528 
529  pr_err("\tcommit number %llu\n",
530  (unsigned long long)
531  le64_to_cpu(orph->cmt_no) & LLONG_MAX);
532  pr_err("\tlast node flag %llu\n",
533  (unsigned long long)(le64_to_cpu(orph->cmt_no)) >> 63);
534  n = (le32_to_cpu(ch->len) - UBIFS_ORPH_NODE_SZ) >> 3;
535  pr_err("\t%d orphan inode numbers:\n", n);
536  for (i = 0; i < n; i++)
537  pr_err("\t ino %llu\n",
538  (unsigned long long)le64_to_cpu(orph->inos[i]));
539  break;
540  }
541  default:
542  pr_err("node type %d was not recognized\n",
543  (int)ch->node_type);
544  }
545  spin_unlock(&dbg_lock);
546 }
547 
 548 void ubifs_dump_budget_req(const struct ubifs_budget_req *req)
 549 {
550  spin_lock(&dbg_lock);
551  pr_err("Budgeting request: new_ino %d, dirtied_ino %d\n",
552  req->new_ino, req->dirtied_ino);
553  pr_err("\tnew_ino_d %d, dirtied_ino_d %d\n",
554  req->new_ino_d, req->dirtied_ino_d);
555  pr_err("\tnew_page %d, dirtied_page %d\n",
556  req->new_page, req->dirtied_page);
557  pr_err("\tnew_dent %d, mod_dent %d\n",
558  req->new_dent, req->mod_dent);
559  pr_err("\tidx_growth %d\n", req->idx_growth);
560  pr_err("\tdata_growth %d dd_growth %d\n",
561  req->data_growth, req->dd_growth);
562  spin_unlock(&dbg_lock);
563 }
564 
565 void ubifs_dump_lstats(const struct ubifs_lp_stats *lst)
566 {
567  spin_lock(&dbg_lock);
568  pr_err("(pid %d) Lprops statistics: empty_lebs %d, idx_lebs %d\n",
569  current->pid, lst->empty_lebs, lst->idx_lebs);
570  pr_err("\ttaken_empty_lebs %d, total_free %lld, total_dirty %lld\n",
571  lst->taken_empty_lebs, lst->total_free, lst->total_dirty);
572  pr_err("\ttotal_used %lld, total_dark %lld, total_dead %lld\n",
573  lst->total_used, lst->total_dark, lst->total_dead);
574  spin_unlock(&dbg_lock);
575 }
576 
577 void ubifs_dump_budg(struct ubifs_info *c, const struct ubifs_budg_info *bi)
578 {
579  int i;
580  struct rb_node *rb;
581  struct ubifs_bud *bud;
582  struct ubifs_gced_idx_leb *idx_gc;
583  long long available, outstanding, free;
584 
585  spin_lock(&c->space_lock);
586  spin_lock(&dbg_lock);
587  pr_err("(pid %d) Budgeting info: data budget sum %lld, total budget sum %lld\n",
588  current->pid, bi->data_growth + bi->dd_growth,
589  bi->data_growth + bi->dd_growth + bi->idx_growth);
590  pr_err("\tbudg_data_growth %lld, budg_dd_growth %lld, budg_idx_growth %lld\n",
591  bi->data_growth, bi->dd_growth, bi->idx_growth);
592  pr_err("\tmin_idx_lebs %d, old_idx_sz %llu, uncommitted_idx %lld\n",
593  bi->min_idx_lebs, bi->old_idx_sz, bi->uncommitted_idx);
594  pr_err("\tpage_budget %d, inode_budget %d, dent_budget %d\n",
595  bi->page_budget, bi->inode_budget, bi->dent_budget);
596  pr_err("\tnospace %u, nospace_rp %u\n", bi->nospace, bi->nospace_rp);
597  pr_err("\tdark_wm %d, dead_wm %d, max_idx_node_sz %d\n",
598  c->dark_wm, c->dead_wm, c->max_idx_node_sz);
599 
600  if (bi != &c->bi)
601  /*
602  * If we are dumping saved budgeting data, do not print
603  * additional information which is about the current state, not
604  * the old one which corresponded to the saved budgeting data.
605  */
606  goto out_unlock;
607 
608  pr_err("\tfreeable_cnt %d, calc_idx_sz %lld, idx_gc_cnt %d\n",
609  c->freeable_cnt, c->calc_idx_sz, c->idx_gc_cnt);
610  pr_err("\tdirty_pg_cnt %ld, dirty_zn_cnt %ld, clean_zn_cnt %ld\n",
611  atomic_long_read(&c->dirty_pg_cnt),
612  atomic_long_read(&c->dirty_zn_cnt),
613  atomic_long_read(&c->clean_zn_cnt));
614  pr_err("\tgc_lnum %d, ihead_lnum %d\n", c->gc_lnum, c->ihead_lnum);
615 
616  /* If we are in R/O mode, journal heads do not exist */
617  if (c->jheads)
618  for (i = 0; i < c->jhead_cnt; i++)
619  pr_err("\tjhead %s\t LEB %d\n",
620  dbg_jhead(c->jheads[i].wbuf.jhead),
621  c->jheads[i].wbuf.lnum);
622  for (rb = rb_first(&c->buds); rb; rb = rb_next(rb)) {
623  bud = rb_entry(rb, struct ubifs_bud, rb);
624  pr_err("\tbud LEB %d\n", bud->lnum);
625  }
 626  list_for_each_entry(bud, &c->old_buds, list)
 627  pr_err("\told bud LEB %d\n", bud->lnum);
628  list_for_each_entry(idx_gc, &c->idx_gc, list)
629  pr_err("\tGC'ed idx LEB %d unmap %d\n",
630  idx_gc->lnum, idx_gc->unmap);
631  pr_err("\tcommit state %d\n", c->cmt_state);
632 
633  /* Print budgeting predictions */
634  available = ubifs_calc_available(c, c->bi.min_idx_lebs);
635  outstanding = c->bi.data_growth + c->bi.dd_growth;
636  free = ubifs_get_free_space_nolock(c);
637  pr_err("Budgeting predictions:\n");
638  pr_err("\tavailable: %lld, outstanding %lld, free %lld\n",
639  available, outstanding, free);
640 out_unlock:
641  spin_unlock(&dbg_lock);
642  spin_unlock(&c->space_lock);
643 }
644 
645 void ubifs_dump_lprop(const struct ubifs_info *c, const struct ubifs_lprops *lp)
646 {
647  int i, spc, dark = 0, dead = 0;
648  struct rb_node *rb;
649  struct ubifs_bud *bud;
650 
651  spc = lp->free + lp->dirty;
652  if (spc < c->dead_wm)
653  dead = spc;
654  else
655  dark = ubifs_calc_dark(c, spc);
656 
657  if (lp->flags & LPROPS_INDEX)
658  pr_err("LEB %-7d free %-8d dirty %-8d used %-8d free + dirty %-8d flags %#x (",
659  lp->lnum, lp->free, lp->dirty, c->leb_size - spc, spc,
660  lp->flags);
661  else
662  pr_err("LEB %-7d free %-8d dirty %-8d used %-8d free + dirty %-8d dark %-4d dead %-4d nodes fit %-3d flags %#-4x (",
663  lp->lnum, lp->free, lp->dirty, c->leb_size - spc, spc,
664  dark, dead, (int)(spc / UBIFS_MAX_NODE_SZ), lp->flags);
665 
666  if (lp->flags & LPROPS_TAKEN) {
667  if (lp->flags & LPROPS_INDEX)
668  pr_cont("index, taken");
669  else
670  pr_cont("taken");
671  } else {
672  const char *s;
673 
674  if (lp->flags & LPROPS_INDEX) {
675  switch (lp->flags & LPROPS_CAT_MASK) {
676  case LPROPS_DIRTY_IDX:
677  s = "dirty index";
678  break;
679  case LPROPS_FRDI_IDX:
680  s = "freeable index";
681  break;
682  default:
683  s = "index";
684  }
685  } else {
686  switch (lp->flags & LPROPS_CAT_MASK) {
687  case LPROPS_UNCAT:
688  s = "not categorized";
689  break;
690  case LPROPS_DIRTY:
691  s = "dirty";
692  break;
693  case LPROPS_FREE:
694  s = "free";
695  break;
696  case LPROPS_EMPTY:
697  s = "empty";
698  break;
699  case LPROPS_FREEABLE:
700  s = "freeable";
701  break;
702  default:
703  s = NULL;
704  break;
705  }
706  }
707  pr_cont("%s", s);
708  }
709 
710  for (rb = rb_first((struct rb_root *)&c->buds); rb; rb = rb_next(rb)) {
711  bud = rb_entry(rb, struct ubifs_bud, rb);
712  if (bud->lnum == lp->lnum) {
713  int head = 0;
714  for (i = 0; i < c->jhead_cnt; i++) {
715  /*
716  * Note, if we are in R/O mode or in the middle
717  * of mounting/re-mounting, the write-buffers do
718  * not exist.
719  */
720  if (c->jheads &&
721  lp->lnum == c->jheads[i].wbuf.lnum) {
722  pr_cont(", jhead %s", dbg_jhead(i));
723  head = 1;
724  }
725  }
726  if (!head)
727  pr_cont(", bud of jhead %s",
728  dbg_jhead(bud->jhead));
729  }
730  }
731  if (lp->lnum == c->gc_lnum)
732  pr_cont(", GC LEB");
733  pr_cont(")\n");
734 }
735 
 736 void ubifs_dump_lprops(struct ubifs_info *c)
 737 {
738  int lnum, err;
739  struct ubifs_lprops lp;
740  struct ubifs_lp_stats lst;
741 
742  pr_err("(pid %d) start dumping LEB properties\n", current->pid);
743  ubifs_get_lp_stats(c, &lst);
744  ubifs_dump_lstats(&lst);
745 
746  for (lnum = c->main_first; lnum < c->leb_cnt; lnum++) {
747  err = ubifs_read_one_lp(c, lnum, &lp);
748  if (err)
749  ubifs_err("cannot read lprops for LEB %d", lnum);
750 
751  ubifs_dump_lprop(c, &lp);
752  }
753  pr_err("(pid %d) finish dumping LEB properties\n", current->pid);
754 }
755 
 756 void ubifs_dump_lpt_info(struct ubifs_info *c)
 757 {
758  int i;
759 
760  spin_lock(&dbg_lock);
761  pr_err("(pid %d) dumping LPT information\n", current->pid);
762  pr_err("\tlpt_sz: %lld\n", c->lpt_sz);
763  pr_err("\tpnode_sz: %d\n", c->pnode_sz);
764  pr_err("\tnnode_sz: %d\n", c->nnode_sz);
765  pr_err("\tltab_sz: %d\n", c->ltab_sz);
766  pr_err("\tlsave_sz: %d\n", c->lsave_sz);
767  pr_err("\tbig_lpt: %d\n", c->big_lpt);
768  pr_err("\tlpt_hght: %d\n", c->lpt_hght);
769  pr_err("\tpnode_cnt: %d\n", c->pnode_cnt);
770  pr_err("\tnnode_cnt: %d\n", c->nnode_cnt);
771  pr_err("\tdirty_pn_cnt: %d\n", c->dirty_pn_cnt);
772  pr_err("\tdirty_nn_cnt: %d\n", c->dirty_nn_cnt);
773  pr_err("\tlsave_cnt: %d\n", c->lsave_cnt);
774  pr_err("\tspace_bits: %d\n", c->space_bits);
775  pr_err("\tlpt_lnum_bits: %d\n", c->lpt_lnum_bits);
776  pr_err("\tlpt_offs_bits: %d\n", c->lpt_offs_bits);
777  pr_err("\tlpt_spc_bits: %d\n", c->lpt_spc_bits);
778  pr_err("\tpcnt_bits: %d\n", c->pcnt_bits);
779  pr_err("\tlnum_bits: %d\n", c->lnum_bits);
780  pr_err("\tLPT root is at %d:%d\n", c->lpt_lnum, c->lpt_offs);
781  pr_err("\tLPT head is at %d:%d\n",
782  c->nhead_lnum, c->nhead_offs);
783  pr_err("\tLPT ltab is at %d:%d\n", c->ltab_lnum, c->ltab_offs);
784  if (c->big_lpt)
785  pr_err("\tLPT lsave is at %d:%d\n",
786  c->lsave_lnum, c->lsave_offs);
787  for (i = 0; i < c->lpt_lebs; i++)
788  pr_err("\tLPT LEB %d free %d dirty %d tgc %d cmt %d\n",
789  i + c->lpt_first, c->ltab[i].free, c->ltab[i].dirty,
790  c->ltab[i].tgc, c->ltab[i].cmt);
791  spin_unlock(&dbg_lock);
792 }
793 
794 void ubifs_dump_sleb(const struct ubifs_info *c,
795  const struct ubifs_scan_leb *sleb, int offs)
796 {
797  struct ubifs_scan_node *snod;
798 
799  pr_err("(pid %d) start dumping scanned data from LEB %d:%d\n",
800  current->pid, sleb->lnum, offs);
801 
802  list_for_each_entry(snod, &sleb->nodes, list) {
803  cond_resched();
804  pr_err("Dumping node at LEB %d:%d len %d\n",
805  sleb->lnum, snod->offs, snod->len);
806  ubifs_dump_node(c, snod->node);
807  }
808 }
809 
810 void ubifs_dump_leb(const struct ubifs_info *c, int lnum)
811 {
812  struct ubifs_scan_leb *sleb;
813  struct ubifs_scan_node *snod;
814  void *buf;
815 
816  pr_err("(pid %d) start dumping LEB %d\n", current->pid, lnum);
817 
 818  buf = __vmalloc(c->leb_size, GFP_NOFS, PAGE_KERNEL);
 819  if (!buf) {
820  ubifs_err("cannot allocate memory for dumping LEB %d", lnum);
821  return;
822  }
823 
824  sleb = ubifs_scan(c, lnum, 0, buf, 0);
825  if (IS_ERR(sleb)) {
826  ubifs_err("scan error %d", (int)PTR_ERR(sleb));
827  goto out;
828  }
829 
830  pr_err("LEB %d has %d nodes ending at %d\n", lnum,
831  sleb->nodes_cnt, sleb->endpt);
832 
833  list_for_each_entry(snod, &sleb->nodes, list) {
834  cond_resched();
835  pr_err("Dumping node at LEB %d:%d len %d\n", lnum,
836  snod->offs, snod->len);
837  ubifs_dump_node(c, snod->node);
838  }
839 
840  pr_err("(pid %d) finish dumping LEB %d\n", current->pid, lnum);
841  ubifs_scan_destroy(sleb);
842 
843 out:
844  vfree(buf);
845  return;
846 }
847 
848 void ubifs_dump_znode(const struct ubifs_info *c,
849  const struct ubifs_znode *znode)
850 {
851  int n;
852  const struct ubifs_zbranch *zbr;
853  char key_buf[DBG_KEY_BUF_LEN];
854 
855  spin_lock(&dbg_lock);
856  if (znode->parent)
857  zbr = &znode->parent->zbranch[znode->iip];
858  else
859  zbr = &c->zroot;
860 
861  pr_err("znode %p, LEB %d:%d len %d parent %p iip %d level %d child_cnt %d flags %lx\n",
862  znode, zbr->lnum, zbr->offs, zbr->len, znode->parent, znode->iip,
863  znode->level, znode->child_cnt, znode->flags);
864 
865  if (znode->child_cnt <= 0 || znode->child_cnt > c->fanout) {
866  spin_unlock(&dbg_lock);
867  return;
868  }
869 
870  pr_err("zbranches:\n");
871  for (n = 0; n < znode->child_cnt; n++) {
872  zbr = &znode->zbranch[n];
873  if (znode->level > 0)
874  pr_err("\t%d: znode %p LEB %d:%d len %d key %s\n",
875  n, zbr->znode, zbr->lnum, zbr->offs, zbr->len,
876  dbg_snprintf_key(c, &zbr->key, key_buf,
877  DBG_KEY_BUF_LEN));
878  else
879  pr_err("\t%d: LNC %p LEB %d:%d len %d key %s\n",
880  n, zbr->znode, zbr->lnum, zbr->offs, zbr->len,
881  dbg_snprintf_key(c, &zbr->key, key_buf,
882  DBG_KEY_BUF_LEN));
883  }
884  spin_unlock(&dbg_lock);
885 }
886 
887 void ubifs_dump_heap(struct ubifs_info *c, struct ubifs_lpt_heap *heap, int cat)
888 {
889  int i;
890 
891  pr_err("(pid %d) start dumping heap cat %d (%d elements)\n",
892  current->pid, cat, heap->cnt);
893  for (i = 0; i < heap->cnt; i++) {
894  struct ubifs_lprops *lprops = heap->arr[i];
895 
896  pr_err("\t%d. LEB %d hpos %d free %d dirty %d flags %d\n",
897  i, lprops->lnum, lprops->hpos, lprops->free,
898  lprops->dirty, lprops->flags);
899  }
900  pr_err("(pid %d) finish dumping heap\n", current->pid);
901 }
902 
903 void ubifs_dump_pnode(struct ubifs_info *c, struct ubifs_pnode *pnode,
904  struct ubifs_nnode *parent, int iip)
905 {
906  int i;
907 
908  pr_err("(pid %d) dumping pnode:\n", current->pid);
909  pr_err("\taddress %zx parent %zx cnext %zx\n",
910  (size_t)pnode, (size_t)parent, (size_t)pnode->cnext);
911  pr_err("\tflags %lu iip %d level %d num %d\n",
912  pnode->flags, iip, pnode->level, pnode->num);
913  for (i = 0; i < UBIFS_LPT_FANOUT; i++) {
914  struct ubifs_lprops *lp = &pnode->lprops[i];
915 
916  pr_err("\t%d: free %d dirty %d flags %d lnum %d\n",
917  i, lp->free, lp->dirty, lp->flags, lp->lnum);
918  }
919 }
920 
 921 void ubifs_dump_tnc(struct ubifs_info *c)
 922 {
923  struct ubifs_znode *znode;
924  int level;
925 
926  pr_err("\n");
927  pr_err("(pid %d) start dumping TNC tree\n", current->pid);
928  znode = ubifs_tnc_levelorder_next(c->zroot.znode, NULL);
929  level = znode->level;
930  pr_err("== Level %d ==\n", level);
931  while (znode) {
932  if (level != znode->level) {
933  level = znode->level;
934  pr_err("== Level %d ==\n", level);
935  }
936  ubifs_dump_znode(c, znode);
937  znode = ubifs_tnc_levelorder_next(c->zroot.znode, znode);
938  }
939  pr_err("(pid %d) finish dumping TNC tree\n", current->pid);
940 }
941 
942 static int dump_znode(struct ubifs_info *c, struct ubifs_znode *znode,
943  void *priv)
944 {
945  ubifs_dump_znode(c, znode);
946  return 0;
947 }
948 
 956 void ubifs_dump_index(struct ubifs_info *c)
 957 {
958  dbg_walk_index(c, NULL, dump_znode, NULL);
959 }
960 
 968 void dbg_save_space_info(struct ubifs_info *c)
 969 {
970  struct ubifs_debug_info *d = c->dbg;
971  int freeable_cnt;
972 
973  spin_lock(&c->space_lock);
974  memcpy(&d->saved_lst, &c->lst, sizeof(struct ubifs_lp_stats));
975  memcpy(&d->saved_bi, &c->bi, sizeof(struct ubifs_budg_info));
 976  d->saved_idx_gc_cnt = c->idx_gc_cnt;
 977 
978  /*
979  * We use a dirty hack here and zero out @c->freeable_cnt, because it
980  * affects the free space calculations, and UBIFS might not know about
981  * all freeable eraseblocks. Indeed, we know about freeable eraseblocks
982  * only when we read their lprops, and we do this only lazily, upon the
983  * need. So at any given point of time @c->freeable_cnt might be not
984  * exactly accurate.
985  *
986  * Just one example about the issue we hit when we did not zero
987  * @c->freeable_cnt.
988  * 1. The file-system is mounted R/O, c->freeable_cnt is %0. We save the
989  * amount of free space in @d->saved_free
990  * 2. We re-mount R/W, which makes UBIFS to read the "lsave"
991  * information from flash, where we cache LEBs from various
992  * categories ('ubifs_remount_fs()' -> 'ubifs_lpt_init()'
993  * -> 'lpt_init_wr()' -> 'read_lsave()' -> 'ubifs_lpt_lookup()'
994  * -> 'ubifs_get_pnode()' -> 'update_cats()'
995  * -> 'ubifs_add_to_cat()').
996  * 3. Lsave contains a freeable eraseblock, and @c->freeable_cnt
997  * becomes %1.
998  * 4. We calculate the amount of free space when the re-mount is
999  * finished in 'dbg_check_space_info()' and it does not match
1000  * @d->saved_free.
1001  */
1002  freeable_cnt = c->freeable_cnt;
 1003  c->freeable_cnt = 0;
 1004  d->saved_free = ubifs_get_free_space_nolock(c);
 1005  c->freeable_cnt = freeable_cnt;
1006  spin_unlock(&c->space_lock);
1007 }
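/*
 * Pairing sketch for dbg_save_space_info()/dbg_check_space_info() (an
 * assumption about typical use, based on the re-mount scenario described
 * in the comment above; remount_rw_operation() is a hypothetical
 * placeholder for the operation being verified):
 *
 *	dbg_save_space_info(c);
 *	err = remount_rw_operation(c);
 *	if (!err)
 *		err = dbg_check_space_info(c);
 */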
1008 
 1018 int dbg_check_space_info(struct ubifs_info *c)
 1019 {
1020  struct ubifs_debug_info *d = c->dbg;
1021  struct ubifs_lp_stats lst;
1022  long long free;
1023  int freeable_cnt;
1024 
1025  spin_lock(&c->space_lock);
1026  freeable_cnt = c->freeable_cnt;
1027  c->freeable_cnt = 0;
1028  free = ubifs_get_free_space_nolock(c);
1029  c->freeable_cnt = freeable_cnt;
1030  spin_unlock(&c->space_lock);
1031 
1032  if (free != d->saved_free) {
1033  ubifs_err("free space changed from %lld to %lld",
1034  d->saved_free, free);
1035  goto out;
1036  }
1037 
1038  return 0;
1039 
1040 out:
 1041  ubifs_msg("saved lprops statistics dump");
 1042  ubifs_dump_lstats(&d->saved_lst);
 1043  ubifs_msg("saved budgeting info dump");
1044  ubifs_dump_budg(c, &d->saved_bi);
1045  ubifs_msg("saved idx_gc_cnt %d", d->saved_idx_gc_cnt);
1046  ubifs_msg("current lprops statistics dump");
1047  ubifs_get_lp_stats(c, &lst);
1048  ubifs_dump_lstats(&lst);
1049  ubifs_msg("current budgeting info dump");
1050  ubifs_dump_budg(c, &c->bi);
1051  dump_stack();
1052  return -EINVAL;
1053 }
1054 
1065 int dbg_check_synced_i_size(const struct ubifs_info *c, struct inode *inode)
1066 {
1067  int err = 0;
1068  struct ubifs_inode *ui = ubifs_inode(inode);
1069 
1070  if (!dbg_is_chk_gen(c))
1071  return 0;
1072  if (!S_ISREG(inode->i_mode))
1073  return 0;
1074 
1075  mutex_lock(&ui->ui_mutex);
1076  spin_lock(&ui->ui_lock);
1077  if (ui->ui_size != ui->synced_i_size && !ui->dirty) {
1078  ubifs_err("ui_size is %lld, synced_i_size is %lld, but inode is clean",
1079  ui->ui_size, ui->synced_i_size);
1080  ubifs_err("i_ino %lu, i_mode %#x, i_size %lld", inode->i_ino,
1081  inode->i_mode, i_size_read(inode));
1082  dump_stack();
1083  err = -EINVAL;
1084  }
1085  spin_unlock(&ui->ui_lock);
1086  mutex_unlock(&ui->ui_mutex);
1087  return err;
1088 }
1089 
1090 /*
1091  * dbg_check_dir - check directory inode size and link count.
1092  * @c: UBIFS file-system description object
1093  * @dir: the directory to calculate size for
1094  * @size: the result is returned here
1095  *
1096  * This function makes sure that directory size and link count are correct.
1097  * Returns zero in case of success and a negative error code in case of
1098  * failure.
1099  *
 1100  * Note, it is a good idea to make sure the @dir->i_mutex is locked before
1101  * calling this function.
1102  */
1103 int dbg_check_dir(struct ubifs_info *c, const struct inode *dir)
1104 {
1105  unsigned int nlink = 2;
1106  union ubifs_key key;
1107  struct ubifs_dent_node *dent, *pdent = NULL;
1108  struct qstr nm = { .name = NULL };
1109  loff_t size = UBIFS_INO_NODE_SZ;
1110 
1111  if (!dbg_is_chk_gen(c))
1112  return 0;
1113 
1114  if (!S_ISDIR(dir->i_mode))
1115  return 0;
1116 
1117  lowest_dent_key(c, &key, dir->i_ino);
1118  while (1) {
1119  int err;
1120 
1121  dent = ubifs_tnc_next_ent(c, &key, &nm);
1122  if (IS_ERR(dent)) {
1123  err = PTR_ERR(dent);
1124  if (err == -ENOENT)
1125  break;
1126  return err;
1127  }
1128 
1129  nm.name = dent->name;
1130  nm.len = le16_to_cpu(dent->nlen);
1131  size += CALC_DENT_SIZE(nm.len);
1132  if (dent->type == UBIFS_ITYPE_DIR)
1133  nlink += 1;
1134  kfree(pdent);
1135  pdent = dent;
1136  key_read(c, &dent->key, &key);
1137  }
1138  kfree(pdent);
1139 
1140  if (i_size_read(dir) != size) {
1141  ubifs_err("directory inode %lu has size %llu, but calculated size is %llu",
1142  dir->i_ino, (unsigned long long)i_size_read(dir),
1143  (unsigned long long)size);
1144  ubifs_dump_inode(c, dir);
1145  dump_stack();
1146  return -EINVAL;
1147  }
1148  if (dir->i_nlink != nlink) {
1149  ubifs_err("directory inode %lu has nlink %u, but calculated nlink is %u",
1150  dir->i_ino, dir->i_nlink, nlink);
1151  ubifs_dump_inode(c, dir);
1152  dump_stack();
1153  return -EINVAL;
1154  }
1155 
1156  return 0;
1157 }
1158 
1172 static int dbg_check_key_order(struct ubifs_info *c, struct ubifs_zbranch *zbr1,
1173  struct ubifs_zbranch *zbr2)
1174 {
1175  int err, nlen1, nlen2, cmp;
1176  struct ubifs_dent_node *dent1, *dent2;
1177  union ubifs_key key;
1178  char key_buf[DBG_KEY_BUF_LEN];
1179 
 1180  ubifs_assert(!keys_cmp(c, &zbr1->key, &zbr2->key));
 1181  dent1 = kmalloc(UBIFS_MAX_DENT_NODE_SZ, GFP_NOFS);
 1182  if (!dent1)
 1183  return -ENOMEM;
 1184  dent2 = kmalloc(UBIFS_MAX_DENT_NODE_SZ, GFP_NOFS);
 1185  if (!dent2) {
1186  err = -ENOMEM;
1187  goto out_free;
1188  }
1189 
1190  err = ubifs_tnc_read_node(c, zbr1, dent1);
1191  if (err)
1192  goto out_free;
1193  err = ubifs_validate_entry(c, dent1);
1194  if (err)
1195  goto out_free;
1196 
1197  err = ubifs_tnc_read_node(c, zbr2, dent2);
1198  if (err)
1199  goto out_free;
1200  err = ubifs_validate_entry(c, dent2);
1201  if (err)
1202  goto out_free;
1203 
1204  /* Make sure node keys are the same as in zbranch */
1205  err = 1;
1206  key_read(c, &dent1->key, &key);
1207  if (keys_cmp(c, &zbr1->key, &key)) {
1208  ubifs_err("1st entry at %d:%d has key %s", zbr1->lnum,
1209  zbr1->offs, dbg_snprintf_key(c, &key, key_buf,
1210  DBG_KEY_BUF_LEN));
1211  ubifs_err("but it should have key %s according to tnc",
1212  dbg_snprintf_key(c, &zbr1->key, key_buf,
1213  DBG_KEY_BUF_LEN));
1214  ubifs_dump_node(c, dent1);
1215  goto out_free;
1216  }
1217 
1218  key_read(c, &dent2->key, &key);
1219  if (keys_cmp(c, &zbr2->key, &key)) {
1220  ubifs_err("2nd entry at %d:%d has key %s", zbr1->lnum,
1221  zbr1->offs, dbg_snprintf_key(c, &key, key_buf,
1222  DBG_KEY_BUF_LEN));
1223  ubifs_err("but it should have key %s according to tnc",
1224  dbg_snprintf_key(c, &zbr2->key, key_buf,
1225  DBG_KEY_BUF_LEN));
1226  ubifs_dump_node(c, dent2);
1227  goto out_free;
1228  }
1229 
1230  nlen1 = le16_to_cpu(dent1->nlen);
1231  nlen2 = le16_to_cpu(dent2->nlen);
1232 
1233  cmp = memcmp(dent1->name, dent2->name, min_t(int, nlen1, nlen2));
1234  if (cmp < 0 || (cmp == 0 && nlen1 < nlen2)) {
1235  err = 0;
1236  goto out_free;
1237  }
1238  if (cmp == 0 && nlen1 == nlen2)
1239  ubifs_err("2 xent/dent nodes with the same name");
1240  else
1241  ubifs_err("bad order of colliding key %s",
1242  dbg_snprintf_key(c, &key, key_buf, DBG_KEY_BUF_LEN));
1243 
1244  ubifs_msg("first node at %d:%d\n", zbr1->lnum, zbr1->offs);
1245  ubifs_dump_node(c, dent1);
1246  ubifs_msg("second node at %d:%d\n", zbr2->lnum, zbr2->offs);
1247  ubifs_dump_node(c, dent2);
1248 
1249 out_free:
1250  kfree(dent2);
1251  kfree(dent1);
1252  return err;
1253 }
1254 
1263 static int dbg_check_znode(struct ubifs_info *c, struct ubifs_zbranch *zbr)
1264 {
1265  struct ubifs_znode *znode = zbr->znode;
1266  struct ubifs_znode *zp = znode->parent;
1267  int n, err, cmp;
1268 
1269  if (znode->child_cnt <= 0 || znode->child_cnt > c->fanout) {
1270  err = 1;
1271  goto out;
1272  }
1273  if (znode->level < 0) {
1274  err = 2;
1275  goto out;
1276  }
1277  if (znode->iip < 0 || znode->iip >= c->fanout) {
1278  err = 3;
1279  goto out;
1280  }
1281 
1282  if (zbr->len == 0)
1283  /* Only dirty zbranch may have no on-flash nodes */
1284  if (!ubifs_zn_dirty(znode)) {
1285  err = 4;
1286  goto out;
1287  }
1288 
1289  if (ubifs_zn_dirty(znode)) {
1290  /*
1291  * If znode is dirty, its parent has to be dirty as well. The
1292  * order of the operation is important, so we have to have
1293  * memory barriers.
1294  */
1295  smp_mb();
1296  if (zp && !ubifs_zn_dirty(zp)) {
1297  /*
1298  * The dirty flag is atomic and is cleared outside the
1299  * TNC mutex, so znode's dirty flag may now have
1300  * been cleared. The child is always cleared before the
1301  * parent, so we just need to check again.
1302  */
1303  smp_mb();
1304  if (ubifs_zn_dirty(znode)) {
1305  err = 5;
1306  goto out;
1307  }
1308  }
1309  }
1310 
1311  if (zp) {
1312  const union ubifs_key *min, *max;
1313 
1314  if (znode->level != zp->level - 1) {
1315  err = 6;
1316  goto out;
1317  }
1318 
1319  /* Make sure the 'parent' pointer in our znode is correct */
1320  err = ubifs_search_zbranch(c, zp, &zbr->key, &n);
1321  if (!err) {
1322  /* This zbranch does not exist in the parent */
1323  err = 7;
1324  goto out;
1325  }
1326 
1327  if (znode->iip >= zp->child_cnt) {
1328  err = 8;
1329  goto out;
1330  }
1331 
1332  if (znode->iip != n) {
1333  /* This may happen only in case of collisions */
1334  if (keys_cmp(c, &zp->zbranch[n].key,
1335  &zp->zbranch[znode->iip].key)) {
1336  err = 9;
1337  goto out;
1338  }
1339  n = znode->iip;
1340  }
1341 
1342  /*
1343  * Make sure that the first key in our znode is greater than or
1344  * equal to the key in the pointing zbranch.
1345  */
1346  min = &zbr->key;
1347  cmp = keys_cmp(c, min, &znode->zbranch[0].key);
1348  if (cmp == 1) {
1349  err = 10;
1350  goto out;
1351  }
1352 
1353  if (n + 1 < zp->child_cnt) {
1354  max = &zp->zbranch[n + 1].key;
1355 
1356  /*
 1357  * Make sure the last key in our znode is less than or
 1358  * equivalent to the key in the zbranch which goes
1359  * after our pointing zbranch.
1360  */
1361  cmp = keys_cmp(c, max,
1362  &znode->zbranch[znode->child_cnt - 1].key);
1363  if (cmp == -1) {
1364  err = 11;
1365  goto out;
1366  }
1367  }
1368  } else {
1369  /* This may only be root znode */
1370  if (zbr != &c->zroot) {
1371  err = 12;
1372  goto out;
1373  }
1374  }
1375 
1376  /*
 1377  * Make sure that the next key is greater than or equivalent to the previous
1378  * one.
1379  */
1380  for (n = 1; n < znode->child_cnt; n++) {
1381  cmp = keys_cmp(c, &znode->zbranch[n - 1].key,
1382  &znode->zbranch[n].key);
1383  if (cmp > 0) {
1384  err = 13;
1385  goto out;
1386  }
1387  if (cmp == 0) {
1388  /* This can only be keys with colliding hash */
1389  if (!is_hash_key(c, &znode->zbranch[n].key)) {
1390  err = 14;
1391  goto out;
1392  }
1393 
1394  if (znode->level != 0 || c->replaying)
1395  continue;
1396 
1397  /*
1398  * Colliding keys should follow binary order of
1399  * corresponding xentry/dentry names.
1400  */
1401  err = dbg_check_key_order(c, &znode->zbranch[n - 1],
1402  &znode->zbranch[n]);
1403  if (err < 0)
1404  return err;
1405  if (err) {
1406  err = 15;
1407  goto out;
1408  }
1409  }
1410  }
1411 
1412  for (n = 0; n < znode->child_cnt; n++) {
1413  if (!znode->zbranch[n].znode &&
1414  (znode->zbranch[n].lnum == 0 ||
1415  znode->zbranch[n].len == 0)) {
1416  err = 16;
1417  goto out;
1418  }
1419 
1420  if (znode->zbranch[n].lnum != 0 &&
1421  znode->zbranch[n].len == 0) {
1422  err = 17;
1423  goto out;
1424  }
1425 
1426  if (znode->zbranch[n].lnum == 0 &&
1427  znode->zbranch[n].len != 0) {
1428  err = 18;
1429  goto out;
1430  }
1431 
1432  if (znode->zbranch[n].lnum == 0 &&
1433  znode->zbranch[n].offs != 0) {
1434  err = 19;
1435  goto out;
1436  }
1437 
1438  if (znode->level != 0 && znode->zbranch[n].znode)
1439  if (znode->zbranch[n].znode->parent != znode) {
1440  err = 20;
1441  goto out;
1442  }
1443  }
1444 
1445  return 0;
1446 
1447 out:
1448  ubifs_err("failed, error %d", err);
1449  ubifs_msg("dump of the znode");
1450  ubifs_dump_znode(c, znode);
1451  if (zp) {
1452  ubifs_msg("dump of the parent znode");
1453  ubifs_dump_znode(c, zp);
1454  }
1455  dump_stack();
1456  return -EINVAL;
1457 }
1458 
1467 int dbg_check_tnc(struct ubifs_info *c, int extra)
1468 {
1469  struct ubifs_znode *znode;
1470  long clean_cnt = 0, dirty_cnt = 0;
1471  int err, last;
1472 
1473  if (!dbg_is_chk_index(c))
1474  return 0;
1475 
1476  ubifs_assert(mutex_is_locked(&c->tnc_mutex));
1477  if (!c->zroot.znode)
1478  return 0;
1479 
1480  znode = ubifs_tnc_postorder_first(c->zroot.znode);
1481  while (1) {
1482  struct ubifs_znode *prev;
1483  struct ubifs_zbranch *zbr;
1484 
1485  if (!znode->parent)
1486  zbr = &c->zroot;
1487  else
1488  zbr = &znode->parent->zbranch[znode->iip];
1489 
1490  err = dbg_check_znode(c, zbr);
1491  if (err)
1492  return err;
1493 
1494  if (extra) {
1495  if (ubifs_zn_dirty(znode))
1496  dirty_cnt += 1;
1497  else
1498  clean_cnt += 1;
1499  }
1500 
1501  prev = znode;
1502  znode = ubifs_tnc_postorder_next(znode);
1503  if (!znode)
1504  break;
1505 
1506  /*
1507  * If the last key of this znode is equivalent to the first key
1508  * of the next znode (collision), then check order of the keys.
1509  */
1510  last = prev->child_cnt - 1;
1511  if (prev->level == 0 && znode->level == 0 && !c->replaying &&
1512  !keys_cmp(c, &prev->zbranch[last].key,
1513  &znode->zbranch[0].key)) {
1514  err = dbg_check_key_order(c, &prev->zbranch[last],
1515  &znode->zbranch[0]);
1516  if (err < 0)
1517  return err;
1518  if (err) {
1519  ubifs_msg("first znode");
1520  ubifs_dump_znode(c, prev);
1521  ubifs_msg("second znode");
1522  ubifs_dump_znode(c, znode);
1523  return -EINVAL;
1524  }
1525  }
1526  }
1527 
1528  if (extra) {
1529  if (clean_cnt != atomic_long_read(&c->clean_zn_cnt)) {
1530  ubifs_err("incorrect clean_zn_cnt %ld, calculated %ld",
1531  atomic_long_read(&c->clean_zn_cnt),
1532  clean_cnt);
1533  return -EINVAL;
1534  }
1535  if (dirty_cnt != atomic_long_read(&c->dirty_zn_cnt)) {
1536  ubifs_err("incorrect dirty_zn_cnt %ld, calculated %ld",
1537  atomic_long_read(&c->dirty_zn_cnt),
1538  dirty_cnt);
1539  return -EINVAL;
1540  }
1541  }
1542 
1543  return 0;
1544 }
1545 
 1561 int dbg_walk_index(struct ubifs_info *c, dbg_leaf_callback leaf_cb,
 1562  dbg_znode_callback znode_cb, void *priv)
1563 {
1564  int err;
1565  struct ubifs_zbranch *zbr;
1566  struct ubifs_znode *znode, *child;
1567 
1568  mutex_lock(&c->tnc_mutex);
1569  /* If the root indexing node is not in TNC - pull it */
1570  if (!c->zroot.znode) {
1571  c->zroot.znode = ubifs_load_znode(c, &c->zroot, NULL, 0);
1572  if (IS_ERR(c->zroot.znode)) {
1573  err = PTR_ERR(c->zroot.znode);
1574  c->zroot.znode = NULL;
1575  goto out_unlock;
1576  }
1577  }
1578 
1579  /*
1580  * We are going to traverse the indexing tree in the postorder manner.
1581  * Go down and find the leftmost indexing node where we are going to
1582  * start from.
1583  */
1584  znode = c->zroot.znode;
1585  while (znode->level > 0) {
1586  zbr = &znode->zbranch[0];
1587  child = zbr->znode;
1588  if (!child) {
1589  child = ubifs_load_znode(c, zbr, znode, 0);
1590  if (IS_ERR(child)) {
1591  err = PTR_ERR(child);
1592  goto out_unlock;
1593  }
1594  zbr->znode = child;
1595  }
1596 
1597  znode = child;
1598  }
1599 
1600  /* Iterate over all indexing nodes */
1601  while (1) {
1602  int idx;
1603 
1604  cond_resched();
1605 
1606  if (znode_cb) {
1607  err = znode_cb(c, znode, priv);
1608  if (err) {
1609  ubifs_err("znode checking function returned error %d",
1610  err);
1611  ubifs_dump_znode(c, znode);
1612  goto out_dump;
1613  }
1614  }
1615  if (leaf_cb && znode->level == 0) {
1616  for (idx = 0; idx < znode->child_cnt; idx++) {
1617  zbr = &znode->zbranch[idx];
1618  err = leaf_cb(c, zbr, priv);
1619  if (err) {
1620  ubifs_err("leaf checking function returned error %d, for leaf at LEB %d:%d",
1621  err, zbr->lnum, zbr->offs);
1622  goto out_dump;
1623  }
1624  }
1625  }
1626 
1627  if (!znode->parent)
1628  break;
1629 
1630  idx = znode->iip + 1;
1631  znode = znode->parent;
1632  if (idx < znode->child_cnt) {
1633  /* Switch to the next index in the parent */
1634  zbr = &znode->zbranch[idx];
1635  child = zbr->znode;
1636  if (!child) {
1637  child = ubifs_load_znode(c, zbr, znode, idx);
1638  if (IS_ERR(child)) {
1639  err = PTR_ERR(child);
1640  goto out_unlock;
1641  }
1642  zbr->znode = child;
1643  }
1644  znode = child;
1645  } else
1646  /*
1647  * This is the last child, switch to the parent and
1648  * continue.
1649  */
1650  continue;
1651 
1652  /* Go to the lowest leftmost znode in the new sub-tree */
1653  while (znode->level > 0) {
1654  zbr = &znode->zbranch[0];
1655  child = zbr->znode;
1656  if (!child) {
1657  child = ubifs_load_znode(c, zbr, znode, 0);
1658  if (IS_ERR(child)) {
1659  err = PTR_ERR(child);
1660  goto out_unlock;
1661  }
1662  zbr->znode = child;
1663  }
1664  znode = child;
1665  }
1666  }
1667 
1668  mutex_unlock(&c->tnc_mutex);
1669  return 0;
1670 
1671 out_dump:
1672  if (znode->parent)
1673  zbr = &znode->parent->zbranch[znode->iip];
1674  else
1675  zbr = &c->zroot;
1676  ubifs_msg("dump of znode at LEB %d:%d", zbr->lnum, zbr->offs);
1677  ubifs_dump_znode(c, znode);
1678 out_unlock:
1679  mutex_unlock(&c->tnc_mutex);
1680  return err;
1681 }
1682 
1693 static int add_size(struct ubifs_info *c, struct ubifs_znode *znode, void *priv)
1694 {
1695  long long *idx_size = priv;
1696  int add;
1697 
1698  add = ubifs_idx_node_sz(c, znode->child_cnt);
1699  add = ALIGN(add, 8);
1700  *idx_size += add;
1701  return 0;
1702 }
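/*
 * Worked example for add_size() above (numbers are illustrative only): if
 * ubifs_idx_node_sz(c, znode->child_cnt) returned 157 bytes for a znode,
 * ALIGN(157, 8) rounds it up to 160, and 160 bytes are added to the running
 * total that dbg_check_idx_size() later compares against @idx_size.
 */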
1703 
1713 int dbg_check_idx_size(struct ubifs_info *c, long long idx_size)
1714 {
1715  int err;
1716  long long calc = 0;
1717 
1718  if (!dbg_is_chk_index(c))
1719  return 0;
1720 
1721  err = dbg_walk_index(c, NULL, add_size, &calc);
1722  if (err) {
1723  ubifs_err("error %d while walking the index", err);
1724  return err;
1725  }
1726 
1727  if (calc != idx_size) {
1728  ubifs_err("index size check failed: calculated size is %lld, should be %lld",
1729  calc, idx_size);
1730  dump_stack();
1731  return -EINVAL;
1732  }
1733 
1734  return 0;
1735 }
1736 
1757 struct fsck_inode {
 1758  struct rb_node rb;
 1759  ino_t inum;
 1760  unsigned int mode;
 1761  unsigned int nlink;
 1762  unsigned int xattr_cnt;
 1763  int references;
 1764  int calc_cnt;
 1765  long long size;
1766  unsigned int xattr_sz;
1767  long long calc_sz;
1768  long long calc_xcnt;
1769  long long calc_xsz;
1770  unsigned int xattr_nms;
1771  long long calc_xnms;
1772 };
1773 
1778 struct fsck_data {
1779  struct rb_root inodes;
1780 };
1781 
1792 static struct fsck_inode *add_inode(struct ubifs_info *c,
1793  struct fsck_data *fsckd,
1794  struct ubifs_ino_node *ino)
1795 {
1796  struct rb_node **p, *parent = NULL;
1797  struct fsck_inode *fscki;
1798  ino_t inum = key_inum_flash(c, &ino->key);
1799  struct inode *inode;
1800  struct ubifs_inode *ui;
1801 
1802  p = &fsckd->inodes.rb_node;
1803  while (*p) {
1804  parent = *p;
1805  fscki = rb_entry(parent, struct fsck_inode, rb);
1806  if (inum < fscki->inum)
1807  p = &(*p)->rb_left;
1808  else if (inum > fscki->inum)
1809  p = &(*p)->rb_right;
1810  else
1811  return fscki;
1812  }
1813 
1814  if (inum > c->highest_inum) {
1815  ubifs_err("too high inode number, max. is %lu",
1816  (unsigned long)c->highest_inum);
1817  return ERR_PTR(-EINVAL);
1818  }
1819 
1820  fscki = kzalloc(sizeof(struct fsck_inode), GFP_NOFS);
1821  if (!fscki)
1822  return ERR_PTR(-ENOMEM);
1823 
1824  inode = ilookup(c->vfs_sb, inum);
1825 
1826  fscki->inum = inum;
1827  /*
1828  * If the inode is present in the VFS inode cache, use it instead of
1829  * the on-flash inode which might be out-of-date. E.g., the size might
1830  * be out-of-date. If we do not do this, the following may happen, for
1831  * example:
1832  * 1. A power cut happens
1833  * 2. We mount the file-system R/O, the replay process fixes up the
 1834  * inode size in the VFS cache, but not on-flash.
1835  * 3. 'check_leaf()' fails because it hits a data node beyond inode
1836  * size.
1837  */
1838  if (!inode) {
1839  fscki->nlink = le32_to_cpu(ino->nlink);
1840  fscki->size = le64_to_cpu(ino->size);
1841  fscki->xattr_cnt = le32_to_cpu(ino->xattr_cnt);
1842  fscki->xattr_sz = le32_to_cpu(ino->xattr_size);
1843  fscki->xattr_nms = le32_to_cpu(ino->xattr_names);
1844  fscki->mode = le32_to_cpu(ino->mode);
1845  } else {
1846  ui = ubifs_inode(inode);
1847  fscki->nlink = inode->i_nlink;
1848  fscki->size = inode->i_size;
1849  fscki->xattr_cnt = ui->xattr_cnt;
1850  fscki->xattr_sz = ui->xattr_size;
1851  fscki->xattr_nms = ui->xattr_names;
1852  fscki->mode = inode->i_mode;
1853  iput(inode);
1854  }
1855 
1856  if (S_ISDIR(fscki->mode)) {
1857  fscki->calc_sz = UBIFS_INO_NODE_SZ;
1858  fscki->calc_cnt = 2;
1859  }
1860 
1861  rb_link_node(&fscki->rb, parent, p);
1862  rb_insert_color(&fscki->rb, &fsckd->inodes);
1863 
1864  return fscki;
1865 }
1866 
1876 static struct fsck_inode *search_inode(struct fsck_data *fsckd, ino_t inum)
1877 {
1878  struct rb_node *p;
1879  struct fsck_inode *fscki;
1880 
1881  p = fsckd->inodes.rb_node;
1882  while (p) {
1883  fscki = rb_entry(p, struct fsck_inode, rb);
1884  if (inum < fscki->inum)
1885  p = p->rb_left;
1886  else if (inum > fscki->inum)
1887  p = p->rb_right;
1888  else
1889  return fscki;
1890  }
1891  return NULL;
1892 }
1893 
1905 static struct fsck_inode *read_add_inode(struct ubifs_info *c,
1906  struct fsck_data *fsckd, ino_t inum)
1907 {
1908  int n, err;
1909  union ubifs_key key;
1910  struct ubifs_znode *znode;
1911  struct ubifs_zbranch *zbr;
1912  struct ubifs_ino_node *ino;
1913  struct fsck_inode *fscki;
1914 
1915  fscki = search_inode(fsckd, inum);
1916  if (fscki)
1917  return fscki;
1918 
1919  ino_key_init(c, &key, inum);
1920  err = ubifs_lookup_level0(c, &key, &znode, &n);
1921  if (!err) {
1922  ubifs_err("inode %lu not found in index", (unsigned long)inum);
1923  return ERR_PTR(-ENOENT);
1924  } else if (err < 0) {
1925  ubifs_err("error %d while looking up inode %lu",
1926  err, (unsigned long)inum);
1927  return ERR_PTR(err);
1928  }
1929 
1930  zbr = &znode->zbranch[n];
1931  if (zbr->len < UBIFS_INO_NODE_SZ) {
1932  ubifs_err("bad node %lu node length %d",
1933  (unsigned long)inum, zbr->len);
1934  return ERR_PTR(-EINVAL);
1935  }
1936 
1937  ino = kmalloc(zbr->len, GFP_NOFS);
1938  if (!ino)
1939  return ERR_PTR(-ENOMEM);
1940 
1941  err = ubifs_tnc_read_node(c, zbr, ino);
1942  if (err) {
1943  ubifs_err("cannot read inode node at LEB %d:%d, error %d",
1944  zbr->lnum, zbr->offs, err);
1945  kfree(ino);
1946  return ERR_PTR(err);
1947  }
1948 
1949  fscki = add_inode(c, fsckd, ino);
1950  kfree(ino);
1951  if (IS_ERR(fscki)) {
1952  ubifs_err("error %ld while adding inode %lu node",
1953  PTR_ERR(fscki), (unsigned long)inum);
1954  return fscki;
1955  }
1956 
1957  return fscki;
1958 }
1959 
1976 static int check_leaf(struct ubifs_info *c, struct ubifs_zbranch *zbr,
1977  void *priv)
1978 {
1979  ino_t inum;
1980  void *node;
1981  struct ubifs_ch *ch;
1982  int err, type = key_type(c, &zbr->key);
1983  struct fsck_inode *fscki;
1984 
1985  if (zbr->len < UBIFS_CH_SZ) {
1986  ubifs_err("bad leaf length %d (LEB %d:%d)",
1987  zbr->len, zbr->lnum, zbr->offs);
1988  return -EINVAL;
1989  }
1990 
1991  node = kmalloc(zbr->len, GFP_NOFS);
1992  if (!node)
1993  return -ENOMEM;
1994 
1995  err = ubifs_tnc_read_node(c, zbr, node);
1996  if (err) {
1997  ubifs_err("cannot read leaf node at LEB %d:%d, error %d",
1998  zbr->lnum, zbr->offs, err);
1999  goto out_free;
2000  }
2001 
2002  /* If this is an inode node, add it to RB-tree of inodes */
2003  if (type == UBIFS_INO_KEY) {
2004  fscki = add_inode(c, priv, node);
2005  if (IS_ERR(fscki)) {
2006  err = PTR_ERR(fscki);
2007  ubifs_err("error %d while adding inode node", err);
2008  goto out_dump;
2009  }
2010  goto out;
2011  }
2012 
2013  if (type != UBIFS_DENT_KEY && type != UBIFS_XENT_KEY &&
2014  type != UBIFS_DATA_KEY) {
2015  ubifs_err("unexpected node type %d at LEB %d:%d",
2016  type, zbr->lnum, zbr->offs);
2017  err = -EINVAL;
2018  goto out_free;
2019  }
2020 
2021  ch = node;
2022  if (le64_to_cpu(ch->sqnum) > c->max_sqnum) {
2023  ubifs_err("too high sequence number, max. is %llu",
2024  c->max_sqnum);
2025  err = -EINVAL;
2026  goto out_dump;
2027  }
2028 
2029  if (type == UBIFS_DATA_KEY) {
2030  long long blk_offs;
2031  struct ubifs_data_node *dn = node;
2032 
2033  /*
2034  * Search the inode node this data node belongs to and insert
2035  * it to the RB-tree of inodes.
2036  */
2037  inum = key_inum_flash(c, &dn->key);
2038  fscki = read_add_inode(c, priv, inum);
2039  if (IS_ERR(fscki)) {
2040  err = PTR_ERR(fscki);
2041  ubifs_err("error %d while processing data node and trying to find inode node %lu",
2042  err, (unsigned long)inum);
2043  goto out_dump;
2044  }
2045 
2046  /* Make sure the data node is within inode size */
2047  blk_offs = key_block_flash(c, &dn->key);
2048  blk_offs <<= UBIFS_BLOCK_SHIFT;
2049  blk_offs += le32_to_cpu(dn->size);
2050  if (blk_offs > fscki->size) {
2051  ubifs_err("data node at LEB %d:%d is not within inode size %lld",
2052  zbr->lnum, zbr->offs, fscki->size);
2053  err = -EINVAL;
2054  goto out_dump;
2055  }
2056  } else {
2057  int nlen;
2058  struct ubifs_dent_node *dent = node;
2059  struct fsck_inode *fscki1;
2060 
2061  err = ubifs_validate_entry(c, dent);
2062  if (err)
2063  goto out_dump;
2064 
2065  /*
2066  * Search the inode node this entry refers to and the parent
2067  * inode node and insert them to the RB-tree of inodes.
2068  */
2069  inum = le64_to_cpu(dent->inum);
2070  fscki = read_add_inode(c, priv, inum);
2071  if (IS_ERR(fscki)) {
2072  err = PTR_ERR(fscki);
2073  ubifs_err("error %d while processing entry node and trying to find inode node %lu",
2074  err, (unsigned long)inum);
2075  goto out_dump;
2076  }
2077 
 2078  /* Count how many direntries or xentries refer to this inode */
2079  fscki->references += 1;
2080 
2081  inum = key_inum_flash(c, &dent->key);
2082  fscki1 = read_add_inode(c, priv, inum);
2083  if (IS_ERR(fscki1)) {
2084  err = PTR_ERR(fscki1);
2085  ubifs_err("error %d while processing entry node and trying to find parent inode node %lu",
2086  err, (unsigned long)inum);
2087  goto out_dump;
2088  }
2089 
2090  nlen = le16_to_cpu(dent->nlen);
2091  if (type == UBIFS_XENT_KEY) {
2092  fscki1->calc_xcnt += 1;
2093  fscki1->calc_xsz += CALC_DENT_SIZE(nlen);
2094  fscki1->calc_xsz += CALC_XATTR_BYTES(fscki->size);
2095  fscki1->calc_xnms += nlen;
2096  } else {
2097  fscki1->calc_sz += CALC_DENT_SIZE(nlen);
2098  if (dent->type == UBIFS_ITYPE_DIR)
2099  fscki1->calc_cnt += 1;
2100  }
2101  }
2102 
2103 out:
2104  kfree(node);
2105  return 0;
2106 
2107 out_dump:
2108  ubifs_msg("dump of node at LEB %d:%d", zbr->lnum, zbr->offs);
2109  ubifs_dump_node(c, node);
2110 out_free:
2111  kfree(node);
2112  return err;
2113 }
2114 
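2115 /**
2116  * free_inodes - free the RB-tree of inodes built by 'check_leaf()'.
2117  * @fsckd: FS checking information
2118  */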
2119 static void free_inodes(struct fsck_data *fsckd)
2120 {
2121  struct rb_node *this = fsckd->inodes.rb_node;
2122  struct fsck_inode *fscki;
2123 
2124  while (this) {
2125  if (this->rb_left)
2126  this = this->rb_left;
2127  else if (this->rb_right)
2128  this = this->rb_right;
2129  else {
2130  fscki = rb_entry(this, struct fsck_inode, rb);
2131  this = rb_parent(this);
2132  if (this) {
2133  if (this->rb_left == &fscki->rb)
2134  this->rb_left = NULL;
2135  else
2136  this->rb_right = NULL;
2137  }
2138  kfree(fscki);
2139  }
2140  }
2141 }
2142 
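2143 /**
2144  * check_inodes - check all accumulated inode information.
2145  * @c: UBIFS file-system description object
2146  * @fsckd: FS checking information
2147  *
2148  * This function compares the counters accumulated while walking the index
2149  * (reference counts, sizes, xattr statistics) against the corresponding
2150  * fields of the inode nodes. Returns zero if no inconsistencies were found,
2151  * and a negative error code otherwise; the offending inode is dumped.
2152  */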
2153 static int check_inodes(struct ubifs_info *c, struct fsck_data *fsckd)
2154 {
2155  int n, err;
2156  union ubifs_key key;
2157  struct ubifs_znode *znode;
2158  struct ubifs_zbranch *zbr;
2159  struct ubifs_ino_node *ino;
2160  struct fsck_inode *fscki;
2161  struct rb_node *this = rb_first(&fsckd->inodes);
2162 
2163  while (this) {
2164  fscki = rb_entry(this, struct fsck_inode, rb);
2165  this = rb_next(this);
2166 
2167  if (S_ISDIR(fscki->mode)) {
2168  /*
2169  * Directories must have exactly one reference (they
2170  * cannot be hard-linked), although the root inode is an
2171  * exception.
2172  */
2173  if (fscki->inum != UBIFS_ROOT_INO &&
2174  fscki->references != 1) {
2175  ubifs_err("directory inode %lu has %d direntries which refer it, but should be 1",
2176  (unsigned long)fscki->inum,
2177  fscki->references);
2178  goto out_dump;
2179  }
2180  if (fscki->inum == UBIFS_ROOT_INO &&
2181  fscki->references != 0) {
2182  ubifs_err("root inode %lu has non-zero (%d) direntries which refer it",
2183  (unsigned long)fscki->inum,
2184  fscki->references);
2185  goto out_dump;
2186  }
2187  if (fscki->calc_sz != fscki->size) {
2188  ubifs_err("directory inode %lu size is %lld, but calculated size is %lld",
2189  (unsigned long)fscki->inum,
2190  fscki->size, fscki->calc_sz);
2191  goto out_dump;
2192  }
2193  if (fscki->calc_cnt != fscki->nlink) {
2194  ubifs_err("directory inode %lu nlink is %d, but calculated nlink is %d",
2195  (unsigned long)fscki->inum,
2196  fscki->nlink, fscki->calc_cnt);
2197  goto out_dump;
2198  }
2199  } else {
2200  if (fscki->references != fscki->nlink) {
2201  ubifs_err("inode %lu nlink is %d, but calculated nlink is %d",
2202  (unsigned long)fscki->inum,
2203  fscki->nlink, fscki->references);
2204  goto out_dump;
2205  }
2206  }
2207  if (fscki->xattr_sz != fscki->calc_xsz) {
2208  ubifs_err("inode %lu has xattr size %u, but calculated size is %lld",
2209  (unsigned long)fscki->inum, fscki->xattr_sz,
2210  fscki->calc_xsz);
2211  goto out_dump;
2212  }
2213  if (fscki->xattr_cnt != fscki->calc_xcnt) {
2214  ubifs_err("inode %lu has %u xattrs, but calculated count is %lld",
2215  (unsigned long)fscki->inum,
2216  fscki->xattr_cnt, fscki->calc_xcnt);
2217  goto out_dump;
2218  }
2219  if (fscki->xattr_nms != fscki->calc_xnms) {
2220  ubifs_err("inode %lu has xattr names' size %u, but calculated names' size is %lld",
2221  (unsigned long)fscki->inum, fscki->xattr_nms,
2222  fscki->calc_xnms);
2223  goto out_dump;
2224  }
2225  }
2226 
2227  return 0;
2228 
2229 out_dump:
2230  /* Read the bad inode and dump it */
2231  ino_key_init(c, &key, fscki->inum);
2232  err = ubifs_lookup_level0(c, &key, &znode, &n);
2233  if (!err) {
2234  ubifs_err("inode %lu not found in index",
2235  (unsigned long)fscki->inum);
2236  return -ENOENT;
2237  } else if (err < 0) {
2238  ubifs_err("error %d while looking up inode %lu",
2239  err, (unsigned long)fscki->inum);
2240  return err;
2241  }
2242 
2243  zbr = &znode->zbranch[n];
2244  ino = kmalloc(zbr->len, GFP_NOFS);
2245  if (!ino)
2246  return -ENOMEM;
2247 
2248  err = ubifs_tnc_read_node(c, zbr, ino);
2249  if (err) {
2250  ubifs_err("cannot read inode node at LEB %d:%d, error %d",
2251  zbr->lnum, zbr->offs, err);
2252  kfree(ino);
2253  return err;
2254  }
2255 
2256  ubifs_msg("dump of the inode %lu sitting in LEB %d:%d",
2257  (unsigned long)fscki->inum, zbr->lnum, zbr->offs);
2258  ubifs_dump_node(c, ino);
2259  kfree(ino);
2260  return -EINVAL;
2261 }
2262 
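2263 /**
2264  * dbg_check_filesystem - check the file-system.
2265  * @c: UBIFS file-system description object
2266  *
2267  * This function checks that all inodes have consistent sizes, link counts
2268  * and extended attribute accounting. It walks the whole index with
2269  * 'check_leaf()' and then verifies the gathered data with 'check_inodes()'.
2270  * Returns zero in case of success and a negative error code in case of
2271  * failure.
2272  */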
2276 int dbg_check_filesystem(struct ubifs_info *c)
2277 {
2278  int err;
2279  struct fsck_data fsckd;
2280 
2281  if (!dbg_is_chk_fs(c))
2282  return 0;
2283 
2284  fsckd.inodes = RB_ROOT;
2285  err = dbg_walk_index(c, check_leaf, NULL, &fsckd);
2286  if (err)
2287  goto out_free;
2288 
2289  err = check_inodes(c, &fsckd);
2290  if (err)
2291  goto out_free;
2292 
2293  free_inodes(&fsckd);
2294  return 0;
2295 
2296 out_free:
2297  ubifs_err("file-system check failed with error %d", err);
2298  dump_stack();
2299  free_inodes(&fsckd);
2300  return err;
2301 }
2302 
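2303 /**
2304  * dbg_check_data_nodes_order - check that list of data nodes is sorted.
2305  * @c: UBIFS file-system description object
2306  * @head: the list of nodes ('struct ubifs_scan_node' objects)
2307  *
2308  * Returns zero if the data nodes in the list are sorted by ascending
2309  * (inode number, block number) key, and %-EINVAL otherwise.
2310  */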
2311 int dbg_check_data_nodes_order(struct ubifs_info *c, struct list_head *head)
2312 {
2313  struct list_head *cur;
2314  struct ubifs_scan_node *sa, *sb;
2315 
2316  if (!dbg_is_chk_gen(c))
2317  return 0;
2318 
2319  for (cur = head->next; cur->next != head; cur = cur->next) {
2320  ino_t inuma, inumb;
2321  uint32_t blka, blkb;
2322 
2323  cond_resched();
2324  sa = container_of(cur, struct ubifs_scan_node, list);
2325  sb = container_of(cur->next, struct ubifs_scan_node, list);
2326 
2327  if (sa->type != UBIFS_DATA_NODE) {
2328  ubifs_err("bad node type %d", sa->type);
2329  ubifs_dump_node(c, sa->node);
2330  return -EINVAL;
2331  }
2332  if (sb->type != UBIFS_DATA_NODE) {
2333  ubifs_err("bad node type %d", sb->type);
2334  ubifs_dump_node(c, sb->node);
2335  return -EINVAL;
2336  }
2337 
2338  inuma = key_inum(c, &sa->key);
2339  inumb = key_inum(c, &sb->key);
2340 
2341  if (inuma < inumb)
2342  continue;
2343  if (inuma > inumb) {
2344  ubifs_err("larger inum %lu goes before inum %lu",
2345  (unsigned long)inuma, (unsigned long)inumb);
2346  goto error_dump;
2347  }
2348 
2349  blka = key_block(c, &sa->key);
2350  blkb = key_block(c, &sb->key);
2351 
2352  if (blka > blkb) {
2353  ubifs_err("larger block %u goes before %u", blka, blkb);
2354  goto error_dump;
2355  }
2356  if (blka == blkb) {
2357  ubifs_err("two data nodes for the same block");
2358  goto error_dump;
2359  }
2360  }
2361 
2362  return 0;
2363 
2364 error_dump:
2365  ubifs_dump_node(c, sa->node);
2366  ubifs_dump_node(c, sb->node);
2367  return -EINVAL;
2368 }
2369 
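2370 /**
2371  * dbg_check_nondata_nodes_order - check that list of non-data nodes is sorted.
2372  * @c: UBIFS file-system description object
2373  * @head: the list of nodes ('struct ubifs_scan_node' objects)
2374  *
2375  * Returns zero if inode nodes go first (in descending length order), followed
2376  * by dentry/xentry nodes in ascending (parent inum, hash) order; else -EINVAL.
2377  */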
2378 int dbg_check_nondata_nodes_order(struct ubifs_info *c, struct list_head *head)
2379 {
2380  struct list_head *cur;
2381  struct ubifs_scan_node *sa, *sb;
2382 
2383  if (!dbg_is_chk_gen(c))
2384  return 0;
2385 
2386  for (cur = head->next; cur->next != head; cur = cur->next) {
2387  ino_t inuma, inumb;
2388  uint32_t hasha, hashb;
2389 
2390  cond_resched();
2391  sa = container_of(cur, struct ubifs_scan_node, list);
2392  sb = container_of(cur->next, struct ubifs_scan_node, list);
2393 
2394  if (sa->type != UBIFS_INO_NODE && sa->type != UBIFS_DENT_NODE &&
2395  sa->type != UBIFS_XENT_NODE) {
2396  ubifs_err("bad node type %d", sa->type);
2397  ubifs_dump_node(c, sa->node);
2398  return -EINVAL;
2399  }
2400  if (sb->type != UBIFS_INO_NODE && sb->type != UBIFS_DENT_NODE &&
2401  sb->type != UBIFS_XENT_NODE) {
2402  ubifs_err("bad node type %d", sb->type);
2403  ubifs_dump_node(c, sb->node);
2404  return -EINVAL;
2405  }
2406 
2407  if (sa->type != UBIFS_INO_NODE && sb->type == UBIFS_INO_NODE) {
2408  ubifs_err("non-inode node goes before inode node");
2409  goto error_dump;
2410  }
2411 
2412  if (sa->type == UBIFS_INO_NODE && sb->type != UBIFS_INO_NODE)
2413  continue;
2414 
2415  if (sa->type == UBIFS_INO_NODE && sb->type == UBIFS_INO_NODE) {
2416  /* Inode nodes are sorted in descending size order */
2417  if (sa->len < sb->len) {
2418  ubifs_err("smaller inode node goes first");
2419  goto error_dump;
2420  }
2421  continue;
2422  }
2423 
2424  /*
2425  * This is either a dentry or xentry, which should be sorted in
2426  * ascending (parent ino, hash) order.
2427  */
2428  inuma = key_inum(c, &sa->key);
2429  inumb = key_inum(c, &sb->key);
2430 
2431  if (inuma < inumb)
2432  continue;
2433  if (inuma > inumb) {
2434  ubifs_err("larger inum %lu goes before inum %lu",
2435  (unsigned long)inuma, (unsigned long)inumb);
2436  goto error_dump;
2437  }
2438 
2439  hasha = key_block(c, &sa->key);
2440  hashb = key_block(c, &sb->key);
2441 
2442  if (hasha > hashb) {
2443  ubifs_err("larger hash %u goes before %u",
2444  hasha, hashb);
2445  goto error_dump;
2446  }
2447  }
2448 
2449  return 0;
2450 
2451 error_dump:
2452  ubifs_msg("dumping first node");
2453  ubifs_dump_node(c, sa->node);
2454  ubifs_msg("dumping second node");
2455  ubifs_dump_node(c, sb->node);
2456  return -EINVAL;
2458 }
2459 
2460 static inline int chance(unsigned int n, unsigned int out_of)
2461 {
2462  return !!((random32() % out_of) + 1 <= n);
2463 
2464 }
2465 
2466 static int power_cut_emulated(struct ubifs_info *c, int lnum, int write)
2467 {
2468  struct ubifs_debug_info *d = c->dbg;
2469 
2470  ubifs_assert(dbg_is_tst_rcvry(c));
2471 
2472  if (!d->pc_cnt) {
2473  /* First call - decide delay to the power cut */
2474  if (chance(1, 2)) {
2475  unsigned long delay;
2476 
2477  if (chance(1, 2)) {
2478  d->pc_delay = 1;
2479  /* Fail within 1 minute */
2480  delay = random32() % 60000;
2481  d->pc_timeout = jiffies;
2482  d->pc_timeout += msecs_to_jiffies(delay);
2483  ubifs_warn("failing after %lums", delay);
2484  } else {
2485  d->pc_delay = 2;
2486  delay = random32() % 10000;
2487  /* Fail within 10000 operations */
2488  d->pc_cnt_max = delay;
2489  ubifs_warn("failing after %lu calls", delay);
2490  }
2491  }
2492 
2493  d->pc_cnt += 1;
2494  }
2495 
2496  /* Determine if failure delay has expired */
2497  if (d->pc_delay == 1 && time_before(jiffies, d->pc_timeout))
2498  return 0;
2499  if (d->pc_delay == 2 && d->pc_cnt++ < d->pc_cnt_max)
2500  return 0;
2501 
2502  if (lnum == UBIFS_SB_LNUM) {
2503  if (write && chance(1, 2))
2504  return 0;
2505  if (chance(19, 20))
2506  return 0;
2507  ubifs_warn("failing in super block LEB %d", lnum);
2508  } else if (lnum == UBIFS_MST_LNUM || lnum == UBIFS_MST_LNUM + 1) {
2509  if (chance(19, 20))
2510  return 0;
2511  ubifs_warn("failing in master LEB %d", lnum);
2512  } else if (lnum >= UBIFS_LOG_LNUM && lnum <= c->log_last) {
2513  if (write && chance(99, 100))
2514  return 0;
2515  if (chance(399, 400))
2516  return 0;
2517  ubifs_warn("failing in log LEB %d", lnum);
2518  } else if (lnum >= c->lpt_first && lnum <= c->lpt_last) {
2519  if (write && chance(7, 8))
2520  return 0;
2521  if (chance(19, 20))
2522  return 0;
2523  ubifs_warn("failing in LPT LEB %d", lnum);
2524  } else if (lnum >= c->orph_first && lnum <= c->orph_last) {
2525  if (write && chance(1, 2))
2526  return 0;
2527  if (chance(9, 10))
2528  return 0;
2529  ubifs_warn("failing in orphan LEB %d", lnum);
2530  } else if (lnum == c->ihead_lnum) {
2531  if (chance(99, 100))
2532  return 0;
2533  ubifs_warn("failing in index head LEB %d", lnum);
2534  } else if (c->jheads && lnum == c->jheads[GCHD].wbuf.lnum) {
2535  if (chance(9, 10))
2536  return 0;
2537  ubifs_warn("failing in GC head LEB %d", lnum);
2538  } else if (write && !RB_EMPTY_ROOT(&c->buds) &&
2539  !ubifs_search_bud(c, lnum)) {
2540  if (chance(19, 20))
2541  return 0;
2542  ubifs_warn("failing in non-bud LEB %d", lnum);
2543  } else if (c->cmt_state == COMMIT_RUNNING_BACKGROUND ||
2544  c->cmt_state == COMMIT_RUNNING_REQUIRED) {
2545  if (chance(999, 1000))
2546  return 0;
2547  ubifs_warn("failing in bud LEB %d commit running", lnum);
2548  } else {
2549  if (chance(9999, 10000))
2550  return 0;
2551  ubifs_warn("failing in bud LEB %d commit not running", lnum);
2552  }
2553 
2554  d->pc_happened = 1;
2555  ubifs_warn("========== Power cut emulated ==========");
2556  dump_stack();
2557  return 1;
2558 }
2559 
2560 static int corrupt_data(const struct ubifs_info *c, const void *buf,
2561  unsigned int len)
2562 {
2563  unsigned int from, to, i, ffs = chance(1, 2);
2564  unsigned char *p = (void *)buf;
2565 
2566  from = random32() % (len + 1);
2567  /* Corruption may only span one max. write unit */
2568  to = min(len, ALIGN(from, c->max_write_size));
2569 
2570  ubifs_warn("filled bytes %u-%u with %s", from, to - 1,
2571  ffs ? "0xFFs" : "random data");
2572 
2573  if (ffs)
2574  for (i = from; i < to; i++)
2575  p[i] = 0xFF;
2576  else
2577  for (i = from; i < to; i++)
2578  p[i] = random32() % 0x100;
2579 
2580  return to;
2581 }
2582 
2583 int dbg_leb_write(struct ubifs_info *c, int lnum, const void *buf,
2584  int offs, int len)
2585 {
2586  int err, failing;
2587 
2588  if (c->dbg->pc_happened)
2589  return -EROFS;
2590 
2591  failing = power_cut_emulated(c, lnum, 1);
2592  if (failing)
2593  len = corrupt_data(c, buf, len);
2594  ubifs_warn("actually write %d bytes to LEB %d:%d (the buffer was corrupted)",
2595  len, lnum, offs);
2596  err = ubi_leb_write(c->ubi, lnum, buf, offs, len);
2597  if (err)
2598  return err;
2599  if (failing)
2600  return -EROFS;
2601  return 0;
2602 }
2603 
2604 int dbg_leb_change(struct ubifs_info *c, int lnum, const void *buf,
2605  int len)
2606 {
2607  int err;
2608 
2609  if (c->dbg->pc_happened)
2610  return -EROFS;
2611  if (power_cut_emulated(c, lnum, 1))
2612  return -EROFS;
2613  err = ubi_leb_change(c->ubi, lnum, buf, len);
2614  if (err)
2615  return err;
2616  if (power_cut_emulated(c, lnum, 1))
2617  return -EROFS;
2618  return 0;
2619 }
2620 
2621 int dbg_leb_unmap(struct ubifs_info *c, int lnum)
2622 {
2623  int err;
2624 
2625  if (c->dbg->pc_happened)
2626  return -EROFS;
2627  if (power_cut_emulated(c, lnum, 0))
2628  return -EROFS;
2629  err = ubi_leb_unmap(c->ubi, lnum);
2630  if (err)
2631  return err;
2632  if (power_cut_emulated(c, lnum, 0))
2633  return -EROFS;
2634  return 0;
2635 }
2636 
2637 int dbg_leb_map(struct ubifs_info *c, int lnum)
2638 {
2639  int err;
2640 
2641  if (c->dbg->pc_happened)
2642  return -EROFS;
2643  if (power_cut_emulated(c, lnum, 0))
2644  return -EROFS;
2645  err = ubi_leb_map(c->ubi, lnum);
2646  if (err)
2647  return err;
2648  if (power_cut_emulated(c, lnum, 0))
2649  return -EROFS;
2650  return 0;
2651 }
2652 
2653 /*
2654  * Root directory for UBIFS stuff in debugfs. Contains sub-directories which
2655  * contain the stuff specific to particular file-system mounts.
2656  */
2657 static struct dentry *dfs_rootdir;
2658 
2659 static int dfs_file_open(struct inode *inode, struct file *file)
2660 {
2661  file->private_data = inode->i_private;
2662  return nonseekable_open(inode, file);
2663 }
2664 
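2665 /**
2666  * provide_user_output - report a debugfs knob value to user-space.
2667  * @val: boolean value to report
2668  * @u: the buffer to store the value to
2669  * @count: size of the buffer
2670  * @ppos: position in the @u output buffer
2671  *
2672  * This is a simple helper for the debugfs read handlers: it turns @val into
2673  * the two-character string "0\n" or "1\n" and copies it to user-space with
2674  * 'simple_read_from_buffer()'. Returns the number of bytes copied or a
2675  * negative error code.
2676  */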
2677 static int provide_user_output(int val, char __user *u, size_t count,
2678  loff_t *ppos)
2679 {
2680  char buf[3];
2681 
2682  if (val)
2683  buf[0] = '1';
2684  else
2685  buf[0] = '0';
2686  buf[1] = '\n';
2687  buf[2] = 0x00;
2688 
2689  return simple_read_from_buffer(u, count, ppos, buf, 2);
2690 }
2691 
2692 static ssize_t dfs_file_read(struct file *file, char __user *u, size_t count,
2693  loff_t *ppos)
2694 {
2695  struct dentry *dent = file->f_path.dentry;
2696  struct ubifs_info *c = file->private_data;
2697  struct ubifs_debug_info *d = c->dbg;
2698  int val;
2699 
2700  if (dent == d->dfs_chk_gen)
2701  val = d->chk_gen;
2702  else if (dent == d->dfs_chk_index)
2703  val = d->chk_index;
2704  else if (dent == d->dfs_chk_orph)
2705  val = d->chk_orph;
2706  else if (dent == d->dfs_chk_lprops)
2707  val = d->chk_lprops;
2708  else if (dent == d->dfs_chk_fs)
2709  val = d->chk_fs;
2710  else if (dent == d->dfs_tst_rcvry)
2711  val = d->tst_rcvry;
2712  else if (dent == d->dfs_ro_error)
2713  val = c->ro_error;
2714  else
2715  return -EINVAL;
2716 
2717  return provide_user_output(val, u, count, ppos);
2718 }
2719 
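2720 /**
2721  * interpret_user_input - translate a '0'/'1' write into a boolean.
2722  * @u: user-space buffer written to the debugfs file
2723  * @count: buffer size
2724  *
2725  * Returns %1 if the first character is '1', %0 if it is '0', %-EFAULT if
2726  * copying fails, and %-EINVAL otherwise. E.g., "echo 1 > chk_general" in this
2727  * file-system's debugfs directory enables the general self-checks.
2728  */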
2729 static int interpret_user_input(const char __user *u, size_t count)
2730 {
2731  size_t buf_size;
2732  char buf[8];
2733 
2734  buf_size = min_t(size_t, count, (sizeof(buf) - 1));
2735  if (copy_from_user(buf, u, buf_size))
2736  return -EFAULT;
2737 
2738  if (buf[0] == '1')
2739  return 1;
2740  else if (buf[0] == '0')
2741  return 0;
2742 
2743  return -EINVAL;
2744 }
2745 
2746 static ssize_t dfs_file_write(struct file *file, const char __user *u,
2747  size_t count, loff_t *ppos)
2748 {
2749  struct ubifs_info *c = file->private_data;
2750  struct ubifs_debug_info *d = c->dbg;
2751  struct dentry *dent = file->f_path.dentry;
2752  int val;
2753 
2754  /*
2755  * TODO: this is racy - the file-system might have already been
2756  * unmounted and we'd oops in this case. The plan is to fix it with the
2757  * help of 'iterate_supers_type()', which we should have in v3.0: when
2758  * a debugfs file is opened, we remember the FS's UUID in
2759  * file->private_data. Then whenever we access the FS via a debugfs
2760  * file, we iterate over all UBIFS superblocks, find the one with the
2761  * same UUID, and take the proper locks.
2762  *
2763  * The other way to go, suggested by Al Viro, is to create a separate
2764  * 'ubifs-debug' file-system instead.
2765  */
2766  if (file->f_path.dentry == d->dfs_dump_lprops) {
2767  ubifs_dump_lprops(c);
2768  return count;
2769  }
2770  if (file->f_path.dentry == d->dfs_dump_budg) {
2771  ubifs_dump_budg(c, &c->bi);
2772  return count;
2773  }
2774  if (file->f_path.dentry == d->dfs_dump_tnc) {
2775  mutex_lock(&c->tnc_mutex);
2776  ubifs_dump_tnc(c);
2777  mutex_unlock(&c->tnc_mutex);
2778  return count;
2779  }
2780 
2781  val = interpret_user_input(u, count);
2782  if (val < 0)
2783  return val;
2784 
2785  if (dent == d->dfs_chk_gen)
2786  d->chk_gen = val;
2787  else if (dent == d->dfs_chk_index)
2788  d->chk_index = val;
2789  else if (dent == d->dfs_chk_orph)
2790  d->chk_orph = val;
2791  else if (dent == d->dfs_chk_lprops)
2792  d->chk_lprops = val;
2793  else if (dent == d->dfs_chk_fs)
2794  d->chk_fs = val;
2795  else if (dent == d->dfs_tst_rcvry)
2796  d->tst_rcvry = val;
2797  else if (dent == d->dfs_ro_error)
2798  c->ro_error = !!val;
2799  else
2800  return -EINVAL;
2801 
2802  return count;
2803 }
2804 
2805 static const struct file_operations dfs_fops = {
2806  .open = dfs_file_open,
2807  .read = dfs_file_read,
2808  .write = dfs_file_write,
2809  .owner = THIS_MODULE,
2810  .llseek = no_llseek,
2811 };
2812 
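2813 /**
2814  * dbg_debugfs_init_fs - initialize debugfs for UBIFS instance.
2815  * @c: UBIFS file-system description object
2816  *
2817  * This function creates all debugfs files for this instance of UBIFS: the
2818  * per-mount directory under the "ubifs" debugfs root, the dump triggers and
2819  * the self-check/recovery-test knobs. Returns zero in case of success and a
2820  * negative error code in case of failure.
2821  */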
2825 int dbg_debugfs_init_fs(struct ubifs_info *c)
2826 {
2827  int err, n;
2828  const char *fname;
2829  struct dentry *dent;
2830  struct ubifs_debug_info *d = c->dbg;
2831 
2832  if (!IS_ENABLED(CONFIG_DEBUG_FS))
2833  return 0;
2834 
2835  n = snprintf(d->dfs_dir_name, UBIFS_DFS_DIR_LEN + 1, UBIFS_DFS_DIR_NAME,
2836  c->vi.ubi_num, c->vi.vol_id);
2837  if (n == UBIFS_DFS_DIR_LEN) {
2838  /* The array size is too small */
2839  fname = UBIFS_DFS_DIR_NAME;
2840  dent = ERR_PTR(-EINVAL);
2841  goto out;
2842  }
2843 
2844  fname = d->dfs_dir_name;
2845  dent = debugfs_create_dir(fname, dfs_rootdir);
2846  if (IS_ERR_OR_NULL(dent))
2847  goto out;
2848  d->dfs_dir = dent;
2849 
2850  fname = "dump_lprops";
2851  dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, c, &dfs_fops);
2852  if (IS_ERR_OR_NULL(dent))
2853  goto out_remove;
2854  d->dfs_dump_lprops = dent;
2855 
2856  fname = "dump_budg";
2857  dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, c, &dfs_fops);
2858  if (IS_ERR_OR_NULL(dent))
2859  goto out_remove;
2860  d->dfs_dump_budg = dent;
2861 
2862  fname = "dump_tnc";
2863  dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, c, &dfs_fops);
2864  if (IS_ERR_OR_NULL(dent))
2865  goto out_remove;
2866  d->dfs_dump_tnc = dent;
2867 
2868  fname = "chk_general";
2869  dent = debugfs_create_file(fname, S_IRUSR | S_IWUSR, d->dfs_dir, c,
2870  &dfs_fops);
2871  if (IS_ERR_OR_NULL(dent))
2872  goto out_remove;
2873  d->dfs_chk_gen = dent;
2874 
2875  fname = "chk_index";
2876  dent = debugfs_create_file(fname, S_IRUSR | S_IWUSR, d->dfs_dir, c,
2877  &dfs_fops);
2878  if (IS_ERR_OR_NULL(dent))
2879  goto out_remove;
2880  d->dfs_chk_index = dent;
2881 
2882  fname = "chk_orphans";
2883  dent = debugfs_create_file(fname, S_IRUSR | S_IWUSR, d->dfs_dir, c,
2884  &dfs_fops);
2885  if (IS_ERR_OR_NULL(dent))
2886  goto out_remove;
2887  d->dfs_chk_orph = dent;
2888 
2889  fname = "chk_lprops";
2890  dent = debugfs_create_file(fname, S_IRUSR | S_IWUSR, d->dfs_dir, c,
2891  &dfs_fops);
2892  if (IS_ERR_OR_NULL(dent))
2893  goto out_remove;
2894  d->dfs_chk_lprops = dent;
2895 
2896  fname = "chk_fs";
2897  dent = debugfs_create_file(fname, S_IRUSR | S_IWUSR, d->dfs_dir, c,
2898  &dfs_fops);
2899  if (IS_ERR_OR_NULL(dent))
2900  goto out_remove;
2901  d->dfs_chk_fs = dent;
2902 
2903  fname = "tst_recovery";
2904  dent = debugfs_create_file(fname, S_IRUSR | S_IWUSR, d->dfs_dir, c,
2905  &dfs_fops);
2906  if (IS_ERR_OR_NULL(dent))
2907  goto out_remove;
2908  d->dfs_tst_rcvry = dent;
2909 
2910  fname = "ro_error";
2911  dent = debugfs_create_file(fname, S_IRUSR | S_IWUSR, d->dfs_dir, c,
2912  &dfs_fops);
2913  if (IS_ERR_OR_NULL(dent))
2914  goto out_remove;
2915  d->dfs_ro_error = dent;
2916 
2917  return 0;
2918 
2919 out_remove:
2920  debugfs_remove_recursive(d->dfs_dir);
2921 out:
2922  err = dent ? PTR_ERR(dent) : -ENODEV;
2923  ubifs_err("cannot create \"%s\" debugfs file or directory, error %d\n",
2924  fname, err);
2925  return err;
2926 }
2927 
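2928 /**
2929  * dbg_debugfs_exit_fs - remove all debugfs files for this UBIFS instance.
2930  * @c: UBIFS file-system description object
2931  */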
2932 void dbg_debugfs_exit_fs(struct ubifs_info *c)
2933 {
2934  if (IS_ENABLED(CONFIG_DEBUG_FS))
2935  debugfs_remove_recursive(c->dbg->dfs_dir);
2936 }
2937 
2938 struct ubifs_global_debug_info ubifs_dbg;
2939 
2940 static struct dentry *dfs_chk_gen;
2941 static struct dentry *dfs_chk_index;
2942 static struct dentry *dfs_chk_orph;
2943 static struct dentry *dfs_chk_lprops;
2944 static struct dentry *dfs_chk_fs;
2945 static struct dentry *dfs_tst_rcvry;
2946 
2947 static ssize_t dfs_global_file_read(struct file *file, char __user *u,
2948  size_t count, loff_t *ppos)
2949 {
2950  struct dentry *dent = file->f_path.dentry;
2951  int val;
2952 
2953  if (dent == dfs_chk_gen)
2954  val = ubifs_dbg.chk_gen;
2955  else if (dent == dfs_chk_index)
2956  val = ubifs_dbg.chk_index;
2957  else if (dent == dfs_chk_orph)
2958  val = ubifs_dbg.chk_orph;
2959  else if (dent == dfs_chk_lprops)
2960  val = ubifs_dbg.chk_lprops;
2961  else if (dent == dfs_chk_fs)
2962  val = ubifs_dbg.chk_fs;
2963  else if (dent == dfs_tst_rcvry)
2964  val = ubifs_dbg.tst_rcvry;
2965  else
2966  return -EINVAL;
2967 
2968  return provide_user_output(val, u, count, ppos);
2969 }
2970 
2971 static ssize_t dfs_global_file_write(struct file *file, const char __user *u,
2972  size_t count, loff_t *ppos)
2973 {
2974  struct dentry *dent = file->f_path.dentry;
2975  int val;
2976 
2977  val = interpret_user_input(u, count);
2978  if (val < 0)
2979  return val;
2980 
2981  if (dent == dfs_chk_gen)
2982  ubifs_dbg.chk_gen = val;
2983  else if (dent == dfs_chk_index)
2984  ubifs_dbg.chk_index = val;
2985  else if (dent == dfs_chk_orph)
2986  ubifs_dbg.chk_orph = val;
2987  else if (dent == dfs_chk_lprops)
2988  ubifs_dbg.chk_lprops = val;
2989  else if (dent == dfs_chk_fs)
2990  ubifs_dbg.chk_fs = val;
2991  else if (dent == dfs_tst_rcvry)
2992  ubifs_dbg.tst_rcvry = val;
2993  else
2994  return -EINVAL;
2995 
2996  return count;
2997 }
2998 
2999 static const struct file_operations dfs_global_fops = {
3000  .read = dfs_global_file_read,
3001  .write = dfs_global_file_write,
3002  .owner = THIS_MODULE,
3003  .llseek = no_llseek,
3004 };
3005 
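3006 /**
3007  * dbg_debugfs_init - initialize debugfs file-system.
3008  *
3009  * UBIFS uses the debugfs file-system to expose various debugging knobs to
3010  * user-space. This function creates the "ubifs" directory and the global
3011  * knob files which affect all mounted UBIFS file-systems. Returns zero in
3012  * case of success and a negative error code in case of failure.
3013  */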
3014 int dbg_debugfs_init(void)
3015 {
3016  int err;
3017  const char *fname;
3018  struct dentry *dent;
3019 
3020  if (!IS_ENABLED(CONFIG_DEBUG_FS))
3021  return 0;
3022 
3023  fname = "ubifs";
3024  dent = debugfs_create_dir(fname, NULL);
3025  if (IS_ERR_OR_NULL(dent))
3026  goto out;
3027  dfs_rootdir = dent;
3028 
3029  fname = "chk_general";
3030  dent = debugfs_create_file(fname, S_IRUSR | S_IWUSR, dfs_rootdir, NULL,
3031  &dfs_global_fops);
3032  if (IS_ERR_OR_NULL(dent))
3033  goto out_remove;
3034  dfs_chk_gen = dent;
3035 
3036  fname = "chk_index";
3037  dent = debugfs_create_file(fname, S_IRUSR | S_IWUSR, dfs_rootdir, NULL,
3038  &dfs_global_fops);
3039  if (IS_ERR_OR_NULL(dent))
3040  goto out_remove;
3041  dfs_chk_index = dent;
3042 
3043  fname = "chk_orphans";
3044  dent = debugfs_create_file(fname, S_IRUSR | S_IWUSR, dfs_rootdir, NULL,
3045  &dfs_global_fops);
3046  if (IS_ERR_OR_NULL(dent))
3047  goto out_remove;
3048  dfs_chk_orph = dent;
3049 
3050  fname = "chk_lprops";
3051  dent = debugfs_create_file(fname, S_IRUSR | S_IWUSR, dfs_rootdir, NULL,
3052  &dfs_global_fops);
3053  if (IS_ERR_OR_NULL(dent))
3054  goto out_remove;
3055  dfs_chk_lprops = dent;
3056 
3057  fname = "chk_fs";
3058  dent = debugfs_create_file(fname, S_IRUSR | S_IWUSR, dfs_rootdir, NULL,
3059  &dfs_global_fops);
3060  if (IS_ERR_OR_NULL(dent))
3061  goto out_remove;
3062  dfs_chk_fs = dent;
3063 
3064  fname = "tst_recovery";
3065  dent = debugfs_create_file(fname, S_IRUSR | S_IWUSR, dfs_rootdir, NULL,
3066  &dfs_global_fops);
3067  if (IS_ERR_OR_NULL(dent))
3068  goto out_remove;
3069  dfs_tst_rcvry = dent;
3070 
3071  return 0;
3072 
3073 out_remove:
3074  debugfs_remove_recursive(dfs_rootdir);
3075 out:
3076  err = dent ? PTR_ERR(dent) : -ENODEV;
3077  ubifs_err("cannot create \"%s\" debugfs file or directory, error %d\n",
3078  fname, err);
3079  return err;
3080 }
3081 
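3082 /**
3083  * dbg_debugfs_exit - remove the "ubifs" directory from debugfs file-system.
3084  */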
3085 void dbg_debugfs_exit(void)
3086 {
3087  if (IS_ENABLED(CONFIG_DEBUG_FS))
3088  debugfs_remove_recursive(dfs_rootdir);
3089 }
3090 
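3091 /**
3092  * ubifs_debugging_init - initialize UBIFS debugging.
3093  * @c: UBIFS file-system description object
3094  *
3095  * This function allocates the 'struct ubifs_debug_info' object which keeps
3096  * all the per-mount debugging state ('c->dbg'). Returns zero in case of
3097  * success and a negative error code in case of failure.
3098  */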
3099 int ubifs_debugging_init(struct ubifs_info *c)
3100 {
3101  c->dbg = kzalloc(sizeof(struct ubifs_debug_info), GFP_KERNEL);
3102  if (!c->dbg)
3103  return -ENOMEM;
3104 
3105  return 0;
3106 }
3107 
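3108 /**
3109  * ubifs_debugging_exit - free debugging data.
3110  * @c: UBIFS file-system description object
3111  */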
3112 void ubifs_debugging_exit(struct ubifs_info *c)
3113 {
3114  kfree(c->dbg);
3115 }