Linux kernel 3.7.1 — fs/ubifs/journal.c (UBIFS journal implementation).
Note: this listing was extracted from generated documentation; the embedded
per-line numbers are artifacts of the original file listing.
1 /*
2  * This file is part of UBIFS.
3  *
4  * Copyright (C) 2006-2008 Nokia Corporation.
5  *
6  * This program is free software; you can redistribute it and/or modify it
7  * under the terms of the GNU General Public License version 2 as published by
8  * the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but WITHOUT
11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12  * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13  * more details.
14  *
15  * You should have received a copy of the GNU General Public License along with
16  * this program; if not, write to the Free Software Foundation, Inc., 51
17  * Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
18  *
19  * Authors: Artem Bityutskiy (Битюцкий Артём)
20  * Adrian Hunter
21  */
22 
23 /*
24  * This file implements UBIFS journal.
25  *
26  * The journal consists of 2 parts - the log and bud LEBs. The log has fixed
27  * length and position, while a bud logical eraseblock is any LEB in the main
28  * area. Buds contain file system data - data nodes, inode nodes, etc. The log
29  * contains only references to buds and some other stuff like commit
30  * start node. The idea is that when we commit the journal, we do
31  * not copy the data, the buds just become indexed. Since after the commit the
32  * nodes in bud eraseblocks become leaf nodes of the file system index tree, we
33  * use term "bud". Analogy is obvious, bud eraseblocks contain nodes which will
34  * become leafs in the future.
35  *
36  * The journal is multi-headed because we want to write data to the journal as
37  * optimally as possible. It is nice to have nodes belonging to the same inode
38  * in one LEB, so we may write data owned by different inodes to different
39  * journal heads, although at present only one data head is used.
40  *
41  * For recovery reasons, the base head contains all inode nodes, all directory
42  * entry nodes and all truncate nodes. This means that the other heads contain
43  * only data nodes.
44  *
45  * Bud LEBs may be half-indexed. For example, if the bud was not full at the
46  * time of commit, the bud is retained to continue to be used in the journal,
47  * even though the "front" of the LEB is now indexed. In that case, the log
48  * reference contains the offset where the bud starts for the purposes of the
49  * journal.
50  *
51  * The journal size has to be limited, because the larger is the journal, the
52  * longer it takes to mount UBIFS (scanning the journal) and the more memory it
53  * takes (indexing in the TNC).
54  *
55  * All the journal write operations like 'ubifs_jnl_update()' here, which write
56  * multiple UBIFS nodes to the journal at one go, are atomic with respect to
57  * unclean reboots. Should the unclean reboot happen, the recovery code drops
58  * all the nodes.
59  */
60 
61 #include "ubifs.h"
62 
/**
 * zero_ino_node_unused - zero out unused fields of an on-flash inode node.
 * @ino: the inode node to zero out
 *
 * The padding byte counts (4 and 26) are fixed by the UBIFS on-flash format.
 */
static inline void zero_ino_node_unused(struct ubifs_ino_node *ino)
{
	memset(ino->padding1, 0, 4);
	memset(ino->padding2, 0, 26);
}
72 
/**
 * zero_dent_node_unused - zero out unused fields of an on-flash directory
 *                         entry node.
 * @dent: the directory entry node to zero out
 */
static inline void zero_dent_node_unused(struct ubifs_dent_node *dent)
{
	dent->padding1 = 0;
	memset(dent->padding2, 0, 4);
}
83 
/**
 * zero_data_node_unused - zero out unused fields of an on-flash data node.
 * @data: the data node to zero out
 */
static inline void zero_data_node_unused(struct ubifs_data_node *data)
{
	memset(data->padding, 0, 2);
}
92 
/**
 * zero_trun_node_unused - zero out unused fields of an on-flash truncation
 *                         node.
 * @trun: the truncation node to zero out
 */
static inline void zero_trun_node_unused(struct ubifs_trun_node *trun)
{
	memset(trun->padding, 0, 12);
}
102 
/**
 * reserve_space - reserve space in the journal.
 * @c: UBIFS file-system description object
 * @jhead: journal head number
 * @len: node length
 *
 * This function reserves @len bytes of space in journal head @jhead. Returns
 * zero in case of success; IMPORTANT: on the success path the journal head's
 * write-buffer mutex (@wbuf->io_mutex) is left LOCKED - the caller writes the
 * node(s) and then releases the head. A negative error code is returned in
 * case of failure, in which case the mutex is not held. If there is no free
 * space, garbage collection and (via -ENOSPC propagated to the caller) commit
 * may be needed to make some.
 */
static int reserve_space(struct ubifs_info *c, int jhead, int len)
{
	int err = 0, err1, retries = 0, avail, lnum, offs, squeeze;
	struct ubifs_wbuf *wbuf = &c->jheads[jhead].wbuf;

	/*
	 * Typically, the base head has smaller nodes written to it, so it is
	 * better to try to allocate space at the ends of eraseblocks. This is
	 * what the squeeze parameter does.
	 */
	ubifs_assert(!c->ro_media && !c->ro_mount);
	squeeze = (jhead == BASEHD);
again:
	mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead);

	if (c->ro_error) {
		err = -EROFS;
		goto out_unlock;
	}

	/* Is there still room in the bud the write-buffer points at? */
	avail = c->leb_size - wbuf->offs - wbuf->used;
	if (wbuf->lnum != -1 && avail >= len)
		/* Success: return with @wbuf->io_mutex still held */
		return 0;

	/*
	 * Write buffer wasn't seek'ed or there is no enough space - look for an
	 * LEB with some empty space.
	 */
	lnum = ubifs_find_free_space(c, len, &offs, squeeze);
	if (lnum >= 0)
		goto out;

	err = lnum;
	if (err != -ENOSPC)
		goto out_unlock;

	/*
	 * No free space, we have to run garbage collector to make
	 * some. But the write-buffer mutex has to be unlocked because
	 * GC also takes it.
	 */
	dbg_jnl("no free space in jhead %s, run GC", dbg_jhead(jhead));
	mutex_unlock(&wbuf->io_mutex);

	lnum = ubifs_garbage_collect(c, 0);
	if (lnum < 0) {
		err = lnum;
		if (err != -ENOSPC)
			return err;

		/*
		 * GC could not make a free LEB. But someone else may
		 * have allocated new bud for this journal head,
		 * because we dropped @wbuf->io_mutex, so try once
		 * again.
		 */
		dbg_jnl("GC couldn't make a free LEB for jhead %s",
			dbg_jhead(jhead));
		if (retries++ < 2) {
			dbg_jnl("retry (%d)", retries);
			goto again;
		}

		dbg_jnl("return -ENOSPC");
		return err;
	}

	mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead);
	dbg_jnl("got LEB %d for jhead %s", lnum, dbg_jhead(jhead));
	avail = c->leb_size - wbuf->offs - wbuf->used;

	if (wbuf->lnum != -1 && avail >= len) {
		/*
		 * Someone else has switched the journal head and we have
		 * enough space now. This happens when more than one process is
		 * trying to write to the same journal head at the same time.
		 */
		dbg_jnl("return LEB %d back, already have LEB %d:%d",
			lnum, wbuf->lnum, wbuf->offs + wbuf->used);
		err = ubifs_return_leb(c, lnum);
		if (err)
			goto out_unlock;
		return 0;
	}

	offs = 0;

out:
	/*
	 * Make sure we synchronize the write-buffer before we add the new bud
	 * to the log. Otherwise we may have a power cut after the log
	 * reference node for the last bud (@lnum) is written but before the
	 * write-buffer data are written to the next-to-last bud
	 * (@wbuf->lnum). And the effect would be that the recovery would see
	 * that there is corruption in the next-to-last bud.
	 */
	err = ubifs_wbuf_sync_nolock(wbuf);
	if (err)
		goto out_return;
	err = ubifs_add_bud_to_log(c, jhead, lnum, offs);
	if (err)
		goto out_return;
	err = ubifs_wbuf_seek_nolock(wbuf, lnum, offs);
	if (err)
		goto out_unlock;

	return 0;

out_unlock:
	mutex_unlock(&wbuf->io_mutex);
	return err;

out_return:
	/* An error occurred and the LEB has to be returned to lprops */
	ubifs_assert(err < 0);
	err1 = ubifs_return_leb(c, lnum);
	if (err1 && err == -EAGAIN)
		/*
		 * Return original error code only if it is not %-EAGAIN,
		 * which is not really an error. Otherwise, return the error
		 * code of 'ubifs_return_leb()'.
		 */
		err = err1;
	mutex_unlock(&wbuf->io_mutex);
	return err;
}
241 
255 static int write_node(struct ubifs_info *c, int jhead, void *node, int len,
256  int *lnum, int *offs)
257 {
258  struct ubifs_wbuf *wbuf = &c->jheads[jhead].wbuf;
259 
260  ubifs_assert(jhead != GCHD);
261 
262  *lnum = c->jheads[jhead].wbuf.lnum;
263  *offs = c->jheads[jhead].wbuf.offs + c->jheads[jhead].wbuf.used;
264 
265  dbg_jnl("jhead %s, LEB %d:%d, len %d",
266  dbg_jhead(jhead), *lnum, *offs, len);
267  ubifs_prepare_node(c, node, len, 0);
268 
269  return ubifs_wbuf_write_nolock(wbuf, node, len);
270 }
271 
286 static int write_head(struct ubifs_info *c, int jhead, void *buf, int len,
287  int *lnum, int *offs, int sync)
288 {
289  int err;
290  struct ubifs_wbuf *wbuf = &c->jheads[jhead].wbuf;
291 
292  ubifs_assert(jhead != GCHD);
293 
294  *lnum = c->jheads[jhead].wbuf.lnum;
295  *offs = c->jheads[jhead].wbuf.offs + c->jheads[jhead].wbuf.used;
296  dbg_jnl("jhead %s, LEB %d:%d, len %d",
297  dbg_jhead(jhead), *lnum, *offs, len);
298 
299  err = ubifs_wbuf_write_nolock(wbuf, buf, len);
300  if (err)
301  return err;
302  if (sync)
303  err = ubifs_wbuf_sync_nolock(wbuf);
304  return err;
305 }
306 
323 static int make_reservation(struct ubifs_info *c, int jhead, int len)
324 {
325  int err, cmt_retries = 0, nospc_retries = 0;
326 
327 again:
328  down_read(&c->commit_sem);
329  err = reserve_space(c, jhead, len);
330  if (!err)
331  return 0;
332  up_read(&c->commit_sem);
333 
334  if (err == -ENOSPC) {
335  /*
336  * GC could not make any progress. We should try to commit
337  * once because it could make some dirty space and GC would
338  * make progress, so make the error -EAGAIN so that the below
339  * will commit and re-try.
340  */
341  if (nospc_retries++ < 2) {
342  dbg_jnl("no space, retry");
343  err = -EAGAIN;
344  }
345 
346  /*
347  * This means that the budgeting is incorrect. We always have
348  * to be able to write to the media, because all operations are
349  * budgeted. Deletions are not budgeted, though, but we reserve
350  * an extra LEB for them.
351  */
352  }
353 
354  if (err != -EAGAIN)
355  goto out;
356 
357  /*
358  * -EAGAIN means that the journal is full or too large, or the above
359  * code wants to do one commit. Do this and re-try.
360  */
361  if (cmt_retries > 128) {
362  /*
363  * This should not happen unless the journal size limitations
364  * are too tough.
365  */
366  ubifs_err("stuck in space allocation");
367  err = -ENOSPC;
368  goto out;
369  } else if (cmt_retries > 32)
370  ubifs_warn("too many space allocation re-tries (%d)",
371  cmt_retries);
372 
373  dbg_jnl("-EAGAIN, commit and retry (retried %d times)",
374  cmt_retries);
375  cmt_retries += 1;
376 
377  err = ubifs_run_commit(c);
378  if (err)
379  return err;
380  goto again;
381 
382 out:
383  ubifs_err("cannot reserve %d bytes in jhead %d, error %d",
384  len, jhead, err);
385  if (err == -ENOSPC) {
386  /* This are some budgeting problems, print useful information */
387  down_write(&c->commit_sem);
388  dump_stack();
389  ubifs_dump_budg(c, &c->bi);
391  cmt_retries = dbg_check_lprops(c);
392  up_write(&c->commit_sem);
393  }
394  return err;
395 }
396 
/**
 * release_head - release a journal head.
 * @c: UBIFS file-system description object
 * @jhead: journal head
 *
 * This function releases the write-buffer mutex of journal head @jhead which
 * was locked on the success path of 'reserve_space()'/'make_reservation()'.
 */
static inline void release_head(struct ubifs_info *c, int jhead)
{
	mutex_unlock(&c->jheads[jhead].wbuf.io_mutex);
}
410 
/**
 * finish_reservation - finish a journal space reservation.
 * @c: UBIFS file-system description object
 *
 * This function drops the read lock on @c->commit_sem which was taken by
 * 'make_reservation()'. It must be called once the journal write is done.
 */
static void finish_reservation(struct ubifs_info *c)
{
	up_read(&c->commit_sem);
}
422 
427 static int get_dent_type(int mode)
428 {
429  switch (mode & S_IFMT) {
430  case S_IFREG:
431  return UBIFS_ITYPE_REG;
432  case S_IFDIR:
433  return UBIFS_ITYPE_DIR;
434  case S_IFLNK:
435  return UBIFS_ITYPE_LNK;
436  case S_IFBLK:
437  return UBIFS_ITYPE_BLK;
438  case S_IFCHR:
439  return UBIFS_ITYPE_CHR;
440  case S_IFIFO:
441  return UBIFS_ITYPE_FIFO;
442  case S_IFSOCK:
443  return UBIFS_ITYPE_SOCK;
444  default:
445  BUG();
446  }
447  return 0;
448 }
449 
/**
 * pack_inode - pack an inode node into a buffer.
 * @c: UBIFS file-system description object
 * @ino: buffer in which to pack the inode node
 * @inode: inode to pack
 * @last: indicates the last node of the group
 *
 * Fills the on-flash inode node @ino from @inode and its UBIFS-private part,
 * converting all fields to little-endian, then prepares it as a (possibly
 * last) member of a node group.
 */
static void pack_inode(struct ubifs_info *c, struct ubifs_ino_node *ino,
		       const struct inode *inode, int last)
{
	int data_len = 0, last_reference = !inode->i_nlink;
	struct ubifs_inode *ui = ubifs_inode(inode);

	ino->ch.node_type = UBIFS_INO_NODE;
	ino_key_init_flash(c, &ino->key, inode->i_ino);
	ino->creat_sqnum = cpu_to_le64(ui->creat_sqnum);
	ino->atime_sec = cpu_to_le64(inode->i_atime.tv_sec);
	ino->atime_nsec = cpu_to_le32(inode->i_atime.tv_nsec);
	ino->ctime_sec = cpu_to_le64(inode->i_ctime.tv_sec);
	ino->ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
	ino->mtime_sec = cpu_to_le64(inode->i_mtime.tv_sec);
	ino->mtime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
	ino->uid = cpu_to_le32(i_uid_read(inode));
	ino->gid = cpu_to_le32(i_gid_read(inode));
	ino->mode = cpu_to_le32(inode->i_mode);
	ino->flags = cpu_to_le32(ui->flags);
	ino->size = cpu_to_le64(ui->ui_size);
	ino->nlink = cpu_to_le32(inode->i_nlink);
	ino->compr_type = cpu_to_le16(ui->compr_type);
	ino->data_len = cpu_to_le32(ui->data_len);
	ino->xattr_cnt = cpu_to_le32(ui->xattr_cnt);
	ino->xattr_size = cpu_to_le32(ui->xattr_size);
	ino->xattr_names = cpu_to_le32(ui->xattr_names);
	zero_ino_node_unused(ino);

	/*
	 * Drop the attached data if this is a deletion inode, the data is not
	 * needed anymore.
	 */
	if (!last_reference) {
		memcpy(ino->data, ui->data, ui->data_len);
		data_len = ui->data_len;
	}

	ubifs_prep_grp_node(c, ino, UBIFS_INO_NODE_SZ + data_len, last);
}
496 
507 static void mark_inode_clean(struct ubifs_info *c, struct ubifs_inode *ui)
508 {
509  if (ui->dirty)
511  ui->dirty = 0;
512 }
513 
/**
 * ubifs_jnl_update - update an inode.
 * @c: UBIFS file-system description object
 * @dir: parent inode or host inode in case of extended attributes
 * @nm: directory entry name
 * @inode: inode to update
 * @deletion: non-zero if the directory entry is being deleted
 * @xent: non-zero if the entry is an extended attribute entry
 *
 * This function writes a directory entry node, the @inode node, and the
 * parent @dir inode node to the base journal head in one go, then updates
 * the TNC accordingly. If @deletion removes the last link to @inode, the
 * inode is added to the orphan list instead of being removed from the TNC.
 * Returns zero in case of success and a negative error code in case of
 * failure.
 */
int ubifs_jnl_update(struct ubifs_info *c, const struct inode *dir,
		     const struct qstr *nm, const struct inode *inode,
		     int deletion, int xent)
{
	int err, dlen, ilen, len, lnum, ino_offs, dent_offs;
	int aligned_dlen, aligned_ilen, sync = IS_DIRSYNC(dir);
	int last_reference = !!(deletion && inode->i_nlink == 0);
	struct ubifs_inode *ui = ubifs_inode(inode);
	struct ubifs_inode *dir_ui = ubifs_inode(dir);
	struct ubifs_dent_node *dent;
	struct ubifs_ino_node *ino;
	union ubifs_key dent_key, ino_key;

	dbg_jnl("ino %lu, dent '%.*s', data len %d in dir ino %lu",
		inode->i_ino, nm->len, nm->name, ui->data_len, dir->i_ino);
	ubifs_assert(dir_ui->data_len == 0);
	ubifs_assert(mutex_is_locked(&dir_ui->ui_mutex));

	dlen = UBIFS_DENT_NODE_SZ + nm->len + 1;
	ilen = UBIFS_INO_NODE_SZ;

	/*
	 * If the last reference to the inode is being deleted, then there is
	 * no need to attach and write inode data, it is being deleted anyway.
	 * And if the inode is being deleted, no need to synchronize
	 * write-buffer even if the inode is synchronous.
	 */
	if (!last_reference) {
		ilen += ui->data_len;
		sync |= IS_SYNC(inode);
	}

	/* One buffer holds the dent node, @inode's node and @dir's node */
	aligned_dlen = ALIGN(dlen, 8);
	aligned_ilen = ALIGN(ilen, 8);
	len = aligned_dlen + aligned_ilen + UBIFS_INO_NODE_SZ;
	dent = kmalloc(len, GFP_NOFS);
	if (!dent)
		return -ENOMEM;

	/* Make reservation before allocating sequence numbers */
	err = make_reservation(c, BASEHD, len);
	if (err)
		goto out_free;

	if (!xent) {
		dent->ch.node_type = UBIFS_DENT_NODE;
		dent_key_init(c, &dent_key, dir->i_ino, nm);
	} else {
		dent->ch.node_type = UBIFS_XENT_NODE;
		xent_key_init(c, &dent_key, dir->i_ino, nm);
	}

	key_write(c, &dent_key, dent->key);
	/* A zero inode number marks a deletion entry */
	dent->inum = deletion ? 0 : cpu_to_le64(inode->i_ino);
	dent->type = get_dent_type(inode->i_mode);
	dent->nlen = cpu_to_le16(nm->len);
	memcpy(dent->name, nm->name, nm->len);
	dent->name[nm->len] = '\0';
	zero_dent_node_unused(dent);
	ubifs_prep_grp_node(c, dent, dlen, 0);

	ino = (void *)dent + aligned_dlen;
	pack_inode(c, ino, inode, 0);
	ino = (void *)ino + aligned_ilen;
	pack_inode(c, ino, dir, 1);

	if (last_reference) {
		err = ubifs_add_orphan(c, inode->i_ino);
		if (err) {
			release_head(c, BASEHD);
			goto out_finish;
		}
		ui->del_cmtno = c->cmt_no;
	}

	err = write_head(c, BASEHD, dent, len, &lnum, &dent_offs, sync);
	if (err)
		goto out_release;
	if (!sync) {
		struct ubifs_wbuf *wbuf = &c->jheads[BASEHD].wbuf;

		ubifs_wbuf_add_ino_nolock(wbuf, inode->i_ino);
		ubifs_wbuf_add_ino_nolock(wbuf, dir->i_ino);
	}
	release_head(c, BASEHD);
	kfree(dent);

	if (deletion) {
		err = ubifs_tnc_remove_nm(c, &dent_key, nm);
		if (err)
			goto out_ro;
		err = ubifs_add_dirt(c, lnum, dlen);
	} else
		err = ubifs_tnc_add_nm(c, &dent_key, lnum, dent_offs, dlen, nm);
	if (err)
		goto out_ro;

	/*
	 * Note, we do not remove the inode from TNC even if the last reference
	 * to it has just been deleted, because the inode may still be opened.
	 * Instead, the inode has been added to orphan lists and the orphan
	 * subsystem will take further care about it.
	 */
	ino_key_init(c, &ino_key, inode->i_ino);
	ino_offs = dent_offs + aligned_dlen;
	err = ubifs_tnc_add(c, &ino_key, lnum, ino_offs, ilen);
	if (err)
		goto out_ro;

	ino_key_init(c, &ino_key, dir->i_ino);
	ino_offs += aligned_ilen;
	err = ubifs_tnc_add(c, &ino_key, lnum, ino_offs, UBIFS_INO_NODE_SZ);
	if (err)
		goto out_ro;

	finish_reservation(c);
	spin_lock(&ui->ui_lock);
	ui->synced_i_size = ui->ui_size;
	spin_unlock(&ui->ui_lock);
	mark_inode_clean(c, ui);
	mark_inode_clean(c, dir_ui);
	return 0;

out_finish:
	finish_reservation(c);
out_free:
	kfree(dent);
	return err;

out_release:
	release_head(c, BASEHD);
	kfree(dent);
out_ro:
	ubifs_ro_mode(c, err);
	if (last_reference)
		ubifs_delete_orphan(c, inode->i_ino);
	finish_reservation(c);
	return err;
}
680 
692 int ubifs_jnl_write_data(struct ubifs_info *c, const struct inode *inode,
693  const union ubifs_key *key, const void *buf, int len)
694 {
695  struct ubifs_data_node *data;
696  int err, lnum, offs, compr_type, out_len;
697  int dlen = COMPRESSED_DATA_NODE_BUF_SZ, allocated = 1;
698  struct ubifs_inode *ui = ubifs_inode(inode);
699 
700  dbg_jnlk(key, "ino %lu, blk %u, len %d, key ",
701  (unsigned long)key_inum(c, key), key_block(c, key), len);
703 
704  data = kmalloc(dlen, GFP_NOFS | __GFP_NOWARN);
705  if (!data) {
706  /*
707  * Fall-back to the write reserve buffer. Note, we might be
708  * currently on the memory reclaim path, when the kernel is
709  * trying to free some memory by writing out dirty pages. The
710  * write reserve buffer helps us to guarantee that we are
711  * always able to write the data.
712  */
713  allocated = 0;
715  data = c->write_reserve_buf;
716  }
717 
718  data->ch.node_type = UBIFS_DATA_NODE;
719  key_write(c, key, &data->key);
720  data->size = cpu_to_le32(len);
721  zero_data_node_unused(data);
722 
723  if (!(ui->flags & UBIFS_COMPR_FL))
724  /* Compression is disabled for this inode */
725  compr_type = UBIFS_COMPR_NONE;
726  else
727  compr_type = ui->compr_type;
728 
729  out_len = dlen - UBIFS_DATA_NODE_SZ;
730  ubifs_compress(buf, len, &data->data, &out_len, &compr_type);
731  ubifs_assert(out_len <= UBIFS_BLOCK_SIZE);
732 
733  dlen = UBIFS_DATA_NODE_SZ + out_len;
734  data->compr_type = cpu_to_le16(compr_type);
735 
736  /* Make reservation before allocating sequence numbers */
737  err = make_reservation(c, DATAHD, dlen);
738  if (err)
739  goto out_free;
740 
741  err = write_node(c, DATAHD, data, dlen, &lnum, &offs);
742  if (err)
743  goto out_release;
744  ubifs_wbuf_add_ino_nolock(&c->jheads[DATAHD].wbuf, key_inum(c, key));
745  release_head(c, DATAHD);
746 
747  err = ubifs_tnc_add(c, key, lnum, offs, dlen);
748  if (err)
749  goto out_ro;
750 
751  finish_reservation(c);
752  if (!allocated)
754  else
755  kfree(data);
756  return 0;
757 
758 out_release:
759  release_head(c, DATAHD);
760 out_ro:
761  ubifs_ro_mode(c, err);
762  finish_reservation(c);
763 out_free:
764  if (!allocated)
766  else
767  kfree(data);
768  return err;
769 }
770 
780 int ubifs_jnl_write_inode(struct ubifs_info *c, const struct inode *inode)
781 {
782  int err, lnum, offs;
783  struct ubifs_ino_node *ino;
784  struct ubifs_inode *ui = ubifs_inode(inode);
785  int sync = 0, len = UBIFS_INO_NODE_SZ, last_reference = !inode->i_nlink;
786 
787  dbg_jnl("ino %lu, nlink %u", inode->i_ino, inode->i_nlink);
788 
789  /*
790  * If the inode is being deleted, do not write the attached data. No
791  * need to synchronize the write-buffer either.
792  */
793  if (!last_reference) {
794  len += ui->data_len;
795  sync = IS_SYNC(inode);
796  }
797  ino = kmalloc(len, GFP_NOFS);
798  if (!ino)
799  return -ENOMEM;
800 
801  /* Make reservation before allocating sequence numbers */
802  err = make_reservation(c, BASEHD, len);
803  if (err)
804  goto out_free;
805 
806  pack_inode(c, ino, inode, 1);
807  err = write_head(c, BASEHD, ino, len, &lnum, &offs, sync);
808  if (err)
809  goto out_release;
810  if (!sync)
812  inode->i_ino);
813  release_head(c, BASEHD);
814 
815  if (last_reference) {
816  err = ubifs_tnc_remove_ino(c, inode->i_ino);
817  if (err)
818  goto out_ro;
819  ubifs_delete_orphan(c, inode->i_ino);
820  err = ubifs_add_dirt(c, lnum, len);
821  } else {
822  union ubifs_key key;
823 
824  ino_key_init(c, &key, inode->i_ino);
825  err = ubifs_tnc_add(c, &key, lnum, offs, len);
826  }
827  if (err)
828  goto out_ro;
829 
830  finish_reservation(c);
831  spin_lock(&ui->ui_lock);
832  ui->synced_i_size = ui->ui_size;
833  spin_unlock(&ui->ui_lock);
834  kfree(ino);
835  return 0;
836 
837 out_release:
838  release_head(c, BASEHD);
839 out_ro:
840  ubifs_ro_mode(c, err);
841  finish_reservation(c);
842 out_free:
843  kfree(ino);
844  return err;
845 }
846 
/**
 * ubifs_jnl_delete_inode - delete an inode.
 * @c: UBIFS file-system description object
 * @inode: inode to delete
 *
 * This function deletes inode @inode (which must have no links left). If a
 * commit has happened since the deletion entry was journalled
 * (@ui->del_cmtno differs from @c->cmt_no), a deletion inode has to be
 * written via 'ubifs_jnl_write_inode()'; otherwise it is enough to remove
 * the inode from the TNC and the orphan list directly. Returns zero in case
 * of success and a negative error code in case of failure.
 */
int ubifs_jnl_delete_inode(struct ubifs_info *c, const struct inode *inode)
{
	int err;
	struct ubifs_inode *ui = ubifs_inode(inode);

	ubifs_assert(inode->i_nlink == 0);

	if (ui->del_cmtno != c->cmt_no)
		/* A commit happened for sure */
		return ubifs_jnl_write_inode(c, inode);

	down_read(&c->commit_sem);
	/*
	 * Check commit number again, because the first test has been done
	 * without @c->commit_sem, so a commit might have happened.
	 */
	if (ui->del_cmtno != c->cmt_no) {
		up_read(&c->commit_sem);
		return ubifs_jnl_write_inode(c, inode);
	}

	err = ubifs_tnc_remove_ino(c, inode->i_ino);
	if (err)
		ubifs_ro_mode(c, err);
	else
		ubifs_delete_orphan(c, inode->i_ino);
	up_read(&c->commit_sem);
	return err;
}
905 
920 int ubifs_jnl_rename(struct ubifs_info *c, const struct inode *old_dir,
921  const struct dentry *old_dentry,
922  const struct inode *new_dir,
923  const struct dentry *new_dentry, int sync)
924 {
925  void *p;
926  union ubifs_key key;
927  struct ubifs_dent_node *dent, *dent2;
928  int err, dlen1, dlen2, ilen, lnum, offs, len;
929  const struct inode *old_inode = old_dentry->d_inode;
930  const struct inode *new_inode = new_dentry->d_inode;
931  int aligned_dlen1, aligned_dlen2, plen = UBIFS_INO_NODE_SZ;
932  int last_reference = !!(new_inode && new_inode->i_nlink == 0);
933  int move = (old_dir != new_dir);
934  struct ubifs_inode *uninitialized_var(new_ui);
935 
936  dbg_jnl("dent '%.*s' in dir ino %lu to dent '%.*s' in dir ino %lu",
937  old_dentry->d_name.len, old_dentry->d_name.name,
938  old_dir->i_ino, new_dentry->d_name.len,
939  new_dentry->d_name.name, new_dir->i_ino);
940  ubifs_assert(ubifs_inode(old_dir)->data_len == 0);
941  ubifs_assert(ubifs_inode(new_dir)->data_len == 0);
942  ubifs_assert(mutex_is_locked(&ubifs_inode(old_dir)->ui_mutex));
943  ubifs_assert(mutex_is_locked(&ubifs_inode(new_dir)->ui_mutex));
944 
945  dlen1 = UBIFS_DENT_NODE_SZ + new_dentry->d_name.len + 1;
946  dlen2 = UBIFS_DENT_NODE_SZ + old_dentry->d_name.len + 1;
947  if (new_inode) {
948  new_ui = ubifs_inode(new_inode);
949  ubifs_assert(mutex_is_locked(&new_ui->ui_mutex));
950  ilen = UBIFS_INO_NODE_SZ;
951  if (!last_reference)
952  ilen += new_ui->data_len;
953  } else
954  ilen = 0;
955 
956  aligned_dlen1 = ALIGN(dlen1, 8);
957  aligned_dlen2 = ALIGN(dlen2, 8);
958  len = aligned_dlen1 + aligned_dlen2 + ALIGN(ilen, 8) + ALIGN(plen, 8);
959  if (old_dir != new_dir)
960  len += plen;
961  dent = kmalloc(len, GFP_NOFS);
962  if (!dent)
963  return -ENOMEM;
964 
965  /* Make reservation before allocating sequence numbers */
966  err = make_reservation(c, BASEHD, len);
967  if (err)
968  goto out_free;
969 
970  /* Make new dent */
971  dent->ch.node_type = UBIFS_DENT_NODE;
972  dent_key_init_flash(c, &dent->key, new_dir->i_ino, &new_dentry->d_name);
973  dent->inum = cpu_to_le64(old_inode->i_ino);
974  dent->type = get_dent_type(old_inode->i_mode);
975  dent->nlen = cpu_to_le16(new_dentry->d_name.len);
976  memcpy(dent->name, new_dentry->d_name.name, new_dentry->d_name.len);
977  dent->name[new_dentry->d_name.len] = '\0';
978  zero_dent_node_unused(dent);
979  ubifs_prep_grp_node(c, dent, dlen1, 0);
980 
981  /* Make deletion dent */
982  dent2 = (void *)dent + aligned_dlen1;
983  dent2->ch.node_type = UBIFS_DENT_NODE;
984  dent_key_init_flash(c, &dent2->key, old_dir->i_ino,
985  &old_dentry->d_name);
986  dent2->inum = 0;
987  dent2->type = DT_UNKNOWN;
988  dent2->nlen = cpu_to_le16(old_dentry->d_name.len);
989  memcpy(dent2->name, old_dentry->d_name.name, old_dentry->d_name.len);
990  dent2->name[old_dentry->d_name.len] = '\0';
991  zero_dent_node_unused(dent2);
992  ubifs_prep_grp_node(c, dent2, dlen2, 0);
993 
994  p = (void *)dent2 + aligned_dlen2;
995  if (new_inode) {
996  pack_inode(c, p, new_inode, 0);
997  p += ALIGN(ilen, 8);
998  }
999 
1000  if (!move)
1001  pack_inode(c, p, old_dir, 1);
1002  else {
1003  pack_inode(c, p, old_dir, 0);
1004  p += ALIGN(plen, 8);
1005  pack_inode(c, p, new_dir, 1);
1006  }
1007 
1008  if (last_reference) {
1009  err = ubifs_add_orphan(c, new_inode->i_ino);
1010  if (err) {
1011  release_head(c, BASEHD);
1012  goto out_finish;
1013  }
1014  new_ui->del_cmtno = c->cmt_no;
1015  }
1016 
1017  err = write_head(c, BASEHD, dent, len, &lnum, &offs, sync);
1018  if (err)
1019  goto out_release;
1020  if (!sync) {
1021  struct ubifs_wbuf *wbuf = &c->jheads[BASEHD].wbuf;
1022 
1023  ubifs_wbuf_add_ino_nolock(wbuf, new_dir->i_ino);
1024  ubifs_wbuf_add_ino_nolock(wbuf, old_dir->i_ino);
1025  if (new_inode)
1027  new_inode->i_ino);
1028  }
1029  release_head(c, BASEHD);
1030 
1031  dent_key_init(c, &key, new_dir->i_ino, &new_dentry->d_name);
1032  err = ubifs_tnc_add_nm(c, &key, lnum, offs, dlen1, &new_dentry->d_name);
1033  if (err)
1034  goto out_ro;
1035 
1036  err = ubifs_add_dirt(c, lnum, dlen2);
1037  if (err)
1038  goto out_ro;
1039 
1040  dent_key_init(c, &key, old_dir->i_ino, &old_dentry->d_name);
1041  err = ubifs_tnc_remove_nm(c, &key, &old_dentry->d_name);
1042  if (err)
1043  goto out_ro;
1044 
1045  offs += aligned_dlen1 + aligned_dlen2;
1046  if (new_inode) {
1047  ino_key_init(c, &key, new_inode->i_ino);
1048  err = ubifs_tnc_add(c, &key, lnum, offs, ilen);
1049  if (err)
1050  goto out_ro;
1051  offs += ALIGN(ilen, 8);
1052  }
1053 
1054  ino_key_init(c, &key, old_dir->i_ino);
1055  err = ubifs_tnc_add(c, &key, lnum, offs, plen);
1056  if (err)
1057  goto out_ro;
1058 
1059  if (old_dir != new_dir) {
1060  offs += ALIGN(plen, 8);
1061  ino_key_init(c, &key, new_dir->i_ino);
1062  err = ubifs_tnc_add(c, &key, lnum, offs, plen);
1063  if (err)
1064  goto out_ro;
1065  }
1066 
1067  finish_reservation(c);
1068  if (new_inode) {
1069  mark_inode_clean(c, new_ui);
1070  spin_lock(&new_ui->ui_lock);
1071  new_ui->synced_i_size = new_ui->ui_size;
1072  spin_unlock(&new_ui->ui_lock);
1073  }
1074  mark_inode_clean(c, ubifs_inode(old_dir));
1075  if (move)
1076  mark_inode_clean(c, ubifs_inode(new_dir));
1077  kfree(dent);
1078  return 0;
1079 
1080 out_release:
1081  release_head(c, BASEHD);
1082 out_ro:
1083  ubifs_ro_mode(c, err);
1084  if (last_reference)
1085  ubifs_delete_orphan(c, new_inode->i_ino);
1086 out_finish:
1087  finish_reservation(c);
1088 out_free:
1089  kfree(dent);
1090  return err;
1091 }
1092 
1101 static int recomp_data_node(struct ubifs_data_node *dn, int *new_len)
1102 {
1103  void *buf;
1104  int err, len, compr_type, out_len;
1105 
1106  out_len = le32_to_cpu(dn->size);
1107  buf = kmalloc(out_len * WORST_COMPR_FACTOR, GFP_NOFS);
1108  if (!buf)
1109  return -ENOMEM;
1110 
1111  len = le32_to_cpu(dn->ch.len) - UBIFS_DATA_NODE_SZ;
1112  compr_type = le16_to_cpu(dn->compr_type);
1113  err = ubifs_decompress(&dn->data, len, buf, &out_len, compr_type);
1114  if (err)
1115  goto out;
1116 
1117  ubifs_compress(buf, *new_len, &dn->data, &out_len, &compr_type);
1118  ubifs_assert(out_len <= UBIFS_BLOCK_SIZE);
1119  dn->compr_type = cpu_to_le16(compr_type);
1120  dn->size = cpu_to_le32(*new_len);
1121  *new_len = UBIFS_DATA_NODE_SZ + out_len;
1122 out:
1123  kfree(buf);
1124  return err;
1125 }
1126 
1142 int ubifs_jnl_truncate(struct ubifs_info *c, const struct inode *inode,
1143  loff_t old_size, loff_t new_size)
1144 {
1145  union ubifs_key key, to_key;
1146  struct ubifs_ino_node *ino;
1147  struct ubifs_trun_node *trun;
1148  struct ubifs_data_node *uninitialized_var(dn);
1149  int err, dlen, len, lnum, offs, bit, sz, sync = IS_SYNC(inode);
1150  struct ubifs_inode *ui = ubifs_inode(inode);
1151  ino_t inum = inode->i_ino;
1152  unsigned int blk;
1153 
1154  dbg_jnl("ino %lu, size %lld -> %lld",
1155  (unsigned long)inum, old_size, new_size);
1156  ubifs_assert(!ui->data_len);
1157  ubifs_assert(S_ISREG(inode->i_mode));
1158  ubifs_assert(mutex_is_locked(&ui->ui_mutex));
1159 
1162  ino = kmalloc(sz, GFP_NOFS);
1163  if (!ino)
1164  return -ENOMEM;
1165 
1166  trun = (void *)ino + UBIFS_INO_NODE_SZ;
1167  trun->ch.node_type = UBIFS_TRUN_NODE;
1168  trun->inum = cpu_to_le32(inum);
1169  trun->old_size = cpu_to_le64(old_size);
1170  trun->new_size = cpu_to_le64(new_size);
1171  zero_trun_node_unused(trun);
1172 
1173  dlen = new_size & (UBIFS_BLOCK_SIZE - 1);
1174  if (dlen) {
1175  /* Get last data block so it can be truncated */
1176  dn = (void *)trun + UBIFS_TRUN_NODE_SZ;
1177  blk = new_size >> UBIFS_BLOCK_SHIFT;
1178  data_key_init(c, &key, inum, blk);
1179  dbg_jnlk(&key, "last block key ");
1180  err = ubifs_tnc_lookup(c, &key, dn);
1181  if (err == -ENOENT)
1182  dlen = 0; /* Not found (so it is a hole) */
1183  else if (err)
1184  goto out_free;
1185  else {
1186  if (le32_to_cpu(dn->size) <= dlen)
1187  dlen = 0; /* Nothing to do */
1188  else {
1189  int compr_type = le16_to_cpu(dn->compr_type);
1190 
1191  if (compr_type != UBIFS_COMPR_NONE) {
1192  err = recomp_data_node(dn, &dlen);
1193  if (err)
1194  goto out_free;
1195  } else {
1196  dn->size = cpu_to_le32(dlen);
1197  dlen += UBIFS_DATA_NODE_SZ;
1198  }
1199  zero_data_node_unused(dn);
1200  }
1201  }
1202  }
1203 
1204  /* Must make reservation before allocating sequence numbers */
1206  if (dlen)
1207  len += dlen;
1208  err = make_reservation(c, BASEHD, len);
1209  if (err)
1210  goto out_free;
1211 
1212  pack_inode(c, ino, inode, 0);
1213  ubifs_prep_grp_node(c, trun, UBIFS_TRUN_NODE_SZ, dlen ? 0 : 1);
1214  if (dlen)
1215  ubifs_prep_grp_node(c, dn, dlen, 1);
1216 
1217  err = write_head(c, BASEHD, ino, len, &lnum, &offs, sync);
1218  if (err)
1219  goto out_release;
1220  if (!sync)
1221  ubifs_wbuf_add_ino_nolock(&c->jheads[BASEHD].wbuf, inum);
1222  release_head(c, BASEHD);
1223 
1224  if (dlen) {
1225  sz = offs + UBIFS_INO_NODE_SZ + UBIFS_TRUN_NODE_SZ;
1226  err = ubifs_tnc_add(c, &key, lnum, sz, dlen);
1227  if (err)
1228  goto out_ro;
1229  }
1230 
1231  ino_key_init(c, &key, inum);
1232  err = ubifs_tnc_add(c, &key, lnum, offs, UBIFS_INO_NODE_SZ);
1233  if (err)
1234  goto out_ro;
1235 
1236  err = ubifs_add_dirt(c, lnum, UBIFS_TRUN_NODE_SZ);
1237  if (err)
1238  goto out_ro;
1239 
1240  bit = new_size & (UBIFS_BLOCK_SIZE - 1);
1241  blk = (new_size >> UBIFS_BLOCK_SHIFT) + (bit ? 1 : 0);
1242  data_key_init(c, &key, inum, blk);
1243 
1244  bit = old_size & (UBIFS_BLOCK_SIZE - 1);
1245  blk = (old_size >> UBIFS_BLOCK_SHIFT) - (bit ? 0 : 1);
1246  data_key_init(c, &to_key, inum, blk);
1247 
1248  err = ubifs_tnc_remove_range(c, &key, &to_key);
1249  if (err)
1250  goto out_ro;
1251 
1252  finish_reservation(c);
1253  spin_lock(&ui->ui_lock);
1254  ui->synced_i_size = ui->ui_size;
1255  spin_unlock(&ui->ui_lock);
1256  mark_inode_clean(c, ui);
1257  kfree(ino);
1258  return 0;
1259 
1260 out_release:
1261  release_head(c, BASEHD);
1262 out_ro:
1263  ubifs_ro_mode(c, err);
1264  finish_reservation(c);
1265 out_free:
1266  kfree(ino);
1267  return err;
1268 }
1269 
1270 
/**
 * ubifs_jnl_delete_xattr - delete an extended attribute.
 * @c: UBIFS file-system description object
 * @host: host inode
 * @inode: extended attribute inode (must already be unlinked, i_nlink == 0)
 * @nm: extended attribute name
 *
 * Writes one journal group containing a deletion xattr entry (inum 0), the
 * (deleted) extended attribute inode, and the updated host inode, then
 * removes the xattr entry and all of the xattr inode's nodes from the TNC.
 * Returns zero in case of success and a negative error code in case of
 * failure (the file-system is switched to read-only mode on journal errors).
 */
int ubifs_jnl_delete_xattr(struct ubifs_info *c, const struct inode *host,
			   const struct inode *inode, const struct qstr *nm)
{
	int err, xlen, hlen, len, lnum, xent_offs, aligned_xlen;
	struct ubifs_dent_node *xent;
	struct ubifs_ino_node *ino;
	union ubifs_key xent_key, key1, key2;
	int sync = IS_DIRSYNC(host);
	struct ubifs_inode *host_ui = ubifs_inode(host);

	dbg_jnl("host %lu, xattr ino %lu, name '%s', data len %d",
		host->i_ino, inode->i_ino, nm->name,
		ubifs_inode(inode)->data_len);
	ubifs_assert(inode->i_nlink == 0);
	ubifs_assert(mutex_is_locked(&host_ui->ui_mutex));

	/*
	 * Since we are deleting the inode, we do not bother to attach any data
	 * to it and assume its length is %UBIFS_INO_NODE_SZ.
	 */
	xlen = UBIFS_DENT_NODE_SZ + nm->len + 1;
	aligned_xlen = ALIGN(xlen, 8);
	hlen = host_ui->data_len + UBIFS_INO_NODE_SZ;
	/* Group layout: [xentry | xattr inode | host inode], 8-byte aligned */
	len = aligned_xlen + UBIFS_INO_NODE_SZ + ALIGN(hlen, 8);

	xent = kmalloc(len, GFP_NOFS);
	if (!xent)
		return -ENOMEM;

	/* Make reservation before allocating sequence numbers */
	err = make_reservation(c, BASEHD, len);
	if (err) {
		kfree(xent);
		return err;
	}

	/* inum == 0 marks this xattr entry as a deletion entry */
	xent->ch.node_type = UBIFS_XENT_NODE;
	xent_key_init(c, &xent_key, host->i_ino, nm);
	key_write(c, &xent_key, xent->key);
	xent->inum = 0;
	xent->type = get_dent_type(inode->i_mode);
	xent->nlen = cpu_to_le16(nm->len);
	memcpy(xent->name, nm->name, nm->len);
	xent->name[nm->len] = '\0';
	zero_dent_node_unused(xent);
	ubifs_prep_grp_node(c, xent, xlen, 0);

	/* Xattr inode follows the aligned xentry, host inode follows it */
	ino = (void *)xent + aligned_xlen;
	pack_inode(c, ino, inode, 0);
	ino = (void *)ino + UBIFS_INO_NODE_SZ;
	pack_inode(c, ino, host, 1);

	err = write_head(c, BASEHD, xent, len, &lnum, &xent_offs, sync);
	if (!sync && !err)
		ubifs_wbuf_add_ino_nolock(&c->jheads[BASEHD].wbuf, host->i_ino);
	release_head(c, BASEHD);
	kfree(xent);
	if (err)
		goto out_ro;

	/* Remove the extended attribute entry from TNC */
	err = ubifs_tnc_remove_nm(c, &xent_key, nm);
	if (err)
		goto out_ro;
	/* The deletion xentry itself is immediately obsolete - account dirt */
	err = ubifs_add_dirt(c, lnum, xlen);
	if (err)
		goto out_ro;

	/*
	 * Remove all nodes belonging to the extended attribute inode from TNC.
	 * Well, there actually must be only one node - the inode itself.
	 */
	lowest_ino_key(c, &key1, inode->i_ino);
	highest_ino_key(c, &key2, inode->i_ino);
	err = ubifs_tnc_remove_range(c, &key1, &key2);
	if (err)
		goto out_ro;
	/* The deleted xattr inode node is obsolete as well */
	err = ubifs_add_dirt(c, lnum, UBIFS_INO_NODE_SZ);
	if (err)
		goto out_ro;

	/* And update TNC with the new host inode position */
	ino_key_init(c, &key1, host->i_ino);
	err = ubifs_tnc_add(c, &key1, lnum, xent_offs + len - hlen, hlen);
	if (err)
		goto out_ro;

	finish_reservation(c);
	spin_lock(&host_ui->ui_lock);
	host_ui->synced_i_size = host_ui->ui_size;
	spin_unlock(&host_ui->ui_lock);
	mark_inode_clean(c, host_ui);
	return 0;

out_ro:
	/* TNC update failed after the journal write - force read-only mode */
	ubifs_ro_mode(c, err);
	finish_reservation(c);
	return err;
}
1382 
1396 int ubifs_jnl_change_xattr(struct ubifs_info *c, const struct inode *inode,
1397  const struct inode *host)
1398 {
1399  int err, len1, len2, aligned_len, aligned_len1, lnum, offs;
1400  struct ubifs_inode *host_ui = ubifs_inode(host);
1401  struct ubifs_ino_node *ino;
1402  union ubifs_key key;
1403  int sync = IS_DIRSYNC(host);
1404 
1405  dbg_jnl("ino %lu, ino %lu", host->i_ino, inode->i_ino);
1406  ubifs_assert(host->i_nlink > 0);
1407  ubifs_assert(inode->i_nlink > 0);
1408  ubifs_assert(mutex_is_locked(&host_ui->ui_mutex));
1409 
1410  len1 = UBIFS_INO_NODE_SZ + host_ui->data_len;
1411  len2 = UBIFS_INO_NODE_SZ + ubifs_inode(inode)->data_len;
1412  aligned_len1 = ALIGN(len1, 8);
1413  aligned_len = aligned_len1 + ALIGN(len2, 8);
1414 
1415  ino = kmalloc(aligned_len, GFP_NOFS);
1416  if (!ino)
1417  return -ENOMEM;
1418 
1419  /* Make reservation before allocating sequence numbers */
1420  err = make_reservation(c, BASEHD, aligned_len);
1421  if (err)
1422  goto out_free;
1423 
1424  pack_inode(c, ino, host, 0);
1425  pack_inode(c, (void *)ino + aligned_len1, inode, 1);
1426 
1427  err = write_head(c, BASEHD, ino, aligned_len, &lnum, &offs, 0);
1428  if (!sync && !err) {
1429  struct ubifs_wbuf *wbuf = &c->jheads[BASEHD].wbuf;
1430 
1431  ubifs_wbuf_add_ino_nolock(wbuf, host->i_ino);
1432  ubifs_wbuf_add_ino_nolock(wbuf, inode->i_ino);
1433  }
1434  release_head(c, BASEHD);
1435  if (err)
1436  goto out_ro;
1437 
1438  ino_key_init(c, &key, host->i_ino);
1439  err = ubifs_tnc_add(c, &key, lnum, offs, len1);
1440  if (err)
1441  goto out_ro;
1442 
1443  ino_key_init(c, &key, inode->i_ino);
1444  err = ubifs_tnc_add(c, &key, lnum, offs + aligned_len1, len2);
1445  if (err)
1446  goto out_ro;
1447 
1448  finish_reservation(c);
1449  spin_lock(&host_ui->ui_lock);
1450  host_ui->synced_i_size = host_ui->ui_size;
1451  spin_unlock(&host_ui->ui_lock);
1452  mark_inode_clean(c, host_ui);
1453  kfree(ino);
1454  return 0;
1455 
1456 out_ro:
1457  ubifs_ro_mode(c, err);
1458  finish_reservation(c);
1459 out_free:
1460  kfree(ino);
1461  return err;
1462 }
1463