Linux Kernel 3.7.1
fs/ubifs/file.c
/*
 * This file is part of UBIFS.
 *
 * Copyright (C) 2006-2008 Nokia Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 51
 * Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Authors: Artem Bityutskiy (Битюцкий Артём)
 *          Adrian Hunter
 */

/*
 * This file implements VFS file and inode operations for regular files, device
 * nodes and symlinks as well as address space operations.
 *
 * UBIFS uses 2 page flags: @PG_private and @PG_checked. @PG_private is set if
 * the page is dirty and is used for optimization purposes - dirty pages are
 * not budgeted, so the flag shows that 'ubifs_write_end()' should not release
 * the budget for this page. The @PG_checked flag is set if full budgeting is
 * required for the page, e.g., when it corresponds to a file hole or it is
 * beyond the file size. The budgeting is done in 'ubifs_write_begin()', because
 * it is OK to fail in this function, and the budget is released in
 * 'ubifs_write_end()'. So the @PG_private and @PG_checked flags carry
 * information about how the page was budgeted, to make it possible to release
 * the budget properly.
 *
 * A thing to keep in mind: inode @i_mutex is locked in most VFS operations we
 * implement. However, this is not true for 'ubifs_writepage()', which may be
 * called with @i_mutex unlocked. For example, when the flusher thread is doing
 * background write-back, it calls 'ubifs_writepage()' with unlocked @i_mutex.
 * On "normal" work-paths the @i_mutex is locked in 'ubifs_writepage()', e.g.
 * in the "sys_write -> alloc_pages -> direct reclaim path". So, in
 * 'ubifs_writepage()' we are only guaranteed that the page is locked.
 *
 * Similarly, @i_mutex is not always locked in 'ubifs_readpage()', e.g., the
 * read-ahead path does not lock it ("sys_read -> generic_file_aio_read ->
 * ondemand_readahead -> readpage"). In case of readahead, the @I_SYNC flag is
 * not set either. However, UBIFS disables readahead.
 */

#include "ubifs.h"
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/slab.h>
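
/*
 * Editor's illustration (not part of the original file): a minimal sketch of
 * the budgeting protocol described in the header comment above. Space is
 * reserved *before* the page cache is modified, kept if the modification
 * succeeds (to be released later by write-back or invalidation), and given
 * back on failure paths. 'ubifs_budget_space()' and 'ubifs_release_budget()'
 * are this file's real helpers; the function itself is hypothetical.
 */
static inline int example_budget_new_page(struct ubifs_info *c, int fail)
{
        struct ubifs_budget_req req = { .new_page = 1 };
        int err;

        err = ubifs_budget_space(c, &req);      /* may force write-back */
        if (err)
                return err;                     /* typically -ENOSPC */

        if (fail) {
                /* Could not dirty the page - give the reservation back */
                ubifs_release_budget(c, &req);
                return -EIO;                    /* hypothetical error */
        }

        /*
         * Success: the page is dirtied and @PG_private set; the budget is
         * kept and released later, e.g. in 'do_writepage()'.
         */
        return 0;
}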

static int read_block(struct inode *inode, void *addr, unsigned int block,
                      struct ubifs_data_node *dn)
{
        struct ubifs_info *c = inode->i_sb->s_fs_info;
        int err, len, out_len;
        union ubifs_key key;
        unsigned int dlen;

        data_key_init(c, &key, inode->i_ino, block);
        err = ubifs_tnc_lookup(c, &key, dn);
        if (err) {
                if (err == -ENOENT)
                        /* Not found, so it must be a hole */
                        memset(addr, 0, UBIFS_BLOCK_SIZE);
                return err;
        }

        ubifs_assert(le64_to_cpu(dn->ch.sqnum) >
                     ubifs_inode(inode)->creat_sqnum);
        len = le32_to_cpu(dn->size);
        if (len <= 0 || len > UBIFS_BLOCK_SIZE)
                goto dump;

        dlen = le32_to_cpu(dn->ch.len) - UBIFS_DATA_NODE_SZ;
        out_len = UBIFS_BLOCK_SIZE;
        err = ubifs_decompress(&dn->data, dlen, addr, &out_len,
                               le16_to_cpu(dn->compr_type));
        if (err || len != out_len)
                goto dump;

        /*
         * Data length can be less than a full block, even for blocks that are
         * not the last in the file (e.g., as a result of making a hole and
         * appending data). Ensure that the remainder is zeroed out.
         */
        if (len < UBIFS_BLOCK_SIZE)
                memset(addr + len, 0, UBIFS_BLOCK_SIZE - len);

        return 0;

dump:
        ubifs_err("bad data node (block %u, inode %lu)",
                  block, inode->i_ino);
        ubifs_dump_node(c, dn);
        return -EINVAL;
}
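
/*
 * Editor's illustration (not part of the original file): how a byte offset in
 * a file maps to the block number that 'read_block()' looks up. UBIFS blocks
 * are UBIFS_BLOCK_SIZE (4096) bytes, i.e. UBIFS_BLOCK_SHIFT == 12, so byte
 * 10000 of an inode lives in block 2, and its lookup key would be built with
 * 'data_key_init()' exactly as above. The helper name is hypothetical.
 */
static inline unsigned int example_block_of(loff_t pos)
{
        return pos >> UBIFS_BLOCK_SHIFT;        /* 10000 >> 12 == 2 */
}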

static int do_readpage(struct page *page)
{
        void *addr;
        int err = 0, i;
        unsigned int block, beyond;
        struct ubifs_data_node *dn;
        struct inode *inode = page->mapping->host;
        loff_t i_size = i_size_read(inode);

        dbg_gen("ino %lu, pg %lu, i_size %lld, flags %#lx",
                inode->i_ino, page->index, i_size, page->flags);
        ubifs_assert(!PageChecked(page));
        ubifs_assert(!PagePrivate(page));

        addr = kmap(page);

        block = page->index << UBIFS_BLOCKS_PER_PAGE_SHIFT;
        beyond = (i_size + UBIFS_BLOCK_SIZE - 1) >> UBIFS_BLOCK_SHIFT;
        if (block >= beyond) {
                /* Reading beyond inode */
                SetPageChecked(page);
                memset(addr, 0, PAGE_CACHE_SIZE);
                goto out;
        }

        dn = kmalloc(UBIFS_MAX_DATA_NODE_SZ, GFP_NOFS);
        if (!dn) {
                err = -ENOMEM;
                goto error;
        }

        i = 0;
        while (1) {
                int ret;

                if (block >= beyond) {
                        /* Reading beyond inode */
                        err = -ENOENT;
                        memset(addr, 0, UBIFS_BLOCK_SIZE);
                } else {
                        ret = read_block(inode, addr, block, dn);
                        if (ret) {
                                err = ret;
                                if (err != -ENOENT)
                                        break;
                        } else if (block + 1 == beyond) {
                                int dlen = le32_to_cpu(dn->size);
                                int ilen = i_size & (UBIFS_BLOCK_SIZE - 1);

                                if (ilen && ilen < dlen)
                                        memset(addr + ilen, 0, dlen - ilen);
                        }
                }
                if (++i >= UBIFS_BLOCKS_PER_PAGE)
                        break;
                block += 1;
                addr += UBIFS_BLOCK_SIZE;
        }
        if (err) {
                if (err == -ENOENT) {
                        /* Not found, so it must be a hole */
                        SetPageChecked(page);
                        dbg_gen("hole");
                        goto out_free;
                }
                ubifs_err("cannot read page %lu of inode %lu, error %d",
                          page->index, inode->i_ino, err);
                goto error;
        }

out_free:
        kfree(dn);
out:
        SetPageUptodate(page);
        ClearPageError(page);
        flush_dcache_page(page);
        kunmap(page);
        return 0;

error:
        kfree(dn);
        ClearPageUptodate(page);
        SetPageError(page);
        flush_dcache_page(page);
        kunmap(page);
        return err;
}

/**
 * release_new_page_budget - release budget of a new page.
 * @c: UBIFS file-system description object
 *
 * This is a helper function which releases budget corresponding to the budget
 * of one new page of data.
 */
static void release_new_page_budget(struct ubifs_info *c)
{
        struct ubifs_budget_req req = { .recalculate = 1, .new_page = 1 };

        ubifs_release_budget(c, &req);
}

/**
 * release_existing_page_budget - release budget of an existing page.
 * @c: UBIFS file-system description object
 *
 * This is a helper function which releases budget corresponding to the budget
 * of changing one page of data which already exists on the flash media.
 */
static void release_existing_page_budget(struct ubifs_info *c)
{
        struct ubifs_budget_req req = { .dd_growth = c->bi.page_budget};

        ubifs_release_budget(c, &req);
}

static int write_begin_slow(struct address_space *mapping,
                            loff_t pos, unsigned len, struct page **pagep,
                            unsigned flags)
{
        struct inode *inode = mapping->host;
        struct ubifs_info *c = inode->i_sb->s_fs_info;
        pgoff_t index = pos >> PAGE_CACHE_SHIFT;
        struct ubifs_budget_req req = { .new_page = 1 };
        int uninitialized_var(err), appending = !!(pos + len > inode->i_size);
        struct page *page;

        dbg_gen("ino %lu, pos %llu, len %u, i_size %lld",
                inode->i_ino, pos, len, inode->i_size);

        /*
         * On the slow path we have to budget before locking the page, because
         * budgeting may force write-back, which would wait on locked pages and
         * deadlock if we had the page locked. At this point we do not know
         * anything about the page, so assume that this is a new page which is
         * written to a hole. This corresponds to the largest budget. Later the
         * budget will be amended if this is not true.
         */
        if (appending)
                /* We are appending data, budget for inode change */
                req.dirtied_ino = 1;

        err = ubifs_budget_space(c, &req);
        if (unlikely(err))
                return err;

        page = grab_cache_page_write_begin(mapping, index, flags);
        if (unlikely(!page)) {
                ubifs_release_budget(c, &req);
                return -ENOMEM;
        }

        if (!PageUptodate(page)) {
                if (!(pos & ~PAGE_CACHE_MASK) && len == PAGE_CACHE_SIZE)
                        SetPageChecked(page);
                else {
                        err = do_readpage(page);
                        if (err) {
                                unlock_page(page);
                                page_cache_release(page);
                                return err;
                        }
                }

                SetPageUptodate(page);
                ClearPageError(page);
        }

        if (PagePrivate(page))
                /*
                 * The page is dirty, which means it was budgeted twice:
                 *   o first time the budget was allocated by the task which
                 *     made the page dirty and set the PG_private flag;
                 *   o and then we budgeted for it for the second time at the
                 *     very beginning of this function.
                 *
                 * So what we have to do is to release the page budget we
                 * allocated.
                 */
                release_new_page_budget(c);
        else if (!PageChecked(page))
                /*
                 * We are changing a page which already exists on the media.
                 * This means that changing the page does not make the amount
                 * of indexing information larger, and this part of the budget
                 * which we have already acquired may be released.
                 */
                ubifs_convert_page_budget(c);

        if (appending) {
                struct ubifs_inode *ui = ubifs_inode(inode);

                /*
                 * 'ubifs_write_end()' is optimized from the fast-path part of
                 * 'ubifs_write_begin()' and expects the @ui_mutex to be locked
                 * if data is appended.
                 */
                mutex_lock(&ui->ui_mutex);
                if (ui->dirty)
                        /*
                         * The inode is dirty already, so we may free the
                         * budget we allocated.
                         */
                        ubifs_release_dirty_inode_budget(c, ui);
        }

        *pagep = page;
        return 0;
}

/**
 * allocate_budget - allocate budget for 'ubifs_write_begin()'.
 * @c: UBIFS file-system description object
 * @page: page to allocate budget for
 * @ui: UBIFS inode object the page belongs to
 * @appending: non-zero if the page is appended
 *
 * This is a helper function for 'ubifs_write_begin()' which allocates budget
 * for the operation. The budget is allocated differently depending on whether
 * this is appending, whether the page is dirty or not, and so on. This
 * function leaves the @ui->ui_mutex locked in case of appending. Returns zero
 * in case of success and %-ENOSPC in case of failure.
 */
static int allocate_budget(struct ubifs_info *c, struct page *page,
                           struct ubifs_inode *ui, int appending)
{
        struct ubifs_budget_req req = { .fast = 1 };

        if (PagePrivate(page)) {
                if (!appending)
                        /*
                         * The page is dirty and we are not appending, which
                         * means no budget is needed at all.
                         */
                        return 0;

                mutex_lock(&ui->ui_mutex);
                if (ui->dirty)
                        /*
                         * The page is dirty and we are appending, so the inode
                         * has to be marked as dirty. However, it is already
                         * dirty, so we do not need any budget. We may return,
                         * but @ui->ui_mutex has to be left locked because we
                         * should prevent write-back from flushing the inode
                         * and freeing the budget. The lock will be released in
                         * 'ubifs_write_end()'.
                         */
                        return 0;

                /*
                 * The page is dirty, we are appending, the inode is clean, so
                 * we need to budget the inode change.
                 */
                req.dirtied_ino = 1;
        } else {
                if (PageChecked(page))
                        /*
                         * The page corresponds to a hole and does not exist on
                         * the media. So changing it makes the amount of
                         * indexing information larger, and we have to budget
                         * for a new page.
                         */
                        req.new_page = 1;
                else
                        /*
                         * Not a hole, the change will not add any new indexing
                         * information, so budget for a page change.
                         */
                        req.dirtied_page = 1;

                if (appending) {
                        mutex_lock(&ui->ui_mutex);
                        if (!ui->dirty)
                                /*
                                 * The inode is clean but we will have to mark
                                 * it as dirty because we are appending. This
                                 * needs a budget.
                                 */
                                req.dirtied_ino = 1;
                }
        }

        return ubifs_budget_space(c, &req);
}

/*
 * This function is called when a page of data is going to be written. Since
 * the page of data will not necessarily go to the flash straight away, UBIFS
 * has to reserve space on the media for it, which is done by means of
 * budgeting.
 *
 * This is the hot-path of the file-system and we are trying to optimize it as
 * much as possible. For this reason it is split into 2 parts - slow and fast.
 *
 * There are many budgeting cases:
 * o a new page is appended - we have to budget for a new page and for
 *   changing the inode; however, if the inode is already dirty, there is
 *   no need to budget for it;
 * o an existing clean page is changed - we have to budget for it; if the page
 *   does not exist on the media (a hole), we have to budget for a new
 *   page; otherwise, we may budget for changing an existing page; the
 *   difference between these cases is that changing an existing page does
 *   not introduce anything new to the FS indexing information, so it does
 *   not grow, and a smaller budget is acquired in this case;
 * o an existing dirty page is changed - no need to budget at all, because
 *   the page budget was acquired earlier, when the page was marked dirty.
 *
 * The UBIFS budgeting sub-system may force write-back if it thinks there is
 * no space to reserve. This imposes some locking restrictions and makes it
 * impossible to take into account the above cases, and makes it impossible to
 * optimize budgeting.
 *
 * The solution for this is that the fast path of 'ubifs_write_begin()' assumes
 * there is plenty of flash space and the budget will be acquired quickly,
 * without forcing write-back. The slow path does not make this assumption.
 */
static int ubifs_write_begin(struct file *file, struct address_space *mapping,
                             loff_t pos, unsigned len, unsigned flags,
                             struct page **pagep, void **fsdata)
{
        struct inode *inode = mapping->host;
        struct ubifs_info *c = inode->i_sb->s_fs_info;
        struct ubifs_inode *ui = ubifs_inode(inode);
        pgoff_t index = pos >> PAGE_CACHE_SHIFT;
        int uninitialized_var(err), appending = !!(pos + len > inode->i_size);
        int skipped_read = 0;
        struct page *page;

        ubifs_assert(ubifs_inode(inode)->ui_size == inode->i_size);
        ubifs_assert(!c->ro_media && !c->ro_mount);

        if (unlikely(c->ro_error))
                return -EROFS;

        /* Try out the fast-path part first */
        page = grab_cache_page_write_begin(mapping, index, flags);
        if (unlikely(!page))
                return -ENOMEM;

        if (!PageUptodate(page)) {
                /* The page is not loaded from the flash */
                if (!(pos & ~PAGE_CACHE_MASK) && len == PAGE_CACHE_SIZE) {
                        /*
                         * We change the whole page so there is no need to
                         * load it. But we do not know whether this page
                         * exists on the media or not, so we assume the latter
                         * because it requires a larger budget. The assumption
                         * is that it is better to budget a bit more than to
                         * read the page from the media. Thus, we are setting
                         * the @PG_checked flag here.
                         */
                        SetPageChecked(page);
                        skipped_read = 1;
                } else {
                        err = do_readpage(page);
                        if (err) {
                                unlock_page(page);
                                page_cache_release(page);
                                return err;
                        }
                }

                SetPageUptodate(page);
                ClearPageError(page);
        }

        err = allocate_budget(c, page, ui, appending);
        if (unlikely(err)) {
                ubifs_assert(err == -ENOSPC);
                /*
                 * If we skipped reading the page because we were going to
                 * write all of it, then it is not up to date.
                 */
                if (skipped_read) {
                        ClearPageChecked(page);
                        ClearPageUptodate(page);
                }
                /*
                 * Budgeting failed which means it would have to force
                 * write-back but didn't, because we set the @fast flag in the
                 * request. Write-back cannot be done now, while we have the
                 * page locked, because it would deadlock. Unlock and free
                 * everything and fall back to the slow path.
                 */
                if (appending) {
                        ubifs_assert(mutex_is_locked(&ui->ui_mutex));
                        mutex_unlock(&ui->ui_mutex);
                }
                unlock_page(page);
                page_cache_release(page);

                return write_begin_slow(mapping, pos, len, pagep, flags);
        }

        /*
         * Whee, we acquired budgeting quickly - without involving
         * garbage-collection, committing or forcing write-back. We return
         * with @ui->ui_mutex locked if we are appending pages, and unlocked
         * otherwise. This is an optimization (slightly hacky though).
         */
        *pagep = page;
        return 0;
}
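
/*
 * Editor's illustration (not part of the original file): the fast/slow
 * structure of 'ubifs_write_begin()' in miniature. The fast path asks for
 * budget with @fast set so the budgeting code fails with -ENOSPC instead of
 * forcing write-back; only then are the page and mutexes dropped and the
 * blocking slow path taken. The helper name is hypothetical.
 */
static inline int example_fast_then_slow(struct ubifs_info *c)
{
        struct ubifs_budget_req req = { .fast = 1, .new_page = 1 };

        if (ubifs_budget_space(c, &req) == 0)
                return 0;               /* fast path won */

        /* ...unlock the page and @ui_mutex here, then budget again... */
        req.fast = 0;
        return ubifs_budget_space(c, &req);     /* may force write-back */
}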

/**
 * cancel_budget - cancel budget.
 * @c: UBIFS file-system description object
 * @page: page to cancel budget for
 * @ui: UBIFS inode object the page belongs to
 * @appending: non-zero if the page is appended
 *
 * This is a helper function for a page write operation. It unlocks the
 * @ui->ui_mutex in case of appending.
 */
static void cancel_budget(struct ubifs_info *c, struct page *page,
                          struct ubifs_inode *ui, int appending)
{
        if (appending) {
                if (!ui->dirty)
                        ubifs_release_dirty_inode_budget(c, ui);
                mutex_unlock(&ui->ui_mutex);
        }
        if (!PagePrivate(page)) {
                if (PageChecked(page))
                        release_new_page_budget(c);
                else
                        release_existing_page_budget(c);
        }
}

static int ubifs_write_end(struct file *file, struct address_space *mapping,
                           loff_t pos, unsigned len, unsigned copied,
                           struct page *page, void *fsdata)
{
        struct inode *inode = mapping->host;
        struct ubifs_inode *ui = ubifs_inode(inode);
        struct ubifs_info *c = inode->i_sb->s_fs_info;
        loff_t end_pos = pos + len;
        int appending = !!(end_pos > inode->i_size);

        dbg_gen("ino %lu, pos %llu, pg %lu, len %u, copied %d, i_size %lld",
                inode->i_ino, pos, page->index, len, copied, inode->i_size);

        if (unlikely(copied < len && len == PAGE_CACHE_SIZE)) {
                /*
                 * VFS copied less data to the page than it intended and
                 * declared in its '->write_begin()' call via the @len
                 * argument. If the page was not up-to-date, and @len was
                 * @PAGE_CACHE_SIZE, the 'ubifs_write_begin()' function did
                 * not load it from the media (for optimization reasons). This
                 * means that part of the page contains garbage. So read the
                 * page now.
                 */
                dbg_gen("copied %d instead of %d, read page and repeat",
                        copied, len);
                cancel_budget(c, page, ui, appending);
                ClearPageChecked(page);

                /*
                 * Return 0 to force VFS to repeat the whole operation, or the
                 * error code if 'do_readpage()' fails.
                 */
                copied = do_readpage(page);
                goto out;
        }

        if (!PagePrivate(page)) {
                SetPagePrivate(page);
                atomic_long_inc(&c->dirty_pg_cnt);
                __set_page_dirty_nobuffers(page);
        }

        if (appending) {
                i_size_write(inode, end_pos);
                ui->ui_size = end_pos;
                /*
                 * Note, we do not set @I_DIRTY_PAGES (which means that the
                 * inode has dirty pages), this has been done in
                 * '__set_page_dirty_nobuffers()'.
                 */
                __mark_inode_dirty(inode, I_DIRTY_DATASYNC);
                ubifs_assert(mutex_is_locked(&ui->ui_mutex));
                mutex_unlock(&ui->ui_mutex);
        }

out:
        unlock_page(page);
        page_cache_release(page);
        return copied;
}
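
/*
 * Editor's illustration (not part of the original file): how the VFS drives
 * the two callbacks above for one buffered write, sketched with the generic
 * 'pagecache_write_begin()'/'pagecache_write_end()' helpers of this kernel
 * generation. A short copy (copied < len) into a page that was never read in
 * makes 'ubifs_write_end()' return 0, telling the VFS to repeat the whole
 * begin/copy/end cycle. The helper name is hypothetical.
 */
static inline int example_write_cycle(struct file *file,
                                      struct address_space *mapping,
                                      loff_t pos, unsigned len)
{
        struct page *page;
        void *fsdata;
        int err;

        err = pagecache_write_begin(file, mapping, pos, len, 0,
                                    &page, &fsdata);        /* -> ubifs_write_begin() */
        if (err)
                return err;

        /* ...the VFS copies user data into the kmapped page here... */

        return pagecache_write_end(file, mapping, pos, len, len,
                                   page, fsdata);           /* -> ubifs_write_end() */
}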

/**
 * populate_page - copy data nodes into a page for bulk-read.
 * @c: UBIFS file-system description object
 * @page: page
 * @bu: bulk-read information
 * @n: next zbranch slot
 *
 * This function returns %0 on success and a negative error code on failure.
 */
static int populate_page(struct ubifs_info *c, struct page *page,
                         struct bu_info *bu, int *n)
{
        int i = 0, nn = *n, offs = bu->zbranch[0].offs, hole = 0, read = 0;
        struct inode *inode = page->mapping->host;
        loff_t i_size = i_size_read(inode);
        unsigned int page_block;
        void *addr, *zaddr;
        pgoff_t end_index;

        dbg_gen("ino %lu, pg %lu, i_size %lld, flags %#lx",
                inode->i_ino, page->index, i_size, page->flags);

        addr = zaddr = kmap(page);

        end_index = (i_size - 1) >> PAGE_CACHE_SHIFT;
        if (!i_size || page->index > end_index) {
                hole = 1;
                memset(addr, 0, PAGE_CACHE_SIZE);
                goto out_hole;
        }

        page_block = page->index << UBIFS_BLOCKS_PER_PAGE_SHIFT;
        while (1) {
                int err, len, out_len, dlen;

                if (nn >= bu->cnt) {
                        hole = 1;
                        memset(addr, 0, UBIFS_BLOCK_SIZE);
                } else if (key_block(c, &bu->zbranch[nn].key) == page_block) {
                        struct ubifs_data_node *dn;

                        dn = bu->buf + (bu->zbranch[nn].offs - offs);

                        ubifs_assert(le64_to_cpu(dn->ch.sqnum) >
                                     ubifs_inode(inode)->creat_sqnum);

                        len = le32_to_cpu(dn->size);
                        if (len <= 0 || len > UBIFS_BLOCK_SIZE)
                                goto out_err;

                        dlen = le32_to_cpu(dn->ch.len) - UBIFS_DATA_NODE_SZ;
                        out_len = UBIFS_BLOCK_SIZE;
                        err = ubifs_decompress(&dn->data, dlen, addr, &out_len,
                                               le16_to_cpu(dn->compr_type));
                        if (err || len != out_len)
                                goto out_err;

                        if (len < UBIFS_BLOCK_SIZE)
                                memset(addr + len, 0, UBIFS_BLOCK_SIZE - len);

                        nn += 1;
                        read = (i << UBIFS_BLOCK_SHIFT) + len;
                } else if (key_block(c, &bu->zbranch[nn].key) < page_block) {
                        nn += 1;
                        continue;
                } else {
                        hole = 1;
                        memset(addr, 0, UBIFS_BLOCK_SIZE);
                }
                if (++i >= UBIFS_BLOCKS_PER_PAGE)
                        break;
                addr += UBIFS_BLOCK_SIZE;
                page_block += 1;
        }

        if (end_index == page->index) {
                int len = i_size & (PAGE_CACHE_SIZE - 1);

                if (len && len < read)
                        memset(zaddr + len, 0, read - len);
        }

out_hole:
        if (hole) {
                SetPageChecked(page);
                dbg_gen("hole");
        }

        SetPageUptodate(page);
        ClearPageError(page);
        flush_dcache_page(page);
        kunmap(page);
        *n = nn;
        return 0;

out_err:
        ClearPageUptodate(page);
        SetPageError(page);
        flush_dcache_page(page);
        kunmap(page);
        ubifs_err("bad data node (block %u, inode %lu)",
                  page_block, inode->i_ino);
        return -EINVAL;
}

/**
 * ubifs_do_bulk_read - do bulk-read.
 * @c: UBIFS file-system description object
 * @bu: bulk-read information
 * @page1: first page to read
 *
 * This function returns %1 if the bulk-read is done, otherwise %0 is returned.
 */
static int ubifs_do_bulk_read(struct ubifs_info *c, struct bu_info *bu,
                              struct page *page1)
{
        pgoff_t offset = page1->index, end_index;
        struct address_space *mapping = page1->mapping;
        struct inode *inode = mapping->host;
        struct ubifs_inode *ui = ubifs_inode(inode);
        int err, page_idx, page_cnt, ret = 0, n = 0;
        int allocate = bu->buf ? 0 : 1;
        loff_t isize;

        err = ubifs_tnc_get_bu_keys(c, bu);
        if (err)
                goto out_warn;

        if (bu->eof) {
                /* Turn off bulk-read at the end of the file */
                ui->read_in_a_row = 1;
                ui->bulk_read = 0;
        }

        page_cnt = bu->blk_cnt >> UBIFS_BLOCKS_PER_PAGE_SHIFT;
        if (!page_cnt) {
                /*
                 * This happens when there are multiple blocks per page and the
                 * blocks for the first page we are looking for are not
                 * together. If all the pages were like this, bulk-read would
                 * reduce performance, so we turn it off for a while.
                 */
                goto out_bu_off;
        }

        if (bu->cnt) {
                if (allocate) {
                        /*
                         * Allocate bulk-read buffer depending on how many data
                         * nodes we are going to read.
                         */
                        bu->buf_len = bu->zbranch[bu->cnt - 1].offs +
                                      bu->zbranch[bu->cnt - 1].len -
                                      bu->zbranch[0].offs;
                        ubifs_assert(bu->buf_len > 0);
                        ubifs_assert(bu->buf_len <= c->leb_size);
                        bu->buf = kmalloc(bu->buf_len, GFP_NOFS | __GFP_NOWARN);
                        if (!bu->buf)
                                goto out_bu_off;
                }

                err = ubifs_tnc_bulk_read(c, bu);
                if (err)
                        goto out_warn;
        }

        err = populate_page(c, page1, bu, &n);
        if (err)
                goto out_warn;

        unlock_page(page1);
        ret = 1;

        isize = i_size_read(inode);
        if (isize == 0)
                goto out_free;
        end_index = ((isize - 1) >> PAGE_CACHE_SHIFT);

        for (page_idx = 1; page_idx < page_cnt; page_idx++) {
                pgoff_t page_offset = offset + page_idx;
                struct page *page;

                if (page_offset > end_index)
                        break;
                page = find_or_create_page(mapping, page_offset,
                                           GFP_NOFS | __GFP_COLD);
                if (!page)
                        break;
                if (!PageUptodate(page))
                        err = populate_page(c, page, bu, &n);
                unlock_page(page);
                page_cache_release(page);
                if (err)
                        break;
        }

        ui->last_page_read = offset + page_idx - 1;

out_free:
        if (allocate)
                kfree(bu->buf);
        return ret;

out_warn:
        ubifs_warn("ignoring error %d and skipping bulk-read", err);
        goto out_free;

out_bu_off:
        ui->read_in_a_row = ui->bulk_read = 0;
        goto out_free;
}

/**
 * ubifs_bulk_read - determine whether to bulk-read and, if so, do it.
 * @page: page from which to start bulk-read.
 *
 * Some flash media are capable of reading sequentially at faster rates. The
 * UBIFS bulk-read facility takes advantage of this, by reading in one go
 * consecutive data nodes that are also located consecutively in the same LEB.
 * This function returns %1 if a bulk-read is done and %0 otherwise.
 */
static int ubifs_bulk_read(struct page *page)
{
        struct inode *inode = page->mapping->host;
        struct ubifs_info *c = inode->i_sb->s_fs_info;
        struct ubifs_inode *ui = ubifs_inode(inode);
        pgoff_t index = page->index, last_page_read = ui->last_page_read;
        struct bu_info *bu;
        int err = 0, allocated = 0;

        ui->last_page_read = index;
        if (!c->bulk_read)
                return 0;

        /*
         * Bulk-read is protected by @ui->ui_mutex, but it is an optimization,
         * so don't bother if we cannot lock the mutex.
         */
        if (!mutex_trylock(&ui->ui_mutex))
                return 0;

        if (index != last_page_read + 1) {
                /* Turn off bulk-read if we stop reading sequentially */
                ui->read_in_a_row = 1;
                if (ui->bulk_read)
                        ui->bulk_read = 0;
                goto out_unlock;
        }

        if (!ui->bulk_read) {
                ui->read_in_a_row += 1;
                if (ui->read_in_a_row < 3)
                        goto out_unlock;
                /* Three reads in a row, so switch on bulk-read */
                ui->bulk_read = 1;
        }

        /*
         * If possible, try to use pre-allocated bulk-read information, which
         * is protected by @c->bu_mutex.
         */
        if (mutex_trylock(&c->bu_mutex))
                bu = &c->bu;
        else {
                bu = kmalloc(sizeof(struct bu_info), GFP_NOFS | __GFP_NOWARN);
                if (!bu)
                        goto out_unlock;

                bu->buf = NULL;
                allocated = 1;
        }

        bu->buf_len = c->max_bu_buf_len;
        data_key_init(c, &bu->key, inode->i_ino,
                      page->index << UBIFS_BLOCKS_PER_PAGE_SHIFT);
        err = ubifs_do_bulk_read(c, bu, page);

        if (!allocated)
                mutex_unlock(&c->bu_mutex);
        else
                kfree(bu);

out_unlock:
        mutex_unlock(&ui->ui_mutex);
        return err;
}
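
/*
 * Editor's illustration (not part of the original file): the sequential-read
 * heuristic above in isolation. Bulk-read switches on after 3 consecutive
 * page reads and off again as soon as a read is not sequential. This is a
 * simplified sketch, not the exact control flow; @read_in_a_row, @bulk_read
 * and @last_page_read are the real ubifs_inode fields used above.
 */
static inline void example_bulk_read_heuristic(struct ubifs_inode *ui,
                                               pgoff_t index)
{
        if (index != ui->last_page_read + 1) {
                ui->read_in_a_row = 1;          /* not sequential: reset */
                ui->bulk_read = 0;
        } else if (!ui->bulk_read && ++ui->read_in_a_row >= 3)
                ui->bulk_read = 1;              /* 3 in a row: switch on */
        ui->last_page_read = index;
}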

static int ubifs_readpage(struct file *file, struct page *page)
{
        if (ubifs_bulk_read(page))
                return 0;
        do_readpage(page);
        unlock_page(page);
        return 0;
}

static int do_writepage(struct page *page, int len)
{
        int err = 0, i, blen;
        unsigned int block;
        void *addr;
        union ubifs_key key;
        struct inode *inode = page->mapping->host;
        struct ubifs_info *c = inode->i_sb->s_fs_info;

#ifdef UBIFS_DEBUG
        struct ubifs_inode *ui = ubifs_inode(inode);

        spin_lock(&ui->ui_lock);
        ubifs_assert(page->index <= ui->synced_i_size >> PAGE_CACHE_SHIFT);
        spin_unlock(&ui->ui_lock);
#endif

        /* Update radix tree tags */
        set_page_writeback(page);

        addr = kmap(page);
        block = page->index << UBIFS_BLOCKS_PER_PAGE_SHIFT;
        i = 0;
        while (len) {
                blen = min_t(int, len, UBIFS_BLOCK_SIZE);
                data_key_init(c, &key, inode->i_ino, block);
                err = ubifs_jnl_write_data(c, inode, &key, addr, blen);
                if (err)
                        break;
                if (++i >= UBIFS_BLOCKS_PER_PAGE)
                        break;
                block += 1;
                addr += blen;
                len -= blen;
        }
        if (err) {
                SetPageError(page);
                ubifs_err("cannot write page %lu of inode %lu, error %d",
                          page->index, inode->i_ino, err);
                ubifs_ro_mode(c, err);
        }

        ubifs_assert(PagePrivate(page));
        if (PageChecked(page))
                release_new_page_budget(c);
        else
                release_existing_page_budget(c);

        atomic_long_dec(&c->dirty_pg_cnt);
        ClearPagePrivate(page);
        ClearPageChecked(page);

        kunmap(page);
        unlock_page(page);
        end_page_writeback(page);
        return err;
}

/*
 * When writing back dirty inodes, VFS first writes back pages belonging to the
 * inode, then the inode itself. For UBIFS this may cause a problem. Consider a
 * situation where we have an inode with size 0, then a megabyte of data is
 * appended to the inode, then write-back starts and flushes some amount of the
 * dirty pages, the journal becomes full, a commit happens and finishes, and
 * then an unclean reboot happens. When the file system is mounted next time,
 * the inode size would still be 0, but there would be many pages which are
 * beyond the inode size, they would be indexed and consume flash space.
 * Because the journal has been committed, the replay would not be able to
 * detect this situation and correct the inode size. This means UBIFS would
 * have to scan the whole index and correct all inode sizes, which is long and
 * unacceptable.
 *
 * To prevent situations like this, UBIFS writes pages back only if they are
 * within the last synchronized inode size, i.e. the size which has been
 * written to the flash media last time. Otherwise, UBIFS forces inode
 * write-back, thus making sure the on-flash inode contains the current inode
 * size, and then keeps writing pages back.
 *
 * Some locking issues explanation. 'ubifs_writepage()' first is called with
 * the page locked, and it locks @ui_mutex. However, write-back does take inode
 * @i_mutex, which means other VFS operations may be run on this inode at the
 * same time. And the problematic one is truncation to smaller size, from where
 * we have to call 'truncate_setsize()', which first changes @inode->i_size,
 * then drops the truncated pages. And while dropping the pages, it takes the
 * page lock. This means that 'do_truncation()' cannot call 'truncate_setsize()'
 * with @ui_mutex locked, because it would deadlock with 'ubifs_writepage()'.
 * This means that @inode->i_size is changed while @ui_mutex is unlocked.
 *
 * XXX(truncate): with the new truncate sequence this is not true anymore,
 * and the calls to truncate_setsize can be moved around freely. They should
 * be moved to the very end of the truncate sequence.
 *
 * But in 'ubifs_writepage()' we have to guarantee that we do not write beyond
 * the inode size. How do we do this if @inode->i_size may become smaller while
 * we are in the middle of 'ubifs_writepage()'? The UBIFS solution is the
 * @ui->ui_size "shadow" field which UBIFS uses instead of @inode->i_size
 * internally and updates it under @ui_mutex.
 *
 * Q: why do we not worry that if we race with truncation, we may end up with a
 * situation when the inode is truncated while we are in the middle of
 * 'do_writepage()', so we do write beyond inode size?
 * A: If we are in the middle of 'do_writepage()', truncation would be locked
 * on the page lock and it would not write the truncated inode node to the
 * journal before we have finished.
 */
static int ubifs_writepage(struct page *page, struct writeback_control *wbc)
{
        struct inode *inode = page->mapping->host;
        struct ubifs_inode *ui = ubifs_inode(inode);
        loff_t i_size = i_size_read(inode), synced_i_size;
        pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
        int err, len = i_size & (PAGE_CACHE_SIZE - 1);
        void *kaddr;

        dbg_gen("ino %lu, pg %lu, pg flags %#lx",
                inode->i_ino, page->index, page->flags);
        ubifs_assert(PagePrivate(page));

        /* Is the page fully outside @i_size? (truncate in progress) */
        if (page->index > end_index || (page->index == end_index && !len)) {
                err = 0;
                goto out_unlock;
        }

        spin_lock(&ui->ui_lock);
        synced_i_size = ui->synced_i_size;
        spin_unlock(&ui->ui_lock);

        /* Is the page fully inside @i_size? */
        if (page->index < end_index) {
                if (page->index >= synced_i_size >> PAGE_CACHE_SHIFT) {
                        err = inode->i_sb->s_op->write_inode(inode, NULL);
                        if (err)
                                goto out_unlock;
                        /*
                         * The inode has been written, but the write-buffer has
                         * not been synchronized, so in case of an unclean
                         * reboot we may end up with some pages beyond inode
                         * size, but they would be in the journal (because
                         * commit flushes write buffers) and recovery would
                         * deal with this.
                         */
                }
                return do_writepage(page, PAGE_CACHE_SIZE);
        }

        /*
         * The page straddles @i_size. It must be zeroed out on each and every
         * writepage invocation because it may be mmapped. "A file is mapped
         * in multiples of the page size. For a file that is not a multiple of
         * the page size, the remaining memory is zeroed when mapped, and
         * writes to that region are not written out to the file."
         */
        kaddr = kmap_atomic(page);
        memset(kaddr + len, 0, PAGE_CACHE_SIZE - len);
        flush_dcache_page(page);
        kunmap_atomic(kaddr);

        if (i_size > synced_i_size) {
                err = inode->i_sb->s_op->write_inode(inode, NULL);
                if (err)
                        goto out_unlock;
        }

        return do_writepage(page, len);

out_unlock:
        unlock_page(page);
        return err;
}
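
/*
 * Editor's illustration (not part of the original file): the rule the
 * function above enforces, as a worked example. With 4KiB pages
 * (PAGE_CACHE_SHIFT == 12), if @i_size is 1MiB but only the first 64KiB of
 * inode size has ever been written to the media (synced_i_size == 65536),
 * then pages with index 16 and above must not go out before the inode itself
 * is written; pages 0..15 may be written back immediately. The helper name
 * is hypothetical.
 */
static inline int example_needs_inode_sync(pgoff_t index, loff_t synced_i_size)
{
        return index >= (synced_i_size >> PAGE_CACHE_SHIFT); /* 65536 >> 12 == 16 */
}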

/**
 * do_attr_changes - change inode attributes.
 * @inode: inode to change attributes for
 * @attr: describes attributes to change
 */
static void do_attr_changes(struct inode *inode, const struct iattr *attr)
{
        if (attr->ia_valid & ATTR_UID)
                inode->i_uid = attr->ia_uid;
        if (attr->ia_valid & ATTR_GID)
                inode->i_gid = attr->ia_gid;
        if (attr->ia_valid & ATTR_ATIME)
                inode->i_atime = timespec_trunc(attr->ia_atime,
                                                inode->i_sb->s_time_gran);
        if (attr->ia_valid & ATTR_MTIME)
                inode->i_mtime = timespec_trunc(attr->ia_mtime,
                                                inode->i_sb->s_time_gran);
        if (attr->ia_valid & ATTR_CTIME)
                inode->i_ctime = timespec_trunc(attr->ia_ctime,
                                                inode->i_sb->s_time_gran);
        if (attr->ia_valid & ATTR_MODE) {
                umode_t mode = attr->ia_mode;

                if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID))
                        mode &= ~S_ISGID;
                inode->i_mode = mode;
        }
}

/**
 * do_truncation - truncate an inode.
 * @c: UBIFS file-system description object
 * @inode: inode to truncate
 * @attr: inode attribute changes description
 *
 * This function implements VFS '->setattr()' call when the inode is truncated
 * to a smaller size. Returns zero in case of success and a negative error
 * code in case of failure.
 */
static int do_truncation(struct ubifs_info *c, struct inode *inode,
                         const struct iattr *attr)
{
        int err;
        struct ubifs_budget_req req;
        loff_t old_size = inode->i_size, new_size = attr->ia_size;
        int offset = new_size & (UBIFS_BLOCK_SIZE - 1), budgeted = 1;
        struct ubifs_inode *ui = ubifs_inode(inode);

        dbg_gen("ino %lu, size %lld -> %lld", inode->i_ino, old_size, new_size);
        memset(&req, 0, sizeof(struct ubifs_budget_req));

        /*
         * If this is truncation to a smaller size, and we do not truncate on a
         * block boundary, budget for changing one data block, because the last
         * block will be re-written.
         */
        if (new_size & (UBIFS_BLOCK_SIZE - 1))
                req.dirtied_page = 1;

        req.dirtied_ino = 1;
        /* A funny way to budget for truncation node */
        req.dirtied_ino_d = UBIFS_TRUN_NODE_SZ;
        err = ubifs_budget_space(c, &req);
        if (err) {
                /*
                 * Treat truncations to zero as deletion and always allow them,
                 * just like we do for '->unlink()'.
                 */
                if (new_size || err != -ENOSPC)
                        return err;
                budgeted = 0;
        }

        truncate_setsize(inode, new_size);

        if (offset) {
                pgoff_t index = new_size >> PAGE_CACHE_SHIFT;
                struct page *page;

                page = find_lock_page(inode->i_mapping, index);
                if (page) {
                        if (PageDirty(page)) {
                                /*
                                 * 'ubifs_jnl_truncate()' will try to truncate
                                 * the last data node, but it contains
                                 * out-of-date data because the page is dirty.
                                 * Write the page now, so that
                                 * 'ubifs_jnl_truncate()' will see an already
                                 * truncated (and up to date) data node.
                                 */
                                ubifs_assert(PagePrivate(page));

                                clear_page_dirty_for_io(page);
                                if (UBIFS_BLOCKS_PER_PAGE_SHIFT)
                                        offset = new_size &
                                                 (PAGE_CACHE_SIZE - 1);
                                err = do_writepage(page, offset);
                                page_cache_release(page);
                                if (err)
                                        goto out_budg;
                                /*
                                 * We could now tell 'ubifs_jnl_truncate()' not
                                 * to read the last block.
                                 */
                        } else {
                                /*
                                 * We could 'kmap()' the page and pass the data
                                 * to 'ubifs_jnl_truncate()' to save it from
                                 * having to read it.
                                 */
                                unlock_page(page);
                                page_cache_release(page);
                        }
                }
        }

        mutex_lock(&ui->ui_mutex);
        ui->ui_size = inode->i_size;
        /* Truncation changes inode [mc]time */
        inode->i_mtime = inode->i_ctime = ubifs_current_time(inode);
        /* Other attributes may be changed at the same time as well */
        do_attr_changes(inode, attr);
        err = ubifs_jnl_truncate(c, inode, old_size, new_size);
        mutex_unlock(&ui->ui_mutex);

out_budg:
        if (budgeted)
                ubifs_release_budget(c, &req);
        else {
                c->bi.nospace = c->bi.nospace_rp = 0;
                smp_wmb();
        }
        return err;
}
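
/*
 * Editor's illustration (not part of the original file): the @offset
 * arithmetic used above, as a worked example. Truncating to new_size == 10000
 * with a 4096-byte UBIFS_BLOCK_SIZE gives offset == 10000 & 4095 == 1808:
 * the truncation point is not block-aligned, the last block survives
 * partially, and one dirtied page has to be budgeted for its re-write. The
 * helper name is hypothetical.
 */
static inline int example_truncation_rewrites_last_block(loff_t new_size)
{
        return (new_size & (UBIFS_BLOCK_SIZE - 1)) != 0; /* 10000 -> 1808 -> true */
}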

/**
 * do_setattr - change inode attributes.
 * @c: UBIFS file-system description object
 * @inode: inode to change attributes for
 * @attr: inode attribute changes description
 *
 * This function implements VFS '->setattr()' call for all cases except
 * truncations to smaller size. Returns zero in case of success and a negative
 * error code in case of failure.
 */
static int do_setattr(struct ubifs_info *c, struct inode *inode,
                      const struct iattr *attr)
{
        int err, release;
        loff_t new_size = attr->ia_size;
        struct ubifs_inode *ui = ubifs_inode(inode);
        struct ubifs_budget_req req = { .dirtied_ino = 1,
                                .dirtied_ino_d = ALIGN(ui->data_len, 8) };

        err = ubifs_budget_space(c, &req);
        if (err)
                return err;

        if (attr->ia_valid & ATTR_SIZE) {
                dbg_gen("size %lld -> %lld", inode->i_size, new_size);
                truncate_setsize(inode, new_size);
        }

        mutex_lock(&ui->ui_mutex);
        if (attr->ia_valid & ATTR_SIZE) {
                /* Truncation changes inode [mc]time */
                inode->i_mtime = inode->i_ctime = ubifs_current_time(inode);
                /* 'truncate_setsize()' changed @i_size, update @ui_size */
                ui->ui_size = inode->i_size;
        }

        do_attr_changes(inode, attr);

        release = ui->dirty;
        if (attr->ia_valid & ATTR_SIZE)
                /*
                 * Inode length changed, so we have to make sure
                 * @I_DIRTY_DATASYNC is set.
                 */
                __mark_inode_dirty(inode, I_DIRTY_SYNC | I_DIRTY_DATASYNC);
        else
                mark_inode_dirty_sync(inode);
        mutex_unlock(&ui->ui_mutex);

        if (release)
                ubifs_release_budget(c, &req);
        if (IS_SYNC(inode))
                err = inode->i_sb->s_op->write_inode(inode, NULL);
        return err;
}

int ubifs_setattr(struct dentry *dentry, struct iattr *attr)
{
        int err;
        struct inode *inode = dentry->d_inode;
        struct ubifs_info *c = inode->i_sb->s_fs_info;

        dbg_gen("ino %lu, mode %#x, ia_valid %#x",
                inode->i_ino, inode->i_mode, attr->ia_valid);
        err = inode_change_ok(inode, attr);
        if (err)
                return err;

        err = dbg_check_synced_i_size(c, inode);
        if (err)
                return err;

        if ((attr->ia_valid & ATTR_SIZE) && attr->ia_size < inode->i_size)
                /* Truncation to a smaller size */
                err = do_truncation(c, inode, attr);
        else
                err = do_setattr(c, inode, attr);

        return err;
}

static void ubifs_invalidatepage(struct page *page, unsigned long offset)
{
        struct inode *inode = page->mapping->host;
        struct ubifs_info *c = inode->i_sb->s_fs_info;

        ubifs_assert(PagePrivate(page));
        if (offset)
                /* Partial page remains dirty */
                return;

        if (PageChecked(page))
                release_new_page_budget(c);
        else
                release_existing_page_budget(c);

        atomic_long_dec(&c->dirty_pg_cnt);
        ClearPagePrivate(page);
        ClearPageChecked(page);
}

static void *ubifs_follow_link(struct dentry *dentry, struct nameidata *nd)
{
        struct ubifs_inode *ui = ubifs_inode(dentry->d_inode);

        nd_set_link(nd, ui->data);
        return NULL;
}

int ubifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
        struct inode *inode = file->f_mapping->host;
        struct ubifs_info *c = inode->i_sb->s_fs_info;
        int err;

        dbg_gen("syncing inode %lu", inode->i_ino);

        if (c->ro_mount)
                /*
                 * For some really strange reasons VFS does not filter out
                 * 'fsync()' for R/O mounted file-systems as per 2.6.39.
                 */
                return 0;

        err = filemap_write_and_wait_range(inode->i_mapping, start, end);
        if (err)
                return err;
        mutex_lock(&inode->i_mutex);

        /* Synchronize the inode unless this is a 'datasync()' call. */
        if (!datasync || (inode->i_state & I_DIRTY_DATASYNC)) {
                err = inode->i_sb->s_op->write_inode(inode, NULL);
                if (err)
                        goto out;
        }

        /*
         * Nodes related to this inode may still sit in a write-buffer. Flush
         * them.
         */
        err = ubifs_sync_wbufs_by_inode(c, inode);
out:
        mutex_unlock(&inode->i_mutex);
        return err;
}

/**
 * mctime_update_needed - check if mtime or ctime update is needed.
 * @inode: the inode to do the check for
 * @now: current time
 *
 * This helper function checks if the inode mtime/ctime should be updated or
 * not. If current values of the time-stamps are within the UBIFS inode time
 * granularity, they are not updated. This is an optimization.
 */
static inline int mctime_update_needed(const struct inode *inode,
                                       const struct timespec *now)
{
        if (!timespec_equal(&inode->i_mtime, now) ||
            !timespec_equal(&inode->i_ctime, now))
                return 1;
        return 0;
}

/**
 * update_mctime - update mtime and ctime of an inode.
 * @c: UBIFS file-system description object
 * @inode: inode to update
 *
 * This function updates the mtime and ctime of the inode if they are not
 * equivalent to the current time. Returns zero in case of success and a
 * negative error code in case of failure.
 */
static int update_mctime(struct ubifs_info *c, struct inode *inode)
{
        struct timespec now = ubifs_current_time(inode);
        struct ubifs_inode *ui = ubifs_inode(inode);

        if (mctime_update_needed(inode, &now)) {
                int err, release;
                struct ubifs_budget_req req = { .dirtied_ino = 1,
                                .dirtied_ino_d = ALIGN(ui->data_len, 8) };

                err = ubifs_budget_space(c, &req);
                if (err)
                        return err;

                mutex_lock(&ui->ui_mutex);
                inode->i_mtime = inode->i_ctime = ubifs_current_time(inode);
                release = ui->dirty;
                mark_inode_dirty_sync(inode);
                mutex_unlock(&ui->ui_mutex);
                if (release)
                        ubifs_release_budget(c, &req);
        }

        return 0;
}

static ssize_t ubifs_aio_write(struct kiocb *iocb, const struct iovec *iov,
                               unsigned long nr_segs, loff_t pos)
{
        int err;
        struct inode *inode = iocb->ki_filp->f_mapping->host;
        struct ubifs_info *c = inode->i_sb->s_fs_info;

        err = update_mctime(c, inode);
        if (err)
                return err;

        return generic_file_aio_write(iocb, iov, nr_segs, pos);
}

static int ubifs_set_page_dirty(struct page *page)
{
        int ret;

        ret = __set_page_dirty_nobuffers(page);
        /*
         * An attempt to dirty a page without budgeting for it - should not
         * happen.
         */
        ubifs_assert(ret == 0);
        return ret;
}

static int ubifs_releasepage(struct page *page, gfp_t unused_gfp_flags)
{
        /*
         * An attempt to release a dirty page without budgeting for it - should
         * not happen.
         */
        if (PageWriteback(page))
                return 0;
        ubifs_assert(PagePrivate(page));
        ubifs_assert(0);
        ClearPagePrivate(page);
        ClearPageChecked(page);
        return 1;
}

/*
 * mmap()d file has taken write protection fault and is being made writable.
 * UBIFS must ensure page is budgeted for.
 */
static int ubifs_vm_page_mkwrite(struct vm_area_struct *vma,
                                 struct vm_fault *vmf)
{
        struct page *page = vmf->page;
        struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
        struct ubifs_info *c = inode->i_sb->s_fs_info;
        struct timespec now = ubifs_current_time(inode);
        struct ubifs_budget_req req = { .new_page = 1 };
        int err, update_time;

        dbg_gen("ino %lu, pg %lu, i_size %lld", inode->i_ino, page->index,
                i_size_read(inode));
        ubifs_assert(!c->ro_media && !c->ro_mount);

        if (unlikely(c->ro_error))
                return VM_FAULT_SIGBUS; /* -EROFS */

        /*
         * We have not locked @page so far so we may budget for changing the
         * page. Note, we cannot do this after we locked the page, because
         * budgeting may cause write-back which would cause deadlock.
         *
         * At the moment we do not know whether the page is dirty or not, so we
         * assume that it is not and budget for a new page. We could look at
         * the @PG_private flag and figure this out, but we may race with write
         * back and the page state may change by the time we lock it, so this
         * would need additional care. We do not bother with this at the
         * moment, although it might be a good idea to do. Instead, we allocate
         * budget for a new page and amend it later on if the page was in fact
         * dirty.
         *
         * The budgeting-related logic of this function is similar to what we
         * do in 'ubifs_write_begin()' and 'ubifs_write_end()'. Glance there
         * for more comments.
         */
        update_time = mctime_update_needed(inode, &now);
        if (update_time)
                /*
                 * We have to change inode time stamp which requires extra
                 * budgeting.
                 */
                req.dirtied_ino = 1;

        err = ubifs_budget_space(c, &req);
        if (unlikely(err)) {
                if (err == -ENOSPC)
                        ubifs_warn("out of space for mmapped file (inode number %lu)",
                                   inode->i_ino);
                return VM_FAULT_SIGBUS;
        }

        lock_page(page);
        if (unlikely(page->mapping != inode->i_mapping ||
                     page_offset(page) > i_size_read(inode))) {
                /* Page got truncated out from underneath us */
                err = -EINVAL;
                goto out_unlock;
        }

        if (PagePrivate(page))
                release_new_page_budget(c);
        else {
                if (!PageChecked(page))
                        ubifs_convert_page_budget(c);
                SetPagePrivate(page);
                atomic_long_inc(&c->dirty_pg_cnt);
                __set_page_dirty_nobuffers(page);
        }

        if (update_time) {
                int release;
                struct ubifs_inode *ui = ubifs_inode(inode);

                mutex_lock(&ui->ui_mutex);
                inode->i_mtime = inode->i_ctime = ubifs_current_time(inode);
                release = ui->dirty;
                mark_inode_dirty_sync(inode);
                mutex_unlock(&ui->ui_mutex);
                if (release)
                        ubifs_release_dirty_inode_budget(c, ui);
        }

        unlock_page(page);
        return 0;

out_unlock:
        unlock_page(page);
        ubifs_release_budget(c, &req);
        if (err)
                err = VM_FAULT_SIGBUS;
        return err;
}

static const struct vm_operations_struct ubifs_file_vm_ops = {
        .fault        = filemap_fault,
        .page_mkwrite = ubifs_vm_page_mkwrite,
        .remap_pages  = generic_file_remap_pages,
};

static int ubifs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
        int err;

        err = generic_file_mmap(file, vma);
        if (err)
                return err;
        vma->vm_ops = &ubifs_file_vm_ops;
        return 0;
}

const struct address_space_operations ubifs_file_address_operations = {
        .readpage       = ubifs_readpage,
        .writepage      = ubifs_writepage,
        .write_begin    = ubifs_write_begin,
        .write_end      = ubifs_write_end,
        .invalidatepage = ubifs_invalidatepage,
        .set_page_dirty = ubifs_set_page_dirty,
        .releasepage    = ubifs_releasepage,
};

const struct inode_operations ubifs_file_inode_operations = {
        .setattr     = ubifs_setattr,
        .getattr     = ubifs_getattr,
        .setxattr    = ubifs_setxattr,
        .getxattr    = ubifs_getxattr,
        .listxattr   = ubifs_listxattr,
        .removexattr = ubifs_removexattr,
};

const struct inode_operations ubifs_symlink_inode_operations = {
        .readlink    = generic_readlink,
        .follow_link = ubifs_follow_link,
        .setattr     = ubifs_setattr,
        .getattr     = ubifs_getattr,
};

const struct file_operations ubifs_file_operations = {
        .llseek         = generic_file_llseek,
        .read           = do_sync_read,
        .write          = do_sync_write,
        .aio_read       = generic_file_aio_read,
        .aio_write      = ubifs_aio_write,
        .mmap           = ubifs_file_mmap,
        .fsync          = ubifs_fsync,
        .unlocked_ioctl = ubifs_ioctl,
        .splice_read    = generic_file_splice_read,
        .splice_write   = generic_file_splice_write,
#ifdef CONFIG_COMPAT
        .compat_ioctl   = ubifs_compat_ioctl,
#endif
};