Linux Kernel 3.7.1: fs/fs-writeback.c
/*
 * fs/fs-writeback.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 *
 * Contains all the functions related to writing back and waiting
 * upon dirty inodes against superblocks, and writing back dirty
 * pages against inodes. i.e. data writeback. Writeout of the
 * inode itself is not handled here.
 *
 * 10Apr2002    Andrew Morton
 *              Split out of fs/inode.c
 *              Additions for address_space-based writeback
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/tracepoint.h>
#include "internal.h"

/*
 * 4MB minimal write chunk size
 */
#define MIN_WRITEBACK_PAGES     (4096UL >> (PAGE_CACHE_SHIFT - 10))

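/*
 * Worked example (illustrative, not in the original source): with 4KB
 * pages, PAGE_CACHE_SHIFT == 12, so MIN_WRITEBACK_PAGES =
 * 4096UL >> (12 - 10) = 1024 pages, and 1024 pages * 4KB = 4MB,
 * matching the comment above.
 */
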
/*
 * Passed into wb_writeback(), essentially a subset of writeback_control
 */
struct wb_writeback_work {
        long nr_pages;
        struct super_block *sb;
        unsigned long *older_than_this;
        enum writeback_sync_modes sync_mode;
        unsigned int tagged_writepages:1;
        unsigned int for_kupdate:1;
        unsigned int range_cyclic:1;
        unsigned int for_background:1;
        enum wb_reason reason;          /* why was writeback initiated? */

        struct list_head list;          /* pending work list */
        struct completion *done;        /* set if the caller waits */
};

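/*
 * Lifecycle note (summary of how this file uses the struct): a
 * wb_writeback_work is either kzalloc'ed in __bdi_start_writeback() and
 * kfree'd by the flusher in wb_do_writeback(), or lives on a caller's
 * stack with ->done pointing at a completion the flusher signals; see
 * writeback_inodes_sb_nr() and sync_inodes_sb() below.
 */
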
/**
 * writeback_in_progress - determine whether there is writeback in progress
 * @bdi: the device's backing_dev_info structure.
 *
 * Determine whether there is writeback waiting to be handled against a
 * backing device.
 */
int writeback_in_progress(struct backing_dev_info *bdi)
{
        return test_bit(BDI_writeback_running, &bdi->state);
}
EXPORT_SYMBOL(writeback_in_progress);

static inline struct backing_dev_info *inode_to_bdi(struct inode *inode)
{
        struct super_block *sb = inode->i_sb;

        if (strcmp(sb->s_type->name, "bdev") == 0)
                return inode->i_mapping->backing_dev_info;

        return sb->s_bdi;
}

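/*
 * Note: inodes of the "bdev" pseudo filesystem stand for block devices,
 * and each must be written back through its own device's backing_dev_info
 * rather than through the pseudo filesystem's s_bdi, hence the special
 * case above.
 */
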
static inline struct inode *wb_inode(struct list_head *head)
{
        return list_entry(head, struct inode, i_wb_list);
}

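/*
 * wb_inode() recovers an inode from its embedded i_wb_list link: the
 * writeback lists (b_dirty/b_io/b_more_io) chain inodes through that
 * member, and list_entry() is container_of() for list_head members.
 */
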
/*
 * Include the creation of the trace points after defining the
 * wb_writeback_work structure and inline functions so that the definition
 * remains local to this file.
 */
#define CREATE_TRACE_POINTS
#include <trace/events/writeback.h>

/* Wakeup flusher thread or forker thread to fork it. Requires bdi->wb_lock. */
static void bdi_wakeup_flusher(struct backing_dev_info *bdi)
{
        if (bdi->wb.task) {
                wake_up_process(bdi->wb.task);
        } else {
                /*
                 * The bdi thread isn't there, wake up the forker thread which
                 * will create and run it.
                 */
                wake_up_process(default_backing_dev_info.wb.task);
        }
}

static void bdi_queue_work(struct backing_dev_info *bdi,
                           struct wb_writeback_work *work)
{
        trace_writeback_queue(bdi, work);

        spin_lock_bh(&bdi->wb_lock);
        list_add_tail(&work->list, &bdi->work_list);
        if (!bdi->wb.task)
                trace_writeback_nothread(bdi, work);
        bdi_wakeup_flusher(bdi);
        spin_unlock_bh(&bdi->wb_lock);
}

static void
__bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
                      bool range_cyclic, enum wb_reason reason)
{
        struct wb_writeback_work *work;

        /*
         * This is WB_SYNC_NONE writeback, so if allocation fails just
         * wakeup the thread for old dirty data writeback
         */
        work = kzalloc(sizeof(*work), GFP_ATOMIC);
        if (!work) {
                if (bdi->wb.task) {
                        trace_writeback_nowork(bdi);
                        wake_up_process(bdi->wb.task);
                }
                return;
        }

        work->sync_mode = WB_SYNC_NONE;
        work->nr_pages = nr_pages;
        work->range_cyclic = range_cyclic;
        work->reason = reason;

        bdi_queue_work(bdi, work);
}

/**
 * bdi_start_writeback - start writeback
 * @bdi: the backing device to write from
 * @nr_pages: the number of pages to write
 * @reason: reason why some writeback work was initiated
 *
 * Description:
 *   This does WB_SYNC_NONE opportunistic writeback. The IO is only
 *   started when this function returns, we make no guarantees on
 *   completion. Caller need not hold sb s_umount semaphore.
 */
void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
                         enum wb_reason reason)
{
        __bdi_start_writeback(bdi, nr_pages, true, reason);
}

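/*
 * Usage sketch (illustrative, not taken from a real caller): a filesystem
 * wanting to opportunistically flush roughly 4MB from its device might do
 *
 *      bdi_start_writeback(sb->s_bdi, 1024, WB_REASON_FS_FREE_SPACE);
 *
 * which queues WB_SYNC_NONE work and returns without waiting for the IO.
 */
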
/**
 * bdi_start_background_writeback - start background writeback
 * @bdi: the backing device to write from
 *
 * Description:
 *   This makes sure WB_SYNC_NONE background writeback happens. When
 *   this function returns, it is only guaranteed that for given BDI
 *   some IO is happening if we are over background dirty threshold.
 *   Caller need not hold sb s_umount semaphore.
 */
void bdi_start_background_writeback(struct backing_dev_info *bdi)
{
        /*
         * We just wake up the flusher thread. It will perform background
         * writeback as soon as there is no other work to do.
         */
        trace_writeback_wake_background(bdi);
        spin_lock_bh(&bdi->wb_lock);
        bdi_wakeup_flusher(bdi);
        spin_unlock_bh(&bdi->wb_lock);
}

/*
 * Remove the inode from the writeback list it is on.
 */
void inode_wb_list_del(struct inode *inode)
{
        struct backing_dev_info *bdi = inode_to_bdi(inode);

        spin_lock(&bdi->wb.list_lock);
        list_del_init(&inode->i_wb_list);
        spin_unlock(&bdi->wb.list_lock);
}

/*
 * Redirty an inode: set its when-it-was dirtied timestamp and move it to the
 * furthest end of its superblock's dirty-inode list.
 *
 * Before stamping the inode's ->dirtied_when, we check to see whether it is
 * already the most-recently-dirtied inode on the b_dirty list. If that is
 * the case then the inode must have been redirtied while it was being written
 * out and we don't reset its dirtied_when.
 */
static void redirty_tail(struct inode *inode, struct bdi_writeback *wb)
{
        assert_spin_locked(&wb->list_lock);
        if (!list_empty(&wb->b_dirty)) {
                struct inode *tail;

                tail = wb_inode(wb->b_dirty.next);
                if (time_before(inode->dirtied_when, tail->dirtied_when))
                        inode->dirtied_when = jiffies;
        }
        list_move(&inode->i_wb_list, &wb->b_dirty);
}

/*
 * requeue inode for re-scanning after bdi->b_io list is exhausted.
 */
static void requeue_io(struct inode *inode, struct bdi_writeback *wb)
{
        assert_spin_locked(&wb->list_lock);
        list_move(&inode->i_wb_list, &wb->b_more_io);
}

static void inode_sync_complete(struct inode *inode)
{
        inode->i_state &= ~I_SYNC;
        /* If inode is clean and unused, put it into LRU now... */
        inode_add_lru(inode);
        /* Waiters must see I_SYNC cleared before being woken up */
        smp_mb();
        wake_up_bit(&inode->i_state, __I_SYNC);
}

static bool inode_dirtied_after(struct inode *inode, unsigned long t)
{
        bool ret = time_after(inode->dirtied_when, t);
#ifndef CONFIG_64BIT
        /*
         * For inodes being constantly redirtied, dirtied_when can get stuck.
         * It _appears_ to be in the future, but is actually in distant past.
         * This test is necessary to prevent such wrapped-around relative times
         * from permanently stopping the whole bdi writeback.
         */
        ret = ret && time_before_eq(inode->dirtied_when, jiffies);
#endif
        return ret;
}

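/*
 * Worked example for the 32-bit case above (illustrative): with HZ=1000 a
 * 32-bit jiffies counter wraps about every 49.7 days (2^32 ms). An inode
 * stamped just before the wrap compares as dirtied "after" any post-wrap
 * cutoff, so without the time_before_eq() check it could never expire.
 */
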
/*
 * Move expired (dirtied before work->older_than_this) dirty inodes from
 * @delaying_queue to @dispatch_queue.
 */
static int move_expired_inodes(struct list_head *delaying_queue,
                               struct list_head *dispatch_queue,
                               struct wb_writeback_work *work)
{
        LIST_HEAD(tmp);
        struct list_head *pos, *node;
        struct super_block *sb = NULL;
        struct inode *inode;
        int do_sb_sort = 0;
        int moved = 0;

        while (!list_empty(delaying_queue)) {
                inode = wb_inode(delaying_queue->prev);
                if (work->older_than_this &&
                    inode_dirtied_after(inode, *work->older_than_this))
                        break;
                if (sb && sb != inode->i_sb)
                        do_sb_sort = 1;
                sb = inode->i_sb;
                list_move(&inode->i_wb_list, &tmp);
                moved++;
        }

        /* just one sb in list, splice to dispatch_queue and we're done */
        if (!do_sb_sort) {
                list_splice(&tmp, dispatch_queue);
                goto out;
        }

        /* Move inodes from one superblock together */
        while (!list_empty(&tmp)) {
                sb = wb_inode(tmp.prev)->i_sb;
                list_for_each_prev_safe(pos, node, &tmp) {
                        inode = wb_inode(pos);
                        if (inode->i_sb == sb)
                                list_move(&inode->i_wb_list, dispatch_queue);
                }
        }
out:
        return moved;
}

/*
 * Queue all expired dirty inodes for io, eldest first.
 * Before
 *         newly dirtied     b_dirty    b_io    b_more_io
 *         =============>    gf         edc     BA
 * After
 *         newly dirtied     b_dirty    b_io    b_more_io
 *         =============>    g          fBAedc
 *                                           |
 *                                           +--> dequeue for IO
 */
static void queue_io(struct bdi_writeback *wb, struct wb_writeback_work *work)
{
        int moved;
        assert_spin_locked(&wb->list_lock);
        list_splice_init(&wb->b_more_io, &wb->b_io);
        moved = move_expired_inodes(&wb->b_dirty, &wb->b_io, work);
        trace_writeback_queue_io(wb, work, moved);
}

static int write_inode(struct inode *inode, struct writeback_control *wbc)
{
        if (inode->i_sb->s_op->write_inode && !is_bad_inode(inode))
                return inode->i_sb->s_op->write_inode(inode, wbc);
        return 0;
}

/*
 * Wait for writeback on an inode to complete. Called with i_lock held.
 * Caller must make sure inode cannot go away when we drop i_lock.
 */
static void __inode_wait_for_writeback(struct inode *inode)
        __releases(inode->i_lock)
        __acquires(inode->i_lock)
{
        DEFINE_WAIT_BIT(wq, &inode->i_state, __I_SYNC);
        wait_queue_head_t *wqh;

        wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
        while (inode->i_state & I_SYNC) {
                spin_unlock(&inode->i_lock);
                __wait_on_bit(wqh, &wq, inode_wait, TASK_UNINTERRUPTIBLE);
                spin_lock(&inode->i_lock);
        }
}

/*
 * Wait for writeback on an inode to complete. Caller must have inode pinned.
 */
void inode_wait_for_writeback(struct inode *inode)
{
        spin_lock(&inode->i_lock);
        __inode_wait_for_writeback(inode);
        spin_unlock(&inode->i_lock);
}

/*
 * Sleep until I_SYNC is cleared. This function must be called with i_lock
 * held and drops it. It is aimed for callers not holding any inode reference
 * so once i_lock is dropped, inode can go away.
 */
static void inode_sleep_on_writeback(struct inode *inode)
        __releases(inode->i_lock)
{
        DEFINE_WAIT(wait);
        wait_queue_head_t *wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
        int sleep;

        prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
        sleep = inode->i_state & I_SYNC;
        spin_unlock(&inode->i_lock);
        if (sleep)
                schedule();
        finish_wait(wqh, &wait);
}

/*
 * Find proper writeback list for the inode depending on its current state and
 * possibly also change of its state while we were doing writeback. Here we
 * handle things such as livelock prevention or fairness of writeback among
 * inodes. This function can be called only by the flusher thread - no one
 * else processes all inodes in writeback lists and requeueing inodes behind
 * the flusher thread's back can have unexpected consequences.
 */
static void requeue_inode(struct inode *inode, struct bdi_writeback *wb,
                          struct writeback_control *wbc)
{
        if (inode->i_state & I_FREEING)
                return;

        /*
         * Sync livelock prevention. Each inode is tagged and synced in one
         * shot. If still dirty, it will be redirty_tail()'ed below. Update
         * the dirty time to prevent enqueue and sync it again.
         */
        if ((inode->i_state & I_DIRTY) &&
            (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages))
                inode->dirtied_when = jiffies;

        if (wbc->pages_skipped) {
                /*
                 * writeback is not making progress due to locked
                 * buffers. Skip this inode for now.
                 */
                redirty_tail(inode, wb);
                return;
        }

        if (mapping_tagged(inode->i_mapping, PAGECACHE_TAG_DIRTY)) {
                /*
                 * We didn't write back all the pages. nfs_writepages()
                 * sometimes bails out without doing anything.
                 */
                if (wbc->nr_to_write <= 0) {
                        /* Slice used up. Queue for next turn. */
                        requeue_io(inode, wb);
                } else {
                        /*
                         * Writeback blocked by something other than
                         * congestion. Delay the inode for some time to
                         * avoid spinning on the CPU (100% iowait)
                         * retrying writeback of the dirty page/inode
                         * that cannot be performed immediately.
                         */
                        redirty_tail(inode, wb);
                }
        } else if (inode->i_state & I_DIRTY) {
                /*
                 * Filesystems can dirty the inode during writeback operations,
                 * such as delayed allocation during submission or metadata
                 * updates after data IO completion.
                 */
                redirty_tail(inode, wb);
        } else {
                /* The inode is clean. Remove from writeback lists. */
                list_del_init(&inode->i_wb_list);
        }
}

/*
 * Write out an inode and its dirty pages. Do not update the writeback list
 * linkage. That is left to the caller. The caller is also responsible for
 * setting I_SYNC flag and calling inode_sync_complete() to clear it.
 */
static int
__writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
{
        struct address_space *mapping = inode->i_mapping;
        long nr_to_write = wbc->nr_to_write;
        unsigned dirty;
        int ret;

        WARN_ON(!(inode->i_state & I_SYNC));

        ret = do_writepages(mapping, wbc);

        /*
         * Make sure to wait on the data before writing out the metadata.
         * This is important for filesystems that modify metadata on data
         * I/O completion.
         */
        if (wbc->sync_mode == WB_SYNC_ALL) {
                int err = filemap_fdatawait(mapping);
                if (ret == 0)
                        ret = err;
        }

        /*
         * Some filesystems may redirty the inode during the writeback
         * due to delalloc, clear dirty metadata flags right before
         * write_inode()
         */
        spin_lock(&inode->i_lock);
        /* Clear I_DIRTY_PAGES if we've written out all dirty pages */
        if (!mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
                inode->i_state &= ~I_DIRTY_PAGES;
        dirty = inode->i_state & I_DIRTY;
        inode->i_state &= ~(I_DIRTY_SYNC | I_DIRTY_DATASYNC);
        spin_unlock(&inode->i_lock);
        /* Don't write the inode if only I_DIRTY_PAGES was set */
        if (dirty & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
                int err = write_inode(inode, wbc);
                if (ret == 0)
                        ret = err;
        }
        trace_writeback_single_inode(inode, wbc, nr_to_write);
        return ret;
}

/*
 * Write out an inode's dirty pages. Either the caller has an active reference
 * on the inode or the inode has I_WILL_FREE set.
 *
 * This function is designed for writing back one inode at a time, e.g. on
 * behalf of the filesystem. The flusher thread instead uses
 * __writeback_single_inode() and does more profound writeback list handling
 * in writeback_sb_inodes().
 */
static int
writeback_single_inode(struct inode *inode, struct bdi_writeback *wb,
                       struct writeback_control *wbc)
{
        int ret = 0;

        spin_lock(&inode->i_lock);
        if (!atomic_read(&inode->i_count))
                WARN_ON(!(inode->i_state & (I_WILL_FREE|I_FREEING)));
        else
                WARN_ON(inode->i_state & I_WILL_FREE);

        if (inode->i_state & I_SYNC) {
                if (wbc->sync_mode != WB_SYNC_ALL)
                        goto out;
                /*
                 * It's a data-integrity sync. We must wait. Since callers hold
                 * inode reference or inode has I_WILL_FREE set, it cannot go
                 * away under us.
                 */
                __inode_wait_for_writeback(inode);
        }
        WARN_ON(inode->i_state & I_SYNC);
        /*
         * Skip inode if it is clean. We don't want to mess with writeback
         * lists in this function since flusher thread may be doing for example
         * sync in parallel and if we move the inode, it could get skipped. So
         * here we make sure inode is on some writeback list and leave it there
         * unless we have completely cleaned the inode.
         */
        if (!(inode->i_state & I_DIRTY))
                goto out;
        inode->i_state |= I_SYNC;
        spin_unlock(&inode->i_lock);

        ret = __writeback_single_inode(inode, wbc);

        spin_lock(&wb->list_lock);
        spin_lock(&inode->i_lock);
        /*
         * If inode is clean, remove it from writeback lists. Otherwise don't
         * touch it. See comment above for explanation.
         */
        if (!(inode->i_state & I_DIRTY))
                list_del_init(&inode->i_wb_list);
        spin_unlock(&wb->list_lock);
        inode_sync_complete(inode);
out:
        spin_unlock(&inode->i_lock);
        return ret;
}

static long writeback_chunk_size(struct backing_dev_info *bdi,
                                 struct wb_writeback_work *work)
{
        long pages;

        /*
         * WB_SYNC_ALL mode does livelock avoidance by syncing dirty
         * inodes/pages in one big loop. Setting wbc.nr_to_write=LONG_MAX
         * here avoids calling into writeback_inodes_wb() more than once.
         *
         * The intended call sequence for WB_SYNC_ALL writeback is:
         *
         *      wb_writeback()
         *          writeback_sb_inodes()       <== called only once
         *              write_cache_pages()     <== called once for each inode
         *                  (quickly) tag currently dirty pages
         *                  (maybe slowly) sync all tagged pages
         */
        if (work->sync_mode == WB_SYNC_ALL || work->tagged_writepages)
                pages = LONG_MAX;
        else {
                pages = min(bdi->avg_write_bandwidth / 2,
                            global_dirty_limit / DIRTY_SCOPE);
                pages = min(pages, work->nr_pages);
                pages = round_down(pages + MIN_WRITEBACK_PAGES,
                                   MIN_WRITEBACK_PAGES);
        }

        return pages;
}

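/*
 * Worked example (illustrative numbers, assuming avg_write_bandwidth is
 * tracked in pages per second): a device sustaining 100MB/s with 4KB pages
 * has avg_write_bandwidth ~= 25600, so the bandwidth term allows 12800
 * pages. If neither the dirty limit nor work->nr_pages clamps it further,
 * round_down(12800 + 1024, 1024) = 13312 pages (~52MB). The
 * "+ MIN_WRITEBACK_PAGES" also guarantees a floor of one 4MB chunk for
 * slow devices.
 */
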
/*
 * Write a portion of b_io inodes which belong to @sb.
 *
 * Return the number of pages and/or inodes written.
 */
static long writeback_sb_inodes(struct super_block *sb,
                                struct bdi_writeback *wb,
                                struct wb_writeback_work *work)
{
        struct writeback_control wbc = {
                .sync_mode = work->sync_mode,
                .tagged_writepages = work->tagged_writepages,
                .for_kupdate = work->for_kupdate,
                .for_background = work->for_background,
                .range_cyclic = work->range_cyclic,
                .range_start = 0,
                .range_end = LLONG_MAX,
        };
        unsigned long start_time = jiffies;
        long write_chunk;
        long wrote = 0;  /* count both pages and inodes */

        while (!list_empty(&wb->b_io)) {
                struct inode *inode = wb_inode(wb->b_io.prev);

                if (inode->i_sb != sb) {
                        if (work->sb) {
                                /*
                                 * We only want to write back data for this
                                 * superblock, move all inodes not belonging
                                 * to it back onto the dirty list.
                                 */
                                redirty_tail(inode, wb);
                                continue;
                        }

                        /*
                         * The inode belongs to a different superblock.
                         * Bounce back to the caller to unpin this and
                         * pin the next superblock.
                         */
                        break;
                }

                /*
                 * Don't bother with new inodes or inodes being freed, first
                 * kind does not need periodic writeout yet, and for the latter
                 * kind writeout is handled by the freer.
                 */
                spin_lock(&inode->i_lock);
                if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
                        spin_unlock(&inode->i_lock);
                        redirty_tail(inode, wb);
                        continue;
                }
                if ((inode->i_state & I_SYNC) && wbc.sync_mode != WB_SYNC_ALL) {
                        /*
                         * If this inode is locked for writeback and we are not
                         * doing writeback-for-data-integrity, move it to
                         * b_more_io so that writeback can proceed with the
                         * other inodes on s_io.
                         *
                         * We'll have another go at writing back this inode
                         * when we completed a full scan of b_io.
                         */
                        spin_unlock(&inode->i_lock);
                        requeue_io(inode, wb);
                        trace_writeback_sb_inodes_requeue(inode);
                        continue;
                }
                spin_unlock(&wb->list_lock);

                /*
                 * We already requeued the inode if it had I_SYNC set and we
                 * are doing WB_SYNC_NONE writeback. So this catches only the
                 * WB_SYNC_ALL case.
                 */
                if (inode->i_state & I_SYNC) {
                        /* Wait for I_SYNC. This function drops i_lock... */
                        inode_sleep_on_writeback(inode);
                        /* Inode may be gone, start again */
                        spin_lock(&wb->list_lock);
                        continue;
                }
                inode->i_state |= I_SYNC;
                spin_unlock(&inode->i_lock);

                write_chunk = writeback_chunk_size(wb->bdi, work);
                wbc.nr_to_write = write_chunk;
                wbc.pages_skipped = 0;

                /*
                 * We use I_SYNC to pin the inode in memory. While it is set
                 * evict_inode() will wait so the inode cannot be freed.
                 */
                __writeback_single_inode(inode, &wbc);

                work->nr_pages -= write_chunk - wbc.nr_to_write;
                wrote += write_chunk - wbc.nr_to_write;
                spin_lock(&wb->list_lock);
                spin_lock(&inode->i_lock);
                if (!(inode->i_state & I_DIRTY))
                        wrote++;
                requeue_inode(inode, wb, &wbc);
                inode_sync_complete(inode);
                spin_unlock(&inode->i_lock);
                cond_resched_lock(&wb->list_lock);
                /*
                 * bail out to wb_writeback() often enough to check
                 * background threshold and other termination conditions.
                 */
                if (wrote) {
                        if (time_is_before_jiffies(start_time + HZ / 10UL))
                                break;
                        if (work->nr_pages <= 0)
                                break;
                }
        }
        return wrote;
}

static long __writeback_inodes_wb(struct bdi_writeback *wb,
                                  struct wb_writeback_work *work)
{
        unsigned long start_time = jiffies;
        long wrote = 0;

        while (!list_empty(&wb->b_io)) {
                struct inode *inode = wb_inode(wb->b_io.prev);
                struct super_block *sb = inode->i_sb;

                if (!grab_super_passive(sb)) {
                        /*
                         * grab_super_passive() may fail consistently due to
                         * s_umount being grabbed by someone else. Don't use
                         * requeue_io() to avoid busy retrying the inode/sb.
                         */
                        redirty_tail(inode, wb);
                        continue;
                }
                wrote += writeback_sb_inodes(sb, wb, work);
                drop_super(sb);

                /* refer to the same tests at the end of writeback_sb_inodes */
                if (wrote) {
                        if (time_is_before_jiffies(start_time + HZ / 10UL))
                                break;
                        if (work->nr_pages <= 0)
                                break;
                }
        }
        /* Leave any unwritten inodes on b_io */
        return wrote;
}

long writeback_inodes_wb(struct bdi_writeback *wb, long nr_pages,
                         enum wb_reason reason)
{
        struct wb_writeback_work work = {
                .nr_pages = nr_pages,
                .sync_mode = WB_SYNC_NONE,
                .range_cyclic = 1,
                .reason = reason,
        };

        spin_lock(&wb->list_lock);
        if (list_empty(&wb->b_io))
                queue_io(wb, &work);
        __writeback_inodes_wb(wb, &work);
        spin_unlock(&wb->list_lock);

        return nr_pages - work.nr_pages;
}

static bool over_bground_thresh(struct backing_dev_info *bdi)
{
        unsigned long background_thresh, dirty_thresh;

        global_dirty_limits(&background_thresh, &dirty_thresh);

        if (global_page_state(NR_FILE_DIRTY) +
            global_page_state(NR_UNSTABLE_NFS) > background_thresh)
                return true;

        if (bdi_stat(bdi, BDI_RECLAIMABLE) >
            bdi_dirty_limit(bdi, background_thresh))
                return true;

        return false;
}

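/*
 * Note (summary; see mm/page-writeback.c for the authoritative logic):
 * background writeback triggers on either of two thresholds, the global
 * one that global_dirty_limits() derives from vm.dirty_background_ratio or
 * vm.dirty_background_bytes, and a per-device share of it that
 * bdi_dirty_limit() scales by the device's recent writeout proportion.
 */
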
/*
 * Called under wb->list_lock. If there are multiple wb per bdi,
 * only the flusher working on the first wb should do it.
 */
static void wb_update_bandwidth(struct bdi_writeback *wb,
                                unsigned long start_time)
{
        __bdi_update_bandwidth(wb->bdi, 0, 0, 0, 0, 0, start_time);
}

/*
 * Explicit flushing or periodic writeback of "old" data.
 *
 * Define "old": the first time one of an inode's pages is dirtied, we mark the
 * dirtying-time in the inode's address_space. So this periodic writeback code
 * just walks the superblock inode list, writing back any inodes which are
 * older than a specific point in time.
 *
 * Try to run once per dirty_writeback_interval. But if a writeback event
 * takes longer than one dirty_writeback_interval, then leave a one-second
 * gap.
 *
 * older_than_this takes precedence over nr_to_write. So we'll only write back
 * all dirty pages if they are all attached to "old" mappings.
 */
static long wb_writeback(struct bdi_writeback *wb,
                         struct wb_writeback_work *work)
{
        unsigned long wb_start = jiffies;
        long nr_pages = work->nr_pages;
        unsigned long oldest_jif;
        struct inode *inode;
        long progress;

        oldest_jif = jiffies;
        work->older_than_this = &oldest_jif;

        spin_lock(&wb->list_lock);
        for (;;) {
                /*
                 * Stop writeback when nr_pages has been consumed
                 */
                if (work->nr_pages <= 0)
                        break;

                /*
                 * Background writeout and kupdate-style writeback may
                 * run forever. Stop them if there is other work to do
                 * so that e.g. sync can proceed. They'll be restarted
                 * after the other works are all done.
                 */
                if ((work->for_background || work->for_kupdate) &&
                    !list_empty(&wb->bdi->work_list))
                        break;

                /*
                 * For background writeout, stop when we are below the
                 * background dirty threshold
                 */
                if (work->for_background && !over_bground_thresh(wb->bdi))
                        break;

                /*
                 * Kupdate and background works are special and we want to
                 * include all inodes that need writing. Livelock avoidance is
                 * handled by these works yielding to any other work so we are
                 * safe.
                 */
                if (work->for_kupdate) {
                        oldest_jif = jiffies -
                                msecs_to_jiffies(dirty_expire_interval * 10);
                } else if (work->for_background)
                        oldest_jif = jiffies;

                trace_writeback_start(wb->bdi, work);
                if (list_empty(&wb->b_io))
                        queue_io(wb, work);
                if (work->sb)
                        progress = writeback_sb_inodes(work->sb, wb, work);
                else
                        progress = __writeback_inodes_wb(wb, work);
                trace_writeback_written(wb->bdi, work);

                wb_update_bandwidth(wb, wb_start);

                /*
                 * Did we write something? Try for more
                 *
                 * Dirty inodes are moved to b_io for writeback in batches.
                 * The completion of the current batch does not necessarily
                 * mean the overall work is done. So we keep looping as long
                 * as we made some progress on cleaning pages or inodes.
                 */
                if (progress)
                        continue;
                /*
                 * No more inodes for IO, bail
                 */
                if (list_empty(&wb->b_more_io))
                        break;
                /*
                 * Nothing written. Wait for some inode to
                 * become available for writeback. Otherwise
                 * we'll just busyloop.
                 */
                if (!list_empty(&wb->b_more_io)) {
                        trace_writeback_wait(wb->bdi, work);
                        inode = wb_inode(wb->b_more_io.prev);
                        spin_lock(&inode->i_lock);
                        spin_unlock(&wb->list_lock);
                        /* This function drops i_lock... */
                        inode_sleep_on_writeback(inode);
                        spin_lock(&wb->list_lock);
                }
        }
        spin_unlock(&wb->list_lock);

        return nr_pages - work->nr_pages;
}

/*
 * Return the next wb_writeback_work struct that hasn't been processed yet.
 */
static struct wb_writeback_work *
get_next_work_item(struct backing_dev_info *bdi)
{
        struct wb_writeback_work *work = NULL;

        spin_lock_bh(&bdi->wb_lock);
        if (!list_empty(&bdi->work_list)) {
                work = list_entry(bdi->work_list.next,
                                  struct wb_writeback_work, list);
                list_del_init(&work->list);
        }
        spin_unlock_bh(&bdi->wb_lock);
        return work;
}

/*
 * Add in the number of potentially dirty inodes, because each inode
 * write can dirty pagecache in the underlying blockdev.
 */
static unsigned long get_nr_dirty_pages(void)
{
        return global_page_state(NR_FILE_DIRTY) +
                global_page_state(NR_UNSTABLE_NFS) +
                get_nr_dirty_inodes();
}

static long wb_check_background_flush(struct bdi_writeback *wb)
{
        if (over_bground_thresh(wb->bdi)) {

                struct wb_writeback_work work = {
                        .nr_pages = LONG_MAX,
                        .sync_mode = WB_SYNC_NONE,
                        .for_background = 1,
                        .range_cyclic = 1,
                        .reason = WB_REASON_BACKGROUND,
                };

                return wb_writeback(wb, &work);
        }

        return 0;
}

static long wb_check_old_data_flush(struct bdi_writeback *wb)
{
        unsigned long expired;
        long nr_pages;

        /*
         * When set to zero, disable periodic writeback
         */
        if (!dirty_writeback_interval)
                return 0;

        expired = wb->last_old_flush +
                        msecs_to_jiffies(dirty_writeback_interval * 10);
        if (time_before(jiffies, expired))
                return 0;

        wb->last_old_flush = jiffies;
        nr_pages = get_nr_dirty_pages();

        if (nr_pages) {
                struct wb_writeback_work work = {
                        .nr_pages = nr_pages,
                        .sync_mode = WB_SYNC_NONE,
                        .for_kupdate = 1,
                        .range_cyclic = 1,
                        .reason = WB_REASON_PERIODIC,
                };

                return wb_writeback(wb, &work);
        }

        return 0;
}

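/*
 * Units note: dirty_writeback_interval (vm.dirty_writeback_centisecs,
 * default 500) is in hundredths of a second, so the "* 10" above converts
 * centiseconds to the milliseconds that msecs_to_jiffies() expects; the
 * default works out to one periodic flush every 5 seconds.
 */
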
/*
 * Retrieve work items and do the writeback they describe
 */
long wb_do_writeback(struct bdi_writeback *wb, int force_wait)
{
        struct backing_dev_info *bdi = wb->bdi;
        struct wb_writeback_work *work;
        long wrote = 0;

        set_bit(BDI_writeback_running, &wb->bdi->state);
        while ((work = get_next_work_item(bdi)) != NULL) {
                /*
                 * Override sync mode, in case we must wait for completion
                 * because this thread is exiting now.
                 */
                if (force_wait)
                        work->sync_mode = WB_SYNC_ALL;

                trace_writeback_exec(bdi, work);

                wrote += wb_writeback(wb, work);

                /*
                 * Notify the caller of completion if this is a synchronous
                 * work item, otherwise just free it.
                 */
                if (work->done)
                        complete(work->done);
                else
                        kfree(work);
        }

        /*
         * Check for periodic writeback, kupdated() style
         */
        wrote += wb_check_old_data_flush(wb);
        wrote += wb_check_background_flush(wb);
        clear_bit(BDI_writeback_running, &wb->bdi->state);

        return wrote;
}

/*
 * Handle writeback of dirty data for the device backed by this bdi. Also
 * wakes up periodically and does kupdated style flushing.
 */
int bdi_writeback_thread(void *data)
{
        struct bdi_writeback *wb = data;
        struct backing_dev_info *bdi = wb->bdi;
        long pages_written;

        current->flags |= PF_SWAPWRITE;
        set_freezable();
        wb->last_active = jiffies;

        /*
         * Our parent may run at a different priority, just set us to normal
         */
        set_user_nice(current, 0);

        trace_writeback_thread_start(bdi);

        while (!kthread_freezable_should_stop(NULL)) {
                /*
                 * Remove own delayed wake-up timer, since we are already awake
                 * and we'll take care of the periodic write-back.
                 */
                del_timer(&wb->wakeup_timer);

                pages_written = wb_do_writeback(wb, 0);

                trace_writeback_pages_written(pages_written);

                if (pages_written)
                        wb->last_active = jiffies;

                set_current_state(TASK_INTERRUPTIBLE);
                if (!list_empty(&bdi->work_list) || kthread_should_stop()) {
                        __set_current_state(TASK_RUNNING);
                        continue;
                }

                if (wb_has_dirty_io(wb) && dirty_writeback_interval)
                        schedule_timeout(msecs_to_jiffies(dirty_writeback_interval * 10));
                else {
                        /*
                         * We have nothing to do, so can go sleep without any
                         * timeout and save power. When a work is queued or
                         * something is made dirty - we will be woken up.
                         */
                        schedule();
                }
        }

        /* Flush any work that raced with us exiting */
        if (!list_empty(&bdi->work_list))
                wb_do_writeback(wb, 1);

        trace_writeback_thread_stop(bdi);
        return 0;
}


/*
 * Start writeback of `nr_pages' pages. If `nr_pages' is zero, write back
 * the whole world.
 */
void wakeup_flusher_threads(long nr_pages, enum wb_reason reason)
{
        struct backing_dev_info *bdi;

        if (!nr_pages) {
                nr_pages = global_page_state(NR_FILE_DIRTY) +
                                global_page_state(NR_UNSTABLE_NFS);
        }

        rcu_read_lock();
        list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) {
                if (!bdi_has_dirty_io(bdi))
                        continue;
                __bdi_start_writeback(bdi, nr_pages, false, reason);
        }
        rcu_read_unlock();
}

static noinline void block_dump___mark_inode_dirty(struct inode *inode)
{
        if (inode->i_ino || strcmp(inode->i_sb->s_id, "bdev")) {
                struct dentry *dentry;
                const char *name = "?";

                dentry = d_find_alias(inode);
                if (dentry) {
                        spin_lock(&dentry->d_lock);
                        name = (const char *) dentry->d_name.name;
                }
                printk(KERN_DEBUG
                       "%s(%d): dirtied inode %lu (%s) on %s\n",
                       current->comm, task_pid_nr(current), inode->i_ino,
                       name, inode->i_sb->s_id);
                if (dentry) {
                        spin_unlock(&dentry->d_lock);
                        dput(dentry);
                }
        }
}

/**
 * __mark_inode_dirty - internal function
 * @inode: inode to mark
 * @flags: what kind of dirty (i.e. I_DIRTY_SYNC)
 *
 * Mark an inode as dirty and, unless it was already dirty or is being
 * synced, put it on its superblock's dirty list. Callers should use
 * mark_inode_dirty() or mark_inode_dirty_sync().
 *
 * CAREFUL! We mark it dirty unconditionally, but move it onto the dirty list
 * only if it is hashed or if it refers to a blockdev. If it was not hashed,
 * it will never be added to the dirty list even if it is later hashed, as it
 * will have been marked dirty already. In short, make sure you hash any
 * inodes _before_ you start marking them dirty.
 */
void __mark_inode_dirty(struct inode *inode, int flags)
{
        struct super_block *sb = inode->i_sb;
        struct backing_dev_info *bdi = NULL;

        /*
         * Don't do this for I_DIRTY_PAGES - that doesn't actually
         * dirty the inode itself
         */
        if (flags & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
                if (sb->s_op->dirty_inode)
                        sb->s_op->dirty_inode(inode, flags);
        }

        /*
         * make sure that changes are seen by all cpus before we test i_state
         * -- mikulas
         */
        smp_mb();

        /* avoid the locking if we can */
        if ((inode->i_state & flags) == flags)
                return;

        if (unlikely(block_dump))
                block_dump___mark_inode_dirty(inode);

        spin_lock(&inode->i_lock);
        if ((inode->i_state & flags) != flags) {
                const int was_dirty = inode->i_state & I_DIRTY;

                inode->i_state |= flags;

                /*
                 * If the inode is being synced, just update its dirty state.
                 * The unlocker will place the inode on the appropriate
                 * superblock list, based upon its state.
                 */
                if (inode->i_state & I_SYNC)
                        goto out_unlock_inode;

                /*
                 * Only add valid (hashed) inodes to the superblock's
                 * dirty list. Add blockdev inodes as well.
                 */
                if (!S_ISBLK(inode->i_mode)) {
                        if (inode_unhashed(inode))
                                goto out_unlock_inode;
                }
                if (inode->i_state & I_FREEING)
                        goto out_unlock_inode;

                /*
                 * If the inode was already on b_dirty/b_io/b_more_io, don't
                 * reposition it (that would break b_dirty time-ordering).
                 */
                if (!was_dirty) {
                        bool wakeup_bdi = false;
                        bdi = inode_to_bdi(inode);

                        if (bdi_cap_writeback_dirty(bdi)) {
                                WARN(!test_bit(BDI_registered, &bdi->state),
                                     "bdi-%s not registered\n", bdi->name);

                                /*
                                 * If this is the first dirty inode for this
                                 * bdi, we have to wake-up the corresponding
                                 * bdi thread to make sure background
                                 * write-back happens later.
                                 */
                                if (!wb_has_dirty_io(&bdi->wb))
                                        wakeup_bdi = true;
                        }

                        spin_unlock(&inode->i_lock);
                        spin_lock(&bdi->wb.list_lock);
                        inode->dirtied_when = jiffies;
                        list_move(&inode->i_wb_list, &bdi->wb.b_dirty);
                        spin_unlock(&bdi->wb.list_lock);

                        if (wakeup_bdi)
                                bdi_wakeup_thread_delayed(bdi);
                        return;
                }
        }
out_unlock_inode:
        spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(__mark_inode_dirty);

static void wait_sb_inodes(struct super_block *sb)
{
        struct inode *inode, *old_inode = NULL;

        /*
         * We need to be protected against the filesystem going from
         * r/o to r/w or vice versa.
         */
        WARN_ON(!rwsem_is_locked(&sb->s_umount));

        spin_lock(&inode_sb_list_lock);

        /*
         * Data integrity sync. Must wait for all pages under writeback,
         * because there may have been pages dirtied before our sync
         * call, but which had writeout started before we write it out.
         * In which case, the inode may not be on the dirty list, but
         * we still have to wait for that writeout.
         */
        list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
                struct address_space *mapping = inode->i_mapping;

                spin_lock(&inode->i_lock);
                if ((inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) ||
                    (mapping->nrpages == 0)) {
                        spin_unlock(&inode->i_lock);
                        continue;
                }
                __iget(inode);
                spin_unlock(&inode->i_lock);
                spin_unlock(&inode_sb_list_lock);

                /*
                 * We hold a reference to 'inode' so it couldn't have been
                 * removed from s_inodes list while we dropped the
                 * inode_sb_list_lock. We cannot iput the inode now as we can
                 * be holding the last reference and we cannot iput it under
                 * inode_sb_list_lock. So we keep the reference and iput it
                 * later.
                 */
                iput(old_inode);
                old_inode = inode;

                filemap_fdatawait(mapping);

                cond_resched();

                spin_lock(&inode_sb_list_lock);
        }
        spin_unlock(&inode_sb_list_lock);
        iput(old_inode);
}

/**
 * writeback_inodes_sb_nr - writeback dirty inodes from given super_block
 * @sb: the superblock
 * @nr: the number of pages to write
 * @reason: reason why some writeback work was initiated
 *
 * Start writeback on some inodes on this super_block. No guarantees are made
 * on how many (if any) will be written, and this function does not wait
 * for IO completion of submitted IO.
 */
void writeback_inodes_sb_nr(struct super_block *sb,
                            unsigned long nr,
                            enum wb_reason reason)
{
        DECLARE_COMPLETION_ONSTACK(done);
        struct wb_writeback_work work = {
                .sb = sb,
                .sync_mode = WB_SYNC_NONE,
                .tagged_writepages = 1,
                .done = &done,
                .nr_pages = nr,
                .reason = reason,
        };

        if (sb->s_bdi == &noop_backing_dev_info)
                return;
        WARN_ON(!rwsem_is_locked(&sb->s_umount));
        bdi_queue_work(sb->s_bdi, &work);
        wait_for_completion(&done);
}
EXPORT_SYMBOL(writeback_inodes_sb_nr);

/**
 * writeback_inodes_sb - writeback dirty inodes from given super_block
 * @sb: the superblock
 * @reason: reason why some writeback work was initiated
 *
 * Start writeback on some inodes on this super_block. No guarantees are made
 * on how many (if any) will be written, and this function does not wait
 * for IO completion of submitted IO.
 */
void writeback_inodes_sb(struct super_block *sb, enum wb_reason reason)
{
        return writeback_inodes_sb_nr(sb, get_nr_dirty_pages(), reason);
}
EXPORT_SYMBOL(writeback_inodes_sb);

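/*
 * Usage note (illustrative): callers must already hold sb->s_umount, as the
 * WARN_ON in writeback_inodes_sb_nr() enforces. Callers that cannot
 * guarantee that can use the *_if_idle variants below, which take the
 * semaphore themselves, e.g.
 *
 *      writeback_inodes_sb_if_idle(sb, WB_REASON_FS_FREE_SPACE);
 */
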
/**
 * writeback_inodes_sb_if_idle - start writeback if none underway
 * @sb: the superblock
 * @reason: reason why some writeback work was initiated
 *
 * Invoke writeback_inodes_sb if no writeback is currently underway.
 * Returns 1 if writeback was started, 0 if not.
 */
int writeback_inodes_sb_if_idle(struct super_block *sb, enum wb_reason reason)
{
        if (!writeback_in_progress(sb->s_bdi)) {
                down_read(&sb->s_umount);
                writeback_inodes_sb(sb, reason);
                up_read(&sb->s_umount);
                return 1;
        } else
                return 0;
}
EXPORT_SYMBOL(writeback_inodes_sb_if_idle);

/**
 * writeback_inodes_sb_nr_if_idle - start writeback if none underway
 * @sb: the superblock
 * @nr: the number of pages to write
 * @reason: reason why some writeback work was initiated
 *
 * Invoke writeback_inodes_sb_nr if no writeback is currently underway.
 * Returns 1 if writeback was started, 0 if not.
 */
int writeback_inodes_sb_nr_if_idle(struct super_block *sb,
                                   unsigned long nr,
                                   enum wb_reason reason)
{
        if (!writeback_in_progress(sb->s_bdi)) {
                down_read(&sb->s_umount);
                writeback_inodes_sb_nr(sb, nr, reason);
                up_read(&sb->s_umount);
                return 1;
        } else
                return 0;
}
EXPORT_SYMBOL(writeback_inodes_sb_nr_if_idle);

/**
 * sync_inodes_sb - sync sb inode pages
 * @sb: the superblock
 *
 * This function writes and waits on any dirty inode belonging to this
 * super_block.
 */
void sync_inodes_sb(struct super_block *sb)
{
        DECLARE_COMPLETION_ONSTACK(done);
        struct wb_writeback_work work = {
                .sb = sb,
                .sync_mode = WB_SYNC_ALL,
                .nr_pages = LONG_MAX,
                .range_cyclic = 0,
                .done = &done,
                .reason = WB_REASON_SYNC,
        };

        /* Nothing to do? */
        if (sb->s_bdi == &noop_backing_dev_info)
                return;
        WARN_ON(!rwsem_is_locked(&sb->s_umount));

        bdi_queue_work(sb->s_bdi, &work);
        wait_for_completion(&done);

        wait_sb_inodes(sb);
}
EXPORT_SYMBOL(sync_inodes_sb);

/**
 * write_inode_now - write an inode to disk
 * @inode: inode to write to disk
 * @sync: whether the write should be synchronous or not
 *
 * This function commits an inode to disk immediately if it is dirty. This is
 * primarily needed by knfsd.
 *
 * The caller must either have a ref on the inode or must have set I_WILL_FREE.
 */
int write_inode_now(struct inode *inode, int sync)
{
        struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;
        struct writeback_control wbc = {
                .nr_to_write = LONG_MAX,
                .sync_mode = sync ? WB_SYNC_ALL : WB_SYNC_NONE,
                .range_start = 0,
                .range_end = LLONG_MAX,
        };

        if (!mapping_cap_writeback_dirty(inode->i_mapping))
                wbc.nr_to_write = 0;

        might_sleep();
        return writeback_single_inode(inode, wb, &wbc);
}
EXPORT_SYMBOL(write_inode_now);

/**
 * sync_inode - write an inode and its pages to disk.
 * @inode: the inode to sync
 * @wbc: controls the writeback mode
 *
 * sync_inode() will write an inode and its pages to disk. It will also
 * correctly update the inode on its superblock's dirty inode lists and will
 * update inode->i_state.
 *
 * The caller must have a ref on the inode.
 */
int sync_inode(struct inode *inode, struct writeback_control *wbc)
{
        return writeback_single_inode(inode, &inode_to_bdi(inode)->wb, wbc);
}
EXPORT_SYMBOL(sync_inode);

/**
 * sync_inode_metadata - write an inode to disk, metadata only
 * @inode: the inode to sync
 * @wait: wait for I/O to complete.
 *
 * Write an inode to disk and adjust its dirty state after completion.
 *
 * Note: only writes the actual inode, no associated data or other metadata.
 */
int sync_inode_metadata(struct inode *inode, int wait)
{
        struct writeback_control wbc = {
                .sync_mode = wait ? WB_SYNC_ALL : WB_SYNC_NONE,
                .nr_to_write = 0, /* metadata-only */
        };

        return sync_inode(inode, &wbc);
}
EXPORT_SYMBOL(sync_inode_metadata);
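
/*
 * Usage sketch (illustrative): an fsync implementation that has already
 * written and waited on the data pages could flush just the inode with
 *
 *      int err = sync_inode_metadata(inode, 1);
 *
 * With nr_to_write == 0 the data writeout budget is zero, so effectively
 * only write_inode() does work, and wait == 1 selects WB_SYNC_ALL so the
 * result is synchronous.
 */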