Linux Kernel 3.7.1
xfs_buf_item.c
1 /*
2  * Copyright (c) 2000-2005 Silicon Graphics, Inc.
3  * All Rights Reserved.
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU General Public License as
7  * published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it would be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write the Free Software Foundation,
16  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
17  */
18 #include "xfs.h"
19 #include "xfs_fs.h"
20 #include "xfs_types.h"
21 #include "xfs_bit.h"
22 #include "xfs_log.h"
23 #include "xfs_trans.h"
24 #include "xfs_sb.h"
25 #include "xfs_ag.h"
26 #include "xfs_mount.h"
27 #include "xfs_buf_item.h"
28 #include "xfs_trans_priv.h"
29 #include "xfs_error.h"
30 #include "xfs_trace.h"
31 
32 
33 kmem_zone_t *xfs_buf_item_zone;
34 
35 static inline struct xfs_buf_log_item *BUF_ITEM(struct xfs_log_item *lip)
36 {
37  return container_of(lip, struct xfs_buf_log_item, bli_item);
38 }
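BUF_ITEM() is the standard container_of() idiom: given a pointer to the xfs_log_item embedded in a buf log item, it recovers the enclosing structure by subtracting the member offset. A minimal user-space sketch of the same idiom (hypothetical structure names, not the kernel's):

#include <stdio.h>
#include <stddef.h>

/* user-space equivalent of the kernel's container_of() */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct log_item { int li_type; };

/* hypothetical outer structure embedding a log item, like xfs_buf_log_item */
struct buf_log_item {
	int		bli_flags;
	struct log_item	bli_item;	/* embedded by value, not a pointer */
};

int main(void)
{
	struct buf_log_item bli = { .bli_flags = 42 };
	struct log_item *lip = &bli.bli_item;

	/* recover the enclosing structure from the embedded member */
	struct buf_log_item *bip =
		container_of(lip, struct buf_log_item, bli_item);

	printf("bli_flags = %d\n", bip->bli_flags);	/* prints 42 */
	return 0;
}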
39 
40 
41 #ifdef XFS_TRANS_DEBUG
42 /*
43  * This function uses an alternate strategy for tracking the bytes
44  * that the user requests to be logged. This can then be used
45  * in conjunction with the bli_orig array in the buf log item to
46  * catch bugs in our callers' code.
47  *
48  * We also double check the bits set in xfs_buf_item_log using a
49  * simple algorithm to check that every byte is accounted for.
50  */
51 STATIC void
52 xfs_buf_item_log_debug(
53  xfs_buf_log_item_t *bip,
54  uint first,
55  uint last)
56 {
57  uint x;
58  uint byte;
59  uint nbytes;
60  uint chunk_num;
61  uint word_num;
62  uint bit_num;
63  uint bit_set;
64  uint *wordp;
65 
66  ASSERT(bip->bli_logged != NULL);
67  byte = first;
68  nbytes = last - first + 1;
69  bfset(bip->bli_logged, first, nbytes);
70  for (x = 0; x < nbytes; x++) {
71  chunk_num = byte >> XFS_BLF_SHIFT;
72  word_num = chunk_num >> BIT_TO_WORD_SHIFT;
73  bit_num = chunk_num & (NBWORD - 1);
74  wordp = &(bip->bli_format.blf_data_map[word_num]);
75  bit_set = *wordp & (1 << bit_num);
76  ASSERT(bit_set);
77  byte++;
78  }
79 }
80 
81 /*
82  * This function is called when we flush something into a buffer without
83  * logging it. This happens for things like inodes which are logged
84  * separately from the buffer.
85  */
86 void
87 xfs_buf_item_flush_log_debug(
88  xfs_buf_t *bp,
89  uint first,
90  uint last)
91 {
92  xfs_buf_log_item_t *bip = bp->b_fspriv;
93  uint nbytes;
94 
95  if (bip == NULL || (bip->bli_item.li_type != XFS_LI_BUF))
96  return;
97 
98  ASSERT(bip->bli_logged != NULL);
99  nbytes = last - first + 1;
100  bfset(bip->bli_logged, first, nbytes);
101 }
102 
103 /*
104  * This function is called to verify that our callers have logged
105  * all the bytes that they changed.
106  *
107  * It does this by comparing the original copy of the buffer stored in
108  * the buf log item's bli_orig array to the current copy of the buffer
109  * and ensuring that all bytes which mismatch are set in the bli_logged
110  * array of the buf log item.
111  */
112 STATIC void
113 xfs_buf_item_log_check(
114  xfs_buf_log_item_t *bip)
115 {
116  char *orig;
117  char *buffer;
118  int x;
119  xfs_buf_t *bp;
120 
121  ASSERT(bip->bli_orig != NULL);
122  ASSERT(bip->bli_logged != NULL);
123 
124  bp = bip->bli_buf;
125  ASSERT(bp->b_length > 0);
126  ASSERT(bp->b_addr != NULL);
127  orig = bip->bli_orig;
128  buffer = bp->b_addr;
129  for (x = 0; x < BBTOB(bp->b_length); x++) {
130  if (orig[x] != buffer[x] && !btst(bip->bli_logged, x)) {
131  xfs_emerg(bp->b_mount,
132  "%s: bip %p buffer %p orig %p index %d",
133  __func__, bip, bp, orig, x);
134  ASSERT(0);
135  }
136  }
137 }
138 #else
139 #define xfs_buf_item_log_debug(x,y,z)
140 #define xfs_buf_item_log_check(x)
141 #endif
142 
143 STATIC void xfs_buf_do_callbacks(struct xfs_buf *bp);
144 
145 /*
146  * This returns the number of log iovecs needed to log the
147  * given buf log item.
148  *
149  * It calculates this as 1 iovec for the buf log format structure
150  * and 1 for each stretch of non-contiguous chunks to be logged.
151  * Contiguous chunks are logged in a single iovec.
152  *
153  * If the XFS_BLI_STALE flag has been set, then log nothing.
154  */
155 STATIC uint
156 xfs_buf_item_size_segment(
157  struct xfs_buf_log_item *bip,
158  struct xfs_buf_log_format *blfp)
159 {
160  struct xfs_buf *bp = bip->bli_buf;
161  uint nvecs;
162  int next_bit;
163  int last_bit;
164 
165  last_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size, 0);
166  if (last_bit == -1)
167  return 0;
168 
169  /*
170  * initial count for a dirty buffer is 2 vectors - the format structure
171  * and the first dirty region.
172  */
173  nvecs = 2;
174 
175  while (last_bit != -1) {
176  /*
177  * This takes the bit number to start looking from and
178  * returns the next set bit from there. It returns -1
179  * if there are no more bits set or the start bit is
180  * beyond the end of the bitmap.
181  */
182  next_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size,
183  last_bit + 1);
184  /*
185  * If we run out of bits, leave the loop,
186  * else if we find a new set of bits bump the number of vecs,
187  * else keep scanning the current set of bits.
188  */
189  if (next_bit == -1) {
190  break;
191  } else if (next_bit != last_bit + 1) {
192  last_bit = next_bit;
193  nvecs++;
194  } else if (xfs_buf_offset(bp, next_bit * XFS_BLF_CHUNK) !=
195  (xfs_buf_offset(bp, last_bit * XFS_BLF_CHUNK) +
196  XFS_BLF_CHUNK)) {
197  last_bit = next_bit;
198  nvecs++;
199  } else {
200  last_bit++;
201  }
202  }
203 
204  return nvecs;
205 }
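The scan above charges one iovec for the format structure plus one per maximal run of contiguous dirty bits (splitting runs whose backing memory is discontiguous). A user-space sketch of the same counting, assuming a plain 32-bit-word bitmap and omitting the xfs_buf_offset() memory-contiguity check:

#include <stdio.h>

#define NBWORD	32

/* find the next set bit at or after start_bit; -1 if none (like xfs_next_bit) */
static int next_bit(const unsigned int *map, int map_size, int start_bit)
{
	int bit;

	for (bit = start_bit; bit < map_size * NBWORD; bit++)
		if (map[bit / NBWORD] & (1U << (bit % NBWORD)))
			return bit;
	return -1;
}

/* count iovecs: 1 for the format structure + 1 per run of contiguous set bits */
static unsigned int count_vecs(const unsigned int *map, int map_size)
{
	int last_bit = next_bit(map, map_size, 0);
	unsigned int nvecs;

	if (last_bit == -1)
		return 0;		/* clean buffer: nothing to log */

	nvecs = 2;			/* format structure + first dirty run */
	while (last_bit != -1) {
		int nb = next_bit(map, map_size, last_bit + 1);

		if (nb == -1)
			break;
		if (nb != last_bit + 1)
			nvecs++;	/* a new discontiguous run starts */
		last_bit = nb;
	}
	return nvecs;
}

int main(void)
{
	/* bits 0-2 and 5-6 set: two runs -> 1 format vec + 2 data vecs = 3 */
	unsigned int map[1] = { 0x67 };

	printf("nvecs = %u\n", count_vecs(map, 1));
	return 0;
}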
206 
207 /*
208  * This returns the number of log iovecs needed to log the given buf log item.
209  *
210  * It calculates this as 1 iovec for the buf log format structure and 1 for each
211  * stretch of non-contiguous chunks to be logged. Contiguous chunks are logged
212  * in a single iovec.
213  *
214  * Discontiguous buffers need a format structure per region that is being
215  * logged. This makes the changes in the buffer appear to log recovery as though
216  * they came from separate buffers, just like would occur if multiple buffers
217  * were used instead of a single discontiguous buffer. This enables
218  * discontiguous buffers to be in-memory constructs, completely transparent to
219  * what ends up on disk.
220  *
221  * If the XFS_BLI_STALE flag has been set, then log nothing but the buf log
222  * format structures.
223  */
224 STATIC uint
225 xfs_buf_item_size(
226  struct xfs_log_item *lip)
227 {
228  struct xfs_buf_log_item *bip = BUF_ITEM(lip);
229  uint nvecs;
230  int i;
231 
232  ASSERT(atomic_read(&bip->bli_refcount) > 0);
233  if (bip->bli_flags & XFS_BLI_STALE) {
234  /*
235  * The buffer is stale, so all we need to log
236  * is the buf log format structure with the
237  * cancel flag in it.
238  */
239  trace_xfs_buf_item_size_stale(bip);
240  ASSERT(bip->bli_format.blf_flags & XFS_BLF_CANCEL);
241  return bip->bli_format_count;
242  }
243 
244  ASSERT(bip->bli_flags & XFS_BLI_LOGGED);
245 
246  /*
247  * the vector count is based on the number of buffer vectors we have
248  * dirty bits in. This will only be greater than one when we have a
249  * compound buffer with more than one segment dirty. Hence for compound
250  * buffers we need to track which segment the dirty bits correspond to,
251  * and when we move from one segment to the next increment the vector
252  * count for the extra buf log format structure that will need to be
253  * written.
254  */
255  nvecs = 0;
256  for (i = 0; i < bip->bli_format_count; i++) {
257  nvecs += xfs_buf_item_size_segment(bip, &bip->bli_formats[i]);
258  }
259 
260  trace_xfs_buf_item_size(bip);
261  return nvecs;
262 }
263 
264 static struct xfs_log_iovec *
265 xfs_buf_item_format_segment(
266  struct xfs_buf_log_item *bip,
267  struct xfs_log_iovec *vecp,
268  uint offset,
269  struct xfs_buf_log_format *blfp)
270 {
271  struct xfs_buf *bp = bip->bli_buf;
272  uint base_size;
273  uint nvecs;
274  int first_bit;
275  int last_bit;
276  int next_bit;
277  uint nbits;
278  uint buffer_offset;
279 
280  /* copy the flags across from the base format item */
281  blfp->blf_flags = bip->bli_format.blf_flags;
282 
283  /*
284  * Base size is the actual size of the ondisk structure - it reflects
285  * the actual size of the dirty bitmap rather than the size of the in
286  * memory structure.
287  */
288  base_size = offsetof(struct xfs_buf_log_format, blf_data_map) +
289  (blfp->blf_map_size * sizeof(blfp->blf_data_map[0]));
290  vecp->i_addr = blfp;
291  vecp->i_len = base_size;
292  vecp->i_type = XLOG_REG_TYPE_BFORMAT;
293  vecp++;
294  nvecs = 1;
295 
296  if (bip->bli_flags & XFS_BLI_STALE) {
297  /*
298  * The buffer is stale, so all we need to log
299  * is the buf log format structure with the
300  * cancel flag in it.
301  */
302  trace_xfs_buf_item_format_stale(bip);
303  ASSERT(blfp->blf_flags & XFS_BLF_CANCEL);
304  blfp->blf_size = nvecs;
305  return vecp;
306  }
307 
308  /*
309  * Fill in an iovec for each set of contiguous chunks.
310  */
311  first_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size, 0);
312  ASSERT(first_bit != -1);
313  last_bit = first_bit;
314  nbits = 1;
315  for (;;) {
316  /*
317  * This takes the bit number to start looking from and
318  * returns the next set bit from there. It returns -1
319  * if there are no more bits set or the start bit is
320  * beyond the end of the bitmap.
321  */
322  next_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size,
323  (uint)last_bit + 1);
324  /*
325  * If we run out of bits fill in the last iovec and get
326  * out of the loop.
327  * Else if we start a new set of bits then fill in the
328  * iovec for the series we were looking at and start
329  * counting the bits in the new one.
330  * Else we're still in the same set of bits so just
331  * keep counting and scanning.
332  */
333  if (next_bit == -1) {
334  buffer_offset = offset + first_bit * XFS_BLF_CHUNK;
335  vecp->i_addr = xfs_buf_offset(bp, buffer_offset);
336  vecp->i_len = nbits * XFS_BLF_CHUNK;
337  vecp->i_type = XLOG_REG_TYPE_BCHUNK;
338  nvecs++;
339  break;
340  } else if (next_bit != last_bit + 1) {
341  buffer_offset = offset + first_bit * XFS_BLF_CHUNK;
342  vecp->i_addr = xfs_buf_offset(bp, buffer_offset);
343  vecp->i_len = nbits * XFS_BLF_CHUNK;
344  vecp->i_type = XLOG_REG_TYPE_BCHUNK;
345  nvecs++;
346  vecp++;
347  first_bit = next_bit;
348  last_bit = next_bit;
349  nbits = 1;
350  } else if (xfs_buf_offset(bp, offset +
351  (next_bit << XFS_BLF_SHIFT)) !=
352  (xfs_buf_offset(bp, offset +
353  (last_bit << XFS_BLF_SHIFT)) +
354  XFS_BLF_CHUNK)) {
355  buffer_offset = offset + first_bit * XFS_BLF_CHUNK;
356  vecp->i_addr = xfs_buf_offset(bp, buffer_offset);
357  vecp->i_len = nbits * XFS_BLF_CHUNK;
358  vecp->i_type = XLOG_REG_TYPE_BCHUNK;
359 /*
360  * You would think we need to bump nvecs here too, but we do not:
361  * this number is used by recovery, and it gets confused by the
362  * boundary split here.
363  * nvecs++;
364  */
365  vecp++;
366  first_bit = next_bit;
367  last_bit = next_bit;
368  nbits = 1;
369  } else {
370  last_bit++;
371  nbits++;
372  }
373  }
374  bip->bli_format.blf_size = nvecs;
375  return vecp;
376 }
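Throughout this routine, byte offsets and bitmap bits are related by XFS_BLF_SHIFT: one bit in blf_data_map covers one XFS_BLF_CHUNK (128-byte) slice of the buffer. A small worked sketch of that conversion, using the values these macros have in xfs_buf_item.h:

#include <stdio.h>

#define XFS_BLF_SHIFT	7			/* log2 of the chunk size */
#define XFS_BLF_CHUNK	(1 << XFS_BLF_SHIFT)	/* 128-byte logging granularity */

int main(void)
{
	unsigned int first = 130, last = 300;	/* dirty byte range in the buffer */

	/* one bitmap bit per 128-byte chunk, so a shift converts bytes to bits */
	unsigned int first_bit = first >> XFS_BLF_SHIFT;	/* chunk 1 */
	unsigned int last_bit = last >> XFS_BLF_SHIFT;		/* chunk 2 */

	printf("bytes [%u, %u] dirty chunks %u..%u (%u bits, %u bytes logged)\n",
	       first, last, first_bit, last_bit,
	       last_bit - first_bit + 1,
	       (last_bit - first_bit + 1) * XFS_BLF_CHUNK);
	return 0;
}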
377 
378 /*
379  * This is called to fill in the vector of log iovecs for the
380  * given log buf item. It fills the first entry with a buf log
381  * format structure, and the rest point to contiguous chunks
382  * within the buffer.
383  */
384 STATIC void
385 xfs_buf_item_format(
386  struct xfs_log_item *lip,
387  struct xfs_log_iovec *vecp)
388 {
389  struct xfs_buf_log_item *bip = BUF_ITEM(lip);
390  struct xfs_buf *bp = bip->bli_buf;
391  uint offset = 0;
392  int i;
393 
394  ASSERT(atomic_read(&bip->bli_refcount) > 0);
395  ASSERT((bip->bli_flags & XFS_BLI_LOGGED) ||
396  (bip->bli_flags & XFS_BLI_STALE));
397 
398  /*
399  * If it is an inode buffer, transfer the in-memory state to the
400  * format flags and clear the in-memory state. We do not transfer
401  * this state if the inode buffer allocation has not yet been committed
402  * to the log as setting the XFS_BLI_INODE_BUF flag will prevent
403  * correct replay of the inode allocation.
404  */
405  if (bip->bli_flags & XFS_BLI_INODE_BUF) {
406  if (!((bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF) &&
407  xfs_log_item_in_current_chkpt(lip)))
408  bip->bli_format.blf_flags |= XFS_BLF_INODE_BUF;
409  bip->bli_flags &= ~XFS_BLI_INODE_BUF;
410  }
411 
412  for (i = 0; i < bip->bli_format_count; i++) {
413  vecp = xfs_buf_item_format_segment(bip, vecp, offset,
414  &bip->bli_formats[i]);
415  offset += bp->b_maps[i].bm_len;
416  }
417 
418  /*
419  * Check to make sure everything is consistent.
420  */
421  trace_xfs_buf_item_format(bip);
422  xfs_buf_item_log_check(bip);
423 }
424 
425 /*
426  * This is called to pin the buffer associated with the buf log item in memory
427  * so it cannot be written out.
428  *
429  * We also always take a reference to the buffer log item here so that the bli
430  * is held while the item is pinned in memory. This means that we can
431  * unconditionally drop the reference count a transaction holds when the
432  * transaction is completed.
433  */
434 STATIC void
435 xfs_buf_item_pin(
436  struct xfs_log_item *lip)
437 {
438  struct xfs_buf_log_item *bip = BUF_ITEM(lip);
439 
440  ASSERT(atomic_read(&bip->bli_refcount) > 0);
441  ASSERT((bip->bli_flags & XFS_BLI_LOGGED) ||
442  (bip->bli_flags & XFS_BLI_STALE));
443 
444  trace_xfs_buf_item_pin(bip);
445 
446  atomic_inc(&bip->bli_refcount);
447  atomic_inc(&bip->bli_buf->b_pin_count);
448 }
449 
450 /*
451  * This is called to unpin the buffer associated with the buf log
452  * item which was previously pinned with a call to xfs_buf_item_pin().
453  *
454  * Also drop the reference to the buf item for the current transaction.
455  * If the XFS_BLI_STALE flag is set and we are the last reference,
456  * then free up the buf log item and unlock the buffer.
457  *
458  * If the remove flag is set we are called from uncommit in the
459  * forced-shutdown path. If that is true and the reference count on
460  * the log item is going to drop to zero we need to free the item's
461  * descriptor in the transaction.
462  */
463 STATIC void
464 xfs_buf_item_unpin(
465  struct xfs_log_item *lip,
466  int remove)
467 {
468  struct xfs_buf_log_item *bip = BUF_ITEM(lip);
469  xfs_buf_t *bp = bip->bli_buf;
470  struct xfs_ail *ailp = lip->li_ailp;
471  int stale = bip->bli_flags & XFS_BLI_STALE;
472  int freed;
473 
474  ASSERT(bp->b_fspriv == bip);
475  ASSERT(atomic_read(&bip->bli_refcount) > 0);
476 
477  trace_xfs_buf_item_unpin(bip);
478 
479  freed = atomic_dec_and_test(&bip->bli_refcount);
480 
480 
481  if (atomic_dec_and_test(&bp->b_pin_count))
482  wake_up_all(&bp->b_waiters);
483 
484  if (freed && stale) {
485  ASSERT(bip->bli_flags & XFS_BLI_STALE);
486  ASSERT(xfs_buf_islocked(bp));
487  ASSERT(XFS_BUF_ISSTALE(bp));
488  ASSERT(bip->bli_format.blf_flags & XFS_BLF_CANCEL);
489 
490  trace_xfs_buf_item_unpin_stale(bip);
491 
492  if (remove) {
493  /*
494  * If we are in a transaction context, we have to
495  * remove the log item from the transaction as we are
496  * about to release our reference to the buffer. If we
497  * don't, the unlock that occurs later in
498  * xfs_trans_uncommit() will try to reference the
499  * buffer which we no longer have a hold on.
500  */
501  if (lip->li_desc)
502  xfs_trans_del_item(lip);
503 
504  /*
505  * Since the transaction no longer refers to the buffer,
506  * the buffer should no longer refer to the transaction.
507  */
508  bp->b_transp = NULL;
509  }
510 
511  /*
512  * If we get called here because of an IO error, we may
513  * or may not have the item on the AIL. xfs_trans_ail_delete()
514  * will take care of that situation.
515  * xfs_trans_ail_delete() drops the AIL lock.
516  */
517  if (bip->bli_flags & XFS_BLI_STALE_INODE) {
518  xfs_buf_do_callbacks(bp);
519  bp->b_fspriv = NULL;
520  bp->b_iodone = NULL;
521  } else {
522  spin_lock(&ailp->xa_lock);
523  xfs_trans_ail_delete(ailp, lip, SHUTDOWN_LOG_IO_ERROR);
524  xfs_buf_item_relse(bp);
525  ASSERT(bp->b_fspriv == NULL);
526  }
527  xfs_buf_relse(bp);
528  } else if (freed && remove) {
529  /*
530  * There are currently two references to the buffer - the active
531  * LRU reference and the buf log item. What we are about to do
532  * here - simulate a failed IO completion - requires 3
533  * references.
534  *
535  * The LRU reference is removed by the xfs_buf_stale() call. The
536  * buf item reference is removed by the xfs_buf_iodone()
537  * callback that is run by xfs_buf_do_callbacks() during ioend
538  * processing (via the bp->b_iodone callback), and then finally
539  * the ioend processing will drop the IO reference if the buffer
540  * is marked XBF_ASYNC.
541  *
542  * Hence we need to take an additional reference here so that IO
543  * completion processing doesn't free the buffer prematurely.
544  */
545  xfs_buf_lock(bp);
546  xfs_buf_hold(bp);
547  bp->b_flags |= XBF_ASYNC;
548  xfs_buf_ioerror(bp, EIO);
549  XFS_BUF_UNDONE(bp);
550  xfs_buf_stale(bp);
551  xfs_buf_ioend(bp, 0);
552  }
553 }
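The unpin path hinges on atomic_dec_and_test(): only the thread that drops the final reference sees a true return and performs the teardown. A user-space C11 sketch of that idiom (hypothetical helper, not the kernel's implementation):

#include <stdio.h>
#include <stdatomic.h>

/* user-space analogue of the kernel's atomic_dec_and_test(): decrement and
 * return nonzero only when the counter hits zero */
static int dec_and_test(atomic_int *v)
{
	/* fetch_sub returns the previous value, so 1 -> 0 is the last drop */
	return atomic_fetch_sub(v, 1) == 1;
}

int main(void)
{
	atomic_int refcount = 2;

	printf("first drop frees: %d\n", dec_and_test(&refcount));	/* 0 */
	printf("last drop frees:  %d\n", dec_and_test(&refcount));	/* 1 */
	return 0;
}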
554 
555 STATIC uint
556 xfs_buf_item_push(
557  struct xfs_log_item *lip,
558  struct list_head *buffer_list)
559 {
560  struct xfs_buf_log_item *bip = BUF_ITEM(lip);
561  struct xfs_buf *bp = bip->bli_buf;
562  uint rval = XFS_ITEM_SUCCESS;
563 
564  if (xfs_buf_ispinned(bp))
565  return XFS_ITEM_PINNED;
566  if (!xfs_buf_trylock(bp))
567  return XFS_ITEM_LOCKED;
568 
569  ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
570 
571  trace_xfs_buf_item_push(bip);
572 
573  if (!xfs_buf_delwri_queue(bp, buffer_list))
574  rval = XFS_ITEM_FLUSHING;
575  xfs_buf_unlock(bp);
576  return rval;
577 }
578 
579 /*
580  * Release the buffer associated with the buf log item. If there is no dirty
581  * logged data associated with the buffer recorded in the buf log item, then
582  * free the buf log item and remove the reference to it in the buffer.
583  *
584  * This call ignores the recursion count. It is only called when the buffer
585  * should REALLY be unlocked, regardless of the recursion count.
586  *
587  * We unconditionally drop the transaction's reference to the log item. If the
588  * item was logged, then another reference was taken when it was pinned, so we
589  * can safely drop the transaction reference now. This also allows us to avoid
590  * potential races with the unpin code freeing the bli by not referencing the
591  * bli after we've dropped the reference count.
592  *
593  * If the XFS_BLI_HOLD flag is set in the buf log item, then free the log item
594  * if necessary but do not unlock the buffer. This is for support of
595  * xfs_trans_bhold(). Make sure the XFS_BLI_HOLD field is cleared if we don't
596  * free the item.
597  */
598 STATIC void
599 xfs_buf_item_unlock(
600  struct xfs_log_item *lip)
601 {
602  struct xfs_buf_log_item *bip = BUF_ITEM(lip);
603  struct xfs_buf *bp = bip->bli_buf;
604  int aborted;
605  uint hold;
606 
607  /* Clear the buffer's association with this transaction. */
608  bp->b_transp = NULL;
609 
610  /*
611  * If this is a transaction abort, don't return early. Instead, allow
612  * the brelse to happen. Normally it would be done for stale
613  * (cancelled) buffers at unpin time, but we'll never go through the
614  * pin/unpin cycle if we abort inside commit.
615  */
616  aborted = (lip->li_flags & XFS_LI_ABORTED) != 0;
617 
618  /*
619  * Before possibly freeing the buf item, determine if we should
620  * release the buffer at the end of this routine.
621  */
622  hold = bip->bli_flags & XFS_BLI_HOLD;
623 
624  /* Clear the per transaction state. */
625  bip->bli_flags &= ~(XFS_BLI_LOGGED | XFS_BLI_HOLD);
626 
627  /*
628  * If the buf item is marked stale, then don't do anything. We'll
629  * unlock the buffer and free the buf item when the buffer is unpinned
630  * for the last time.
631  */
632  if (bip->bli_flags & XFS_BLI_STALE) {
633  trace_xfs_buf_item_unlock_stale(bip);
634  ASSERT(bip->bli_format.blf_flags & XFS_BLF_CANCEL);
635  if (!aborted) {
636  atomic_dec(&bip->bli_refcount);
637  return;
638  }
639  }
640 
641  trace_xfs_buf_item_unlock(bip);
642 
643  /*
644  * If the buf item isn't tracking any data, free it, otherwise drop the
645  * reference we hold to it.
646  */
647  if (xfs_bitmap_empty(bip->bli_format.blf_data_map,
648  bip->bli_format.blf_map_size))
649  xfs_buf_item_relse(bp);
650  else
651  atomic_dec(&bip->bli_refcount);
652 
653  if (!hold)
654  xfs_buf_relse(bp);
655 }
656 
657 /*
658  * This is called to find out where the oldest active copy of the
659  * buf log item in the on disk log resides now that the last log
660  * write of it completed at the given lsn.
661  * We always re-log all the dirty data in a buffer, so usually the
662  * latest copy in the on disk log is the only one that matters. For
663  * those cases we simply return the given lsn.
664  *
665  * The one exception to this is for buffers full of newly allocated
666  * inodes. These buffers are only relogged with the XFS_BLI_INODE_BUF
667  * flag set, indicating that only the di_next_unlinked fields from the
668  * inodes in the buffers will be replayed during recovery. If the
669  * original newly allocated inode images have not yet been flushed
670  * when the buffer is so relogged, then we need to make sure that we
671  * keep the old images in the 'active' portion of the log. We do this
672  * by returning the original lsn of that transaction here rather than
673  * the current one.
674  */
675 STATIC xfs_lsn_t
676 xfs_buf_item_committed(
677  struct xfs_log_item *lip,
678  xfs_lsn_t lsn)
679 {
680  struct xfs_buf_log_item *bip = BUF_ITEM(lip);
681 
682  trace_xfs_buf_item_committed(bip);
683 
684  if ((bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF) && lip->li_lsn != 0)
685  return lip->li_lsn;
686  return lsn;
687 }
688 
689 STATIC void
690 xfs_buf_item_committing(
691  struct xfs_log_item *lip,
692  xfs_lsn_t commit_lsn)
693 {
694 }
695 
696 /*
697  * This is the ops vector shared by all buf log items.
698  */
699 static const struct xfs_item_ops xfs_buf_item_ops = {
700  .iop_size = xfs_buf_item_size,
701  .iop_format = xfs_buf_item_format,
702  .iop_pin = xfs_buf_item_pin,
703  .iop_unpin = xfs_buf_item_unpin,
704  .iop_unlock = xfs_buf_item_unlock,
705  .iop_committed = xfs_buf_item_committed,
706  .iop_push = xfs_buf_item_push,
707  .iop_committing = xfs_buf_item_committing
708 };
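The ops vector is a function-pointer table: the generic log item code calls through it without knowing the concrete item type, which is how xfs_buf_item_size(), xfs_buf_item_pin() and the rest get invoked. A minimal sketch of the same vtable-in-C pattern, with hypothetical names:

#include <stdio.h>

/* hypothetical, minimal version of the xfs_item_ops dispatch pattern */
struct log_item;

struct item_ops {
	unsigned int	(*iop_size)(struct log_item *);
	void		(*iop_pin)(struct log_item *);
};

struct log_item {
	const struct item_ops	*li_ops;
	int			li_type;
};

static unsigned int buf_item_size(struct log_item *lip)
{
	return 3;	/* pretend the item needs three iovecs */
}

static void buf_item_pin(struct log_item *lip)
{
	printf("pinned item of type 0x%x\n", lip->li_type);
}

static const struct item_ops buf_item_ops = {
	.iop_size = buf_item_size,
	.iop_pin  = buf_item_pin,
};

int main(void)
{
	struct log_item li = { .li_ops = &buf_item_ops, .li_type = 0x123c };

	/* generic code dispatches without knowing the concrete item type */
	printf("nvecs = %u\n", li.li_ops->iop_size(&li));
	li.li_ops->iop_pin(&li);
	return 0;
}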
709 
710 STATIC int
711 xfs_buf_item_get_format(
712  struct xfs_buf_log_item *bip,
713  int count)
714 {
715  ASSERT(bip->bli_formats == NULL);
716  bip->bli_format_count = count;
717 
718  if (count == 1) {
719  bip->bli_formats = &bip->bli_format;
720  return 0;
721  }
722 
723  bip->bli_formats = kmem_zalloc(count * sizeof(struct xfs_buf_log_format),
724  KM_SLEEP);
725  if (!bip->bli_formats)
726  return ENOMEM;
727  return 0;
728 }
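For the common single-segment buffer, bli_formats simply points at the format structure embedded in the buf log item itself, so no allocation is needed; only discontiguous (multi-map) buffers pay for a heap array. A user-space sketch of that small-count optimization and its matching free (hypothetical names):

#include <stdio.h>
#include <stdlib.h>

struct format { int map_size; };

struct bli {
	int		format_count;
	struct format	*formats;	/* points at embedded or heap array */
	struct format	format;		/* embedded single-segment format */
};

/* mirror xfs_buf_item_get_format(): avoid allocation for count == 1 */
static int get_format(struct bli *bip, int count)
{
	bip->format_count = count;
	if (count == 1) {
		bip->formats = &bip->format;
		return 0;
	}
	bip->formats = calloc(count, sizeof(struct format));
	return bip->formats ? 0 : -1;
}

static void free_format(struct bli *bip)
{
	/* only free what get_format() actually allocated */
	if (bip->formats != &bip->format)
		free(bip->formats);
	bip->formats = NULL;
}

int main(void)
{
	struct bli one = { 0 }, many = { 0 };

	get_format(&one, 1);
	get_format(&many, 4);
	printf("one uses embedded: %d, many uses heap: %d\n",
	       one.formats == &one.format, many.formats != &many.format);
	free_format(&one);
	free_format(&many);
	return 0;
}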
729 
730 STATIC void
731 xfs_buf_item_free_format(
732  struct xfs_buf_log_item *bip)
733 {
734  if (bip->bli_formats != &bip->bli_format) {
735  kmem_free(bip->bli_formats);
736  bip->bli_formats = NULL;
737  }
738 }
739 
740 /*
741  * Allocate a new buf log item to go with the given buffer.
742  * Set the buffer's b_fsprivate field to point to the new
743  * buf log item. If there are other item's attached to the
744  * buffer (see xfs_buf_attach_iodone() below), then put the
745  * buf log item at the front.
746  */
747 void
748 xfs_buf_item_init(
749  xfs_buf_t *bp,
750  xfs_mount_t *mp)
751 {
752  xfs_log_item_t *lip = bp->b_fspriv;
753  xfs_buf_log_item_t *bip;
754  int chunks;
755  int map_size;
756  int error;
757  int i;
758 
759  /*
760  * Check to see if there is already a buf log item for
761  * this buffer. If there is, it is guaranteed to be
762  * the first. If we do already have one, there is
763  * nothing to do here so return.
764  */
765  ASSERT(bp->b_target->bt_mount == mp);
766  if (lip != NULL && lip->li_type == XFS_LI_BUF)
767  return;
768 
769  bip = kmem_zone_zalloc(xfs_buf_item_zone, KM_SLEEP);
770  xfs_log_item_init(mp, &bip->bli_item, XFS_LI_BUF, &xfs_buf_item_ops);
771  bip->bli_buf = bp;
772  xfs_buf_hold(bp);
773 
774  /*
775  * chunks is the number of XFS_BLF_CHUNK size pieces the buffer
776  * can be divided into. Make sure not to truncate any pieces.
777  * map_size is the size of the bitmap needed to describe the
778  * chunks of the buffer.
779  *
780  * Discontiguous buffer support follows the layout of the underlying
781  * buffer. This makes the implementation as simple as possible.
782  */
783  error = xfs_buf_item_get_format(bip, bp->b_map_count);
784  ASSERT(error == 0);
785 
786  for (i = 0; i < bip->bli_format_count; i++) {
787  chunks = DIV_ROUND_UP(BBTOB(bp->b_maps[i].bm_len),
788  XFS_BLF_CHUNK);
789  map_size = DIV_ROUND_UP(chunks, NBWORD);
790 
791  bip->bli_formats[i].blf_type = XFS_LI_BUF;
792  bip->bli_formats[i].blf_blkno = bp->b_maps[i].bm_bn;
793  bip->bli_formats[i].blf_len = bp->b_maps[i].bm_len;
794  bip->bli_formats[i].blf_map_size = map_size;
795  }
796 
797 #ifdef XFS_TRANS_DEBUG
798  /*
799  * Allocate the arrays for tracking what needs to be logged
800  * and what our callers request to be logged. bli_orig
801  * holds a copy of the original, clean buffer for comparison
802  * against, and bli_logged keeps a 1 bit flag per byte in
803  * the buffer to indicate which bytes the callers have asked
804  * to have logged.
805  */
806  bip->bli_orig = kmem_alloc(BBTOB(bp->b_length), KM_SLEEP);
807  memcpy(bip->bli_orig, bp->b_addr, BBTOB(bp->b_length));
808  bip->bli_logged = kmem_zalloc(BBTOB(bp->b_length) / NBBY, KM_SLEEP);
809 #endif
810 
811  /*
812  * Put the buf item into the list of items attached to the
813  * buffer at the front.
814  */
815  if (bp->b_fspriv)
816  bip->bli_item.li_bio_list = bp->b_fspriv;
817  bp->b_fspriv = bip;
818 }
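The sizing arithmetic above rounds twice: bytes up to whole 128-byte chunks, then chunks up to whole 32-bit bitmap words. A worked sketch, assuming 512-byte basic blocks for BBTOB() and the header values of XFS_BLF_CHUNK and NBWORD:

#include <stdio.h>

#define BBSHIFT		9		/* 512-byte basic blocks */
#define BBTOB(bbs)	((bbs) << BBSHIFT)
#define XFS_BLF_CHUNK	128
#define NBWORD		32		/* bits per bitmap word */
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	int bm_len = 16;	/* segment length in basic blocks: 8 KiB */

	int chunks = DIV_ROUND_UP(BBTOB(bm_len), XFS_BLF_CHUNK);
	int map_size = DIV_ROUND_UP(chunks, NBWORD);

	/* 8192 bytes / 128 = 64 chunks; 64 bits fit in 2 32-bit words */
	printf("chunks = %d, map_size = %d words\n", chunks, map_size);
	return 0;
}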
819 
820 
821 /*
822  * Mark bytes first through last inclusive as dirty in the given
823  * segment's bitmap.
824  */
825 void
826 xfs_buf_item_log_segment(
827  struct xfs_buf_log_item *bip,
828  uint first,
829  uint last,
830  uint *map)
831 {
832  uint first_bit;
833  uint last_bit;
834  uint bits_to_set;
835  uint bits_set;
836  uint word_num;
837  uint *wordp;
838  uint bit;
839  uint end_bit;
840  uint mask;
841 
842  /*
843  * Convert byte offsets to bit numbers.
844  */
845  first_bit = first >> XFS_BLF_SHIFT;
846  last_bit = last >> XFS_BLF_SHIFT;
847 
848  /*
849  * Calculate the total number of bits to be set.
850  */
851  bits_to_set = last_bit - first_bit + 1;
852 
853  /*
854  * Get a pointer to the first word in the bitmap
855  * to set a bit in.
856  */
857  word_num = first_bit >> BIT_TO_WORD_SHIFT;
858  wordp = &map[word_num];
859 
860  /*
861  * Calculate the starting bit in the first word.
862  */
863  bit = first_bit & (uint)(NBWORD - 1);
864 
865  /*
866  * First set any bits in the first word of our range.
867  * If it starts at bit 0 of the word, it will be
868  * set below rather than here. That is what the variable
869  * bit tells us. The variable bits_set tracks the number
870  * of bits that have been set so far. End_bit is the number
871  * of the last bit to be set in this word plus one.
872  */
873  if (bit) {
874  end_bit = MIN(bit + bits_to_set, (uint)NBWORD);
875  mask = ((1 << (end_bit - bit)) - 1) << bit;
876  *wordp |= mask;
877  wordp++;
878  bits_set = end_bit - bit;
879  } else {
880  bits_set = 0;
881  }
882 
883  /*
884  * Now set bits a whole word at a time that are between
885  * first_bit and last_bit.
886  */
887  while ((bits_to_set - bits_set) >= NBWORD) {
888  *wordp |= 0xffffffff;
889  bits_set += NBWORD;
890  wordp++;
891  }
892 
893  /*
894  * Finally, set any bits left to be set in one last partial word.
895  */
896  end_bit = bits_to_set - bits_set;
897  if (end_bit) {
898  mask = (1 << end_bit) - 1;
899  *wordp |= mask;
900  }
901 
902  xfs_buf_item_log_debug(bip, first, last);
903 }
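The routine sets the bit range in three phases: a partial leading word, any number of whole words, and a partial trailing word. A self-contained user-space version of the same masking logic, with a worked example spanning all three phases:

#include <stdio.h>

#define NBWORD	32
#define MIN(a, b)	((a) < (b) ? (a) : (b))

/* set bits [first_bit, last_bit] in map, mirroring the three-phase logic */
static void set_range(unsigned int *map, unsigned int first_bit,
		      unsigned int last_bit)
{
	unsigned int bits_to_set = last_bit - first_bit + 1;
	unsigned int *wordp = &map[first_bit / NBWORD];
	unsigned int bit = first_bit % NBWORD;
	unsigned int bits_set, end_bit, mask;

	/* phase 1: partial leading word, unless the range is word-aligned */
	if (bit) {
		end_bit = MIN(bit + bits_to_set, NBWORD);
		mask = ((1U << (end_bit - bit)) - 1) << bit;
		*wordp++ |= mask;
		bits_set = end_bit - bit;
	} else {
		bits_set = 0;
	}

	/* phase 2: whole words */
	while (bits_to_set - bits_set >= NBWORD) {
		*wordp++ |= 0xffffffff;
		bits_set += NBWORD;
	}

	/* phase 3: partial trailing word, starting at bit 0 */
	end_bit = bits_to_set - bits_set;
	if (end_bit)
		*wordp |= (1U << end_bit) - 1;
}

int main(void)
{
	unsigned int map[3] = { 0, 0, 0 };

	set_range(map, 30, 70);		/* spans a partial, a full, a partial word */
	printf("%08x %08x %08x\n", map[0], map[1], map[2]);
	/* expect: c0000000 ffffffff 0000007f */
	return 0;
}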
904 
905 /*
906  * Mark bytes first through last inclusive as dirty in the buf
907  * item's bitmap.
908  */
909 void
910 xfs_buf_item_log(
911  xfs_buf_log_item_t *bip,
912  uint first,
913  uint last)
914 {
915  int i;
916  uint start;
917  uint end;
918  struct xfs_buf *bp = bip->bli_buf;
919 
920  /*
921  * Mark the item as having some dirty data for
922  * quick reference in xfs_buf_item_dirty.
923  */
924  bip->bli_flags |= XFS_BLI_DIRTY;
925 
926  /*
927  * walk each buffer segment and mark them dirty appropriately.
928  */
929  start = 0;
930  for (i = 0; i < bip->bli_format_count; i++) {
931  if (start > last)
932  break;
933  end = start + BBTOB(bp->b_maps[i].bm_len);
934  if (first > end) {
935  start += BBTOB(bp->b_maps[i].bm_len);
936  continue;
937  }
938  if (first < start)
939  first = start;
940  if (end > last)
941  end = last;
942 
943  xfs_buf_item_log_segment(bip, first, end,
944  &bip->bli_formats[i].blf_data_map[0]);
945 
946  start += BBTOB(bp->b_maps[i].bm_len);
947  }
948 }
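The segment walk clamps the caller's byte range [first, last] into each map's byte span before marking bits, skipping segments that end before the range and stopping once a segment starts past it. A simplified user-space sketch of that clamping (local copies of the bounds instead of the in-place adjustment the kernel does):

#include <stdio.h>

#define BBTOB(bbs)	((bbs) << 9)	/* 512-byte basic blocks to bytes */

int main(void)
{
	int bm_len[2] = { 8, 8 };	/* two 4 KiB segments, in basic blocks */
	unsigned int first = 1000, last = 5000;	/* dirty byte range */
	unsigned int start = 0;
	int i;

	for (i = 0; i < 2; i++) {
		unsigned int end = start + BBTOB(bm_len[i]);
		unsigned int f = first, l = last;

		if (start > last)	/* segment begins past the range */
			break;
		if (first > end) {	/* segment ends before the range */
			start = end;
			continue;
		}
		if (f < start)		/* clamp into this segment */
			f = start;
		if (l > end)
			l = end;
		printf("segment %d: log bytes [%u, %u]\n", i, f, l);
		start = end;
	}
	return 0;
}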
949 
950 
951 /*
952  * Return 1 if the buffer has some data that has been logged (at any
953  * point, not just the current transaction) and 0 if not.
954  */
955 uint
956 xfs_buf_item_dirty(
957  xfs_buf_log_item_t *bip)
958 {
959  return (bip->bli_flags & XFS_BLI_DIRTY);
960 }
961 
962 STATIC void
963 xfs_buf_item_free(
964  xfs_buf_log_item_t *bip)
965 {
966 #ifdef XFS_TRANS_DEBUG
967  kmem_free(bip->bli_orig);
968  kmem_free(bip->bli_logged);
969 #endif /* XFS_TRANS_DEBUG */
970 
971  xfs_buf_item_free_format(bip);
972  kmem_zone_free(xfs_buf_item_zone, bip);
973 }
974 
975 /*
976  * This is called when the buf log item is no longer needed. It should
977  * free the buf log item associated with the given buffer and clear
978  * the buffer's pointer to the buf log item. If there are no more
979  * items in the list, clear the b_iodone field of the buffer (see
980  * xfs_buf_attach_iodone() below).
981  */
982 void
983 xfs_buf_item_relse(
984  xfs_buf_t *bp)
985 {
986  xfs_buf_log_item_t *bip;
987 
988  trace_xfs_buf_item_relse(bp, _RET_IP_);
989 
990  bip = bp->b_fspriv;
991  bp->b_fspriv = bip->bli_item.li_bio_list;
992  if (bp->b_fspriv == NULL)
993  bp->b_iodone = NULL;
994 
995  xfs_buf_rele(bp);
996  xfs_buf_item_free(bip);
997 }
998 
999 
1000 /*
1001  * Add the given log item with its callback to the list of callbacks
1002  * to be called when the buffer's I/O completes. If it is not set
1003  * already, set the buffer's b_iodone() routine to be
1004  * xfs_buf_iodone_callbacks() and link the log item into the list of
1005  * items rooted at b_fsprivate. Items are always added as the second
1006  * entry in the list if there is a first, because the buf item code
1007  * assumes that the buf log item is first.
1008  */
1009 void
1010 xfs_buf_attach_iodone(
1011  xfs_buf_t *bp,
1012  void (*cb)(xfs_buf_t *, xfs_log_item_t *),
1013  xfs_log_item_t *lip)
1014 {
1015  xfs_log_item_t *head_lip;
1016 
1017  ASSERT(xfs_buf_islocked(bp));
1018 
1019  lip->li_cb = cb;
1020  head_lip = bp->b_fspriv;
1021  if (head_lip) {
1022  lip->li_bio_list = head_lip->li_bio_list;
1023  head_lip->li_bio_list = lip;
1024  } else {
1025  bp->b_fspriv = lip;
1026  }
1027 
1028  ASSERT(bp->b_iodone == NULL ||
1029  bp->b_iodone == xfs_buf_iodone_callbacks);
1030  bp->b_iodone = xfs_buf_iodone_callbacks;
1031 }
1032 
1033 /*
1034  * We can have many callbacks on a buffer. Running the callbacks individually
1035  * can cause a lot of contention on the AIL lock, so we allow for a single
1036  * callback to be able to scan the remaining lip->li_bio_list for other items
1037  * of the same type and callback to be processed in the first call.
1038  *
1039  * As a result, the loop walking the callback list below will also modify the
1040  * list. It removes the first item from the list and then runs the callback.
1041  * The loop then restarts from the new head of the list. This allows the
1042  * callback to scan and modify the list attached to the buffer and we don't
1043  * have to care about maintaining a next item pointer.
1044  */
1045 STATIC void
1046 xfs_buf_do_callbacks(
1047  struct xfs_buf *bp)
1048 {
1049  struct xfs_log_item *lip;
1050 
1051  while ((lip = bp->b_fspriv) != NULL) {
1052  bp->b_fspriv = lip->li_bio_list;
1053  ASSERT(lip->li_cb != NULL);
1054  /*
1055  * Clear the next pointer so we don't have any
1056  * confusion if the item is added to another buf.
1057  * Don't touch the log item after calling its
1058  * callback, because it could have freed itself.
1059  */
1060  lip->li_bio_list = NULL;
1061  lip->li_cb(bp, lip);
1062  }
1063 }
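Because each iteration detaches the head item before running its callback, a callback can requeue, free, or walk the remaining list without breaking the loop. A user-space sketch of the same pop-the-head walk (hypothetical item structure):

#include <stdio.h>
#include <stddef.h>

struct item {
	struct item	*next;
	void		(*cb)(struct item *);
	int		id;
};

static void print_cb(struct item *ip)
{
	printf("callback for item %d\n", ip->id);
}

int main(void)
{
	struct item c = { NULL, print_cb, 3 };
	struct item b = { &c,   print_cb, 2 };
	struct item a = { &b,   print_cb, 1 };
	struct item *head = &a;

	/* pop the head, detach it, then run its callback -- the callback may
	 * modify what remains of the list without confusing the walk */
	while (head != NULL) {
		struct item *ip = head;

		head = ip->next;
		ip->next = NULL;	/* item may be re-queued elsewhere */
		ip->cb(ip);
	}
	return 0;
}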
1064 
1065 /*
1066  * This is the iodone() function for buffers which have had callbacks
1067  * attached to them by xfs_buf_attach_iodone(). It should remove each
1068  * log item from the buffer's list and call the callback of each in turn.
1069  * When done, the buffer's fsprivate field is set to NULL and the buffer
1070  * is unlocked with a call to iodone().
1071  */
1072 void
1073 xfs_buf_iodone_callbacks(
1074  struct xfs_buf *bp)
1075 {
1076  struct xfs_log_item *lip = bp->b_fspriv;
1077  struct xfs_mount *mp = lip->li_mountp;
1078  static ulong lasttime;
1079  static xfs_buftarg_t *lasttarg;
1080 
1081  if (likely(!xfs_buf_geterror(bp)))
1082  goto do_callbacks;
1083 
1084  /*
1085  * If we've already decided to shutdown the filesystem because of
1086  * I/O errors, there's no point in giving this a retry.
1087  */
1088  if (XFS_FORCED_SHUTDOWN(mp)) {
1089  xfs_buf_stale(bp);
1090  XFS_BUF_DONE(bp);
1091  trace_xfs_buf_item_iodone(bp, _RET_IP_);
1092  goto do_callbacks;
1093  }
1094 
1095  if (bp->b_target != lasttarg ||
1096  time_after(jiffies, (lasttime + 5*HZ))) {
1097  lasttime = jiffies;
1098  xfs_buf_ioerror_alert(bp, __func__);
1099  }
1100  lasttarg = bp->b_target;
1101 
1102  /*
1103  * If the write was asynchronous then no one will be looking for the
1104  * error. Clear the error state and write the buffer out again.
1105  *
1106  * XXX: This helps against transient write errors, but we need to find
1107  * a way to shut the filesystem down if the writes keep failing.
1108  *
1109  * In practice we'll shut the filesystem down soon, as non-transient
1110  * errors tend to affect the whole device and a failing log write
1111  * will make us give up. But we really ought to do better here.
1112  */
1113  if (XFS_BUF_ISASYNC(bp)) {
1114  ASSERT(bp->b_iodone != NULL);
1115 
1116  trace_xfs_buf_item_iodone_async(bp, _RET_IP_);
1117 
1118  xfs_buf_ioerror(bp, 0); /* errno of 0 unsets the flag */
1119 
1120  if (!XFS_BUF_ISSTALE(bp)) {
1121  bp->b_flags |= XBF_WRITE | XBF_ASYNC | XBF_DONE;
1122  xfs_buf_iorequest(bp);
1123  } else {
1124  xfs_buf_relse(bp);
1125  }
1126 
1127  return;
1128  }
1129 
1130  /*
1131  * If the write of the buffer was synchronous, we want to make
1132  * sure to return the error to the caller of xfs_bwrite().
1133  */
1134  xfs_buf_stale(bp);
1135  XFS_BUF_DONE(bp);
1136 
1137  trace_xfs_buf_error_relse(bp, _RET_IP_);
1138 
1139 do_callbacks:
1140  xfs_buf_do_callbacks(bp);
1141  bp->b_fspriv = NULL;
1142  bp->b_iodone = NULL;
1143  xfs_buf_ioend(bp, 0);
1144 }
1145 
1146 /*
1147  * This is the iodone() function for buffers which have been
1148  * logged. It is called when they are eventually flushed out.
1149  * It should remove the buf item from the AIL, and free the buf item.
1150  * It is called by xfs_buf_iodone_callbacks() above which will take
1151  * care of cleaning up the buffer itself.
1152  */
1153 void
1154 xfs_buf_iodone(
1155  struct xfs_buf *bp,
1156  struct xfs_log_item *lip)
1157 {
1158  struct xfs_ail *ailp = lip->li_ailp;
1159 
1160  ASSERT(BUF_ITEM(lip)->bli_buf == bp);
1161 
1162  xfs_buf_rele(bp);
1163 
1164  /*
1165  * If we are forcibly shutting down, this may well be
1166  * off the AIL already. That's because we simulate the
1167  * log-committed callbacks to unpin these buffers. Or we may never
1168  * have put this item on the AIL because the transaction was
1169  * aborted forcibly. xfs_trans_ail_delete() takes care of these.
1170  *
1171  * Either way, AIL is useless if we're forcing a shutdown.
1172  */
1173  spin_lock(&ailp->xa_lock);
1174  xfs_trans_ail_delete(ailp, lip, SHUTDOWN_CORRUPT_INCORE);
1175  xfs_buf_item_free(BUF_ITEM(lip));
1176 }