xfs_log_priv.h (Linux Kernel 3.7.1)
/*
 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#ifndef __XFS_LOG_PRIV_H__
#define __XFS_LOG_PRIV_H__

struct xfs_buf;
struct xlog;
struct xlog_ticket;
struct xfs_mount;

/*
 * Macros, structures, prototypes for internal log manager use.
 */

#define XLOG_MIN_ICLOGS		2
#define XLOG_MAX_ICLOGS		8
#define XLOG_HEADER_MAGIC_NUM	0xFEEDbabe	/* Invalid cycle number */
#define XLOG_VERSION_1		1
#define XLOG_VERSION_2		2		/* Large IClogs, Log sunit */
#define XLOG_VERSION_OKBITS	(XLOG_VERSION_1 | XLOG_VERSION_2)
#define XLOG_MIN_RECORD_BSIZE	(16*1024)	/* eventually 32k */
#define XLOG_BIG_RECORD_BSIZE	(32*1024)	/* 32k buffers */
#define XLOG_MAX_RECORD_BSIZE	(256*1024)
#define XLOG_HEADER_CYCLE_SIZE	(32*1024)	/* cycle data in header */
#define XLOG_MIN_RECORD_BSHIFT	14		/* 16384 == 1 << 14 */
#define XLOG_BIG_RECORD_BSHIFT	15		/* 32k == 1 << 15 */
#define XLOG_MAX_RECORD_BSHIFT	18		/* 256k == 1 << 18 */
#define XLOG_BTOLSUNIT(log, b)	(((b)+(log)->l_mp->m_sb.sb_logsunit-1) / \
				 (log)->l_mp->m_sb.sb_logsunit)
#define XLOG_LSUNITTOB(log, su)	((su) * (log)->l_mp->m_sb.sb_logsunit)
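/*
 * Illustration only (standalone sketch, not part of this header): how the
 * stripe-unit round-up above behaves. The sb_logsunit value of 32768 is a
 * hypothetical sample.
 */
#if 0
#include <stdio.h>

int main(void)
{
	int sunit = 32768;			/* stand-in for sb_logsunit */
	int b = 40000;				/* a byte count to round up */
	int units = (b + sunit - 1) / sunit;	/* what XLOG_BTOLSUNIT() computes */

	/* prints "40000 bytes -> 2 units -> 65536 bytes" */
	printf("%d bytes -> %d units -> %d bytes\n", b, units, units * sunit);
	return 0;
}
#endif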

#define XLOG_HEADER_SIZE	512

#define XLOG_REC_SHIFT(log) \
	BTOBB(1 << (xfs_sb_version_haslogv2(&log->l_mp->m_sb) ? \
	 XLOG_MAX_RECORD_BSHIFT : XLOG_BIG_RECORD_BSHIFT))
#define XLOG_TOTAL_REC_SHIFT(log) \
	BTOBB(XLOG_MAX_ICLOGS << (xfs_sb_version_haslogv2(&log->l_mp->m_sb) ? \
	 XLOG_MAX_RECORD_BSHIFT : XLOG_BIG_RECORD_BSHIFT))

static inline xfs_lsn_t xlog_assign_lsn(uint cycle, uint block)
{
	return ((xfs_lsn_t)cycle << 32) | block;
}

static inline uint xlog_get_cycle(char *ptr)
{
	if (be32_to_cpu(*(__be32 *)ptr) == XLOG_HEADER_MAGIC_NUM)
		return be32_to_cpu(*((__be32 *)ptr + 1));
	else
		return be32_to_cpu(*(__be32 *)ptr);
}
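/*
 * Illustration only (standalone sketch): an LSN packs the cycle number into
 * the high 32 bits and the block number into the low 32, as xlog_assign_lsn()
 * above shows, so cracking an assigned LSN returns the original components.
 */
#if 0
#include <stdint.h>
#include <assert.h>

int main(void)
{
	uint32_t cycle = 7, block = 4096;
	uint64_t lsn = ((uint64_t)cycle << 32) | block;	/* xlog_assign_lsn() */

	assert((uint32_t)(lsn >> 32) == cycle);		/* cycle component */
	assert((uint32_t)(lsn & 0xffffffff) == block);	/* block component */
	return 0;
}
#endif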

#define BLK_AVG(blk1, blk2)	((blk1+blk2) >> 1)

#ifdef __KERNEL__

/*
 * get client id from packed copy.
 *
 * this hack is here because the xlog_pack code copies four bytes
 * of xlog_op_header containing the fields oh_clientid, oh_flags
 * and oh_res2 into the packed copy.
 *
 * later on this four byte chunk is treated as an int and the
 * client id is pulled out.
 *
 * this has endian issues, of course.
 */
static inline uint xlog_get_client_id(__be32 i)
{
	return be32_to_cpu(i) >> 24;
}
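/*
 * Illustration only (standalone sketch): the packed chunk begins with
 * oh_clientid, so loading the four bytes big-endian and shifting right by
 * 24 recovers the client id on any host. 0x69 is used as a sample value
 * (it matches XFS_TRANSACTION in xfs_log.h).
 */
#if 0
#include <stdio.h>
#include <stdint.h>

static uint32_t be32_load(const unsigned char *p)
{
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];
}

int main(void)
{
	/* oh_clientid, oh_flags, oh_res2 as they sit in the packed copy */
	unsigned char packed[4] = { 0x69, 0x00, 0x00, 0x00 };

	printf("client id: 0x%x\n", be32_load(packed) >> 24);	/* 0x69 */
	return 0;
}
#endif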

/*
 * In core log state
 */
#define XLOG_STATE_ACTIVE    0x0001 /* Current IC log being written to */
#define XLOG_STATE_WANT_SYNC 0x0002 /* Want to sync this iclog; no more writes */
#define XLOG_STATE_SYNCING   0x0004 /* This IC log is syncing */
#define XLOG_STATE_DONE_SYNC 0x0008 /* Done syncing to disk */
#define XLOG_STATE_DO_CALLBACK \
			     0x0010 /* Process callback functions */
#define XLOG_STATE_CALLBACK  0x0020 /* Callback functions now */
#define XLOG_STATE_DIRTY     0x0040 /* Dirty IC log, not ready for ACTIVE status */
#define XLOG_STATE_IOERROR   0x0080 /* IO error happened in sync'ing log */
#define XLOG_STATE_ALL	     0x7FFF /* All possible valid flags */
#define XLOG_STATE_NOTUSED   0x8000 /* This IC log not being used */
#endif	/* __KERNEL__ */

/*
 * Flags to log operation header
 *
 * The first write of a new transaction will be preceded with a start
 * record, XLOG_START_TRANS. Once a transaction is committed, a commit
 * record is written, XLOG_COMMIT_TRANS. If a single region cannot fit into
 * the remainder of the current active in-core log, it is split up into
 * multiple regions. Each partial region will be marked with
 * XLOG_CONTINUE_TRANS until the last one, which gets marked with
 * XLOG_END_TRANS.
 */
#define XLOG_START_TRANS	0x01	/* Start a new transaction */
#define XLOG_COMMIT_TRANS	0x02	/* Commit this transaction */
#define XLOG_CONTINUE_TRANS	0x04	/* Cont this trans into new region */
#define XLOG_WAS_CONT_TRANS	0x08	/* Cont this trans into new region */
#define XLOG_END_TRANS		0x10	/* End a continued transaction */
#define XLOG_UNMOUNT_TRANS	0x20	/* Unmount a filesystem transaction */
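/*
 * Illustration only (standalone sketch): how the continuation flags mark a
 * region split across in-core logs. Every fragment but the last carries
 * XLOG_CONTINUE_TRANS; the final one carries XLOG_END_TRANS. The fragment
 * count is hypothetical.
 */
#if 0
#include <stdio.h>

int main(void)
{
	int nfrags = 3;		/* hypothetical number of fragments */

	for (int i = 0; i < nfrags; i++) {
		unsigned oh_flags = (i == nfrags - 1)
				? 0x10		/* XLOG_END_TRANS */
				: 0x04;		/* XLOG_CONTINUE_TRANS */
		printf("fragment %d: oh_flags 0x%02x\n", i, oh_flags);
	}
	return 0;
}
#endif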

#ifdef __KERNEL__
/*
 * Flags to log ticket
 */
#define XLOG_TIC_INITED		0x1	/* has been initialized */
#define XLOG_TIC_PERM_RESERV	0x2	/* permanent reservation */

#define XLOG_TIC_FLAGS \
	{ XLOG_TIC_INITED,	"XLOG_TIC_INITED" }, \
	{ XLOG_TIC_PERM_RESERV,	"XLOG_TIC_PERM_RESERV" }

#endif	/* __KERNEL__ */

#define XLOG_UNMOUNT_TYPE	0x556e	/* Un for Unmount */

/*
 * Flags for log structure
 */
#define XLOG_CHKSUM_MISMATCH	0x1	/* used only during recovery */
#define XLOG_ACTIVE_RECOVERY	0x2	/* in the middle of recovery */
#define XLOG_RECOVERY_NEEDED	0x4	/* log was recovered */
#define XLOG_IO_ERROR		0x8	/* log hit an I/O error, and being
					   shutdown */
#define XLOG_TAIL_WARN		0x10	/* log tail verify warning issued */

typedef __uint32_t xlog_tid_t;

#ifdef __KERNEL__
/*
 * Below are states for covering allocation transactions.
 * By covering, we mean changing the h_tail_lsn in the last on-disk
 * log write such that no allocation transactions will be re-done during
 * recovery after a system crash. Recovery starts at the last on-disk
 * log write.
 *
 * These states are used to insert dummy log entries to cover
 * space allocation transactions which can undo non-transactional changes
 * after a crash. Writes to a file with space
 * already allocated do not result in any transactions. Allocations
 * might include space beyond the EOF. So if we just push the EOF a
 * little, the last transaction for the file could contain the wrong
 * size. If there is no file system activity after an allocation
 * transaction and the system crashes, the allocation transaction
 * will get replayed and the file will be truncated. This could
 * be hours/days/... after the allocation occurred.
 *
 * The fix for this is to do two dummy transactions when the
 * system is idle. We need two dummy transactions because the h_tail_lsn
 * in the log record header needs to point beyond the last possible
 * non-dummy transaction. The first dummy changes the h_tail_lsn to
 * the first transaction before the dummy. The second dummy causes
 * h_tail_lsn to point to the first dummy. Recovery starts at h_tail_lsn.
 *
 * These dummy transactions get committed when everything
 * is idle (after there has been some activity).
 *
 * There are 5 states used to control this.
 *
 * IDLE -- no logging has been done on the file system or
 *	we are done covering previous transactions.
 * NEED -- logging has occurred and we need a dummy transaction
 *	when the log becomes idle.
 * DONE -- we were in the NEED state and have committed a dummy
 *	transaction.
 * NEED2 -- we detected that a dummy transaction has gone to the
 *	on disk log with no other transactions.
 * DONE2 -- we committed a dummy transaction when in the NEED2 state.
 *
 * There are two places where we switch states:
 *
 * 1.) In xfs_sync, when we detect an idle log and are in NEED or NEED2.
 *	We commit the dummy transaction and switch to DONE or DONE2,
 *	respectively. In all other states, we don't do anything.
 *
 * 2.) When we finish writing the on-disk log (xlog_state_clean_log).
 *
 *	No matter what state we are in, if this isn't the dummy
 *	transaction going out, the next state is NEED.
 *	So, if we aren't in the DONE or DONE2 states, the next state
 *	is NEED. We can't be finishing a write of the dummy record
 *	unless it was committed and the state switched to DONE or DONE2.
 *
 *	If we are in the DONE state and this was a write of the
 *	dummy transaction, we move to NEED2.
 *
 *	If we are in the DONE2 state and this was a write of the
 *	dummy transaction, we move to IDLE.
 *
 * The write of a single dummy transaction can have a file space
 * allocation appended to it. When this happens, the log recovery
 * code replays the space allocation and a file could be truncated.
 * This is why we have the NEED2 and DONE2 states before going idle.
 */

#define XLOG_STATE_COVER_IDLE	0
#define XLOG_STATE_COVER_NEED	1
#define XLOG_STATE_COVER_DONE	2
#define XLOG_STATE_COVER_NEED2	3
#define XLOG_STATE_COVER_DONE2	4

#define XLOG_COVER_OPS		5
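/*
 * Illustration only (a sketch, not the kernel's xlog_state_clean_log()):
 * the covering state transitions described in the comment above, taken at
 * the point an on-disk log write completes.
 */
#if 0
static int xlog_cover_next_state(int state, int was_dummy_write)
{
	/* anything but the dummy record restarts covering */
	if (!was_dummy_write)
		return XLOG_STATE_COVER_NEED;

	switch (state) {
	case XLOG_STATE_COVER_DONE:
		return XLOG_STATE_COVER_NEED2;	/* first dummy reached disk */
	case XLOG_STATE_COVER_DONE2:
		return XLOG_STATE_COVER_IDLE;	/* second dummy reached disk */
	default:
		return XLOG_STATE_COVER_NEED;
	}
}
#endif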


/* Ticket reservation region accounting */
#define XLOG_TIC_LEN_MAX	15

/*
 * Reservation region
 * As would be stored in xfs_log_iovec but without the i_addr which
 * we don't care about.
 */
typedef struct xlog_res {
	uint	r_len;	/* region length		:4 */
	uint	r_type;	/* region's transaction type	:4 */
} xlog_res_t;

typedef struct xlog_ticket {
	struct list_head	t_queue;	/* reserve/write queue */
	struct task_struct	*t_task;	/* task that owns this ticket */
	xlog_tid_t		t_tid;		/* transaction identifier	: 4 */
	atomic_t		t_ref;		/* ticket reference count	: 4 */
	int			t_curr_res;	/* current reservation in bytes : 4 */
	int			t_unit_res;	/* unit reservation in bytes	: 4 */
	char			t_ocnt;		/* original count		: 1 */
	char			t_cnt;		/* current count		: 1 */
	char			t_clientid;	/* who does this belong to;	: 1 */
	char			t_flags;	/* properties of reservation	: 1 */
	uint			t_trans_type;	/* transaction type		: 4 */

	/* reservation array fields */
	uint			t_res_num;	/* num in array			: 4 */
	uint			t_res_num_ophdrs; /* num op hdrs		: 4 */
	uint			t_res_arr_sum;	/* array sum			: 4 */
	uint			t_res_o_flow;	/* sum overflow			: 4 */
	xlog_res_t		t_res_arr[XLOG_TIC_LEN_MAX]; /* array of res	: 8 * 15 */
} xlog_ticket_t;

#endif

typedef struct xlog_op_header {
	__be32	oh_tid;		/* transaction id of operation	: 4 b */
	__be32	oh_len;		/* bytes in data region		: 4 b */
	__u8	oh_clientid;	/* who sent me this		: 1 b */
	__u8	oh_flags;	/*				: 1 b */
	__u16	oh_res2;	/* 32 bit align			: 2 b */
} xlog_op_header_t;

/* valid values for h_fmt */
#define XLOG_FMT_UNKNOWN	0
#define XLOG_FMT_LINUX_LE	1
#define XLOG_FMT_LINUX_BE	2
#define XLOG_FMT_IRIX_BE	3

/* our fmt */
#ifdef XFS_NATIVE_HOST
#define XLOG_FMT	XLOG_FMT_LINUX_BE
#else
#define XLOG_FMT	XLOG_FMT_LINUX_LE
#endif

typedef struct xlog_rec_header {
	__be32	h_magicno;	/* log record (LR) identifier		: 4 */
	__be32	h_cycle;	/* write cycle of log			: 4 */
	__be32	h_version;	/* LR version				: 4 */
	__be32	h_len;		/* len in bytes; should be 64-bit aligned: 4 */
	__be64	h_lsn;		/* lsn of this LR			: 8 */
	__be64	h_tail_lsn;	/* lsn of 1st LR w/ buffers not committed: 8 */
	__be32	h_chksum;	/* may not be used; non-zero if used	: 4 */
	__be32	h_prev_block;	/* block number to previous LR		: 4 */
	__be32	h_num_logops;	/* number of log operations in this LR	: 4 */
	__be32	h_cycle_data[XLOG_HEADER_CYCLE_SIZE / BBSIZE];
	/* new fields */
	__be32	h_fmt;		/* format of log record			: 4 */
	uuid_t	h_fs_uuid;	/* uuid of FS				: 16 */
	__be32	h_size;		/* iclog size				: 4 */
} xlog_rec_header_t;

typedef struct xlog_rec_ext_header {
	__be32	xh_cycle;	/* write cycle of log			: 4 */
	__be32	xh_cycle_data[XLOG_HEADER_CYCLE_SIZE / BBSIZE];	/*	: 256 */
} xlog_rec_ext_header_t;

#ifdef __KERNEL__

/*
 * Quite misnamed, because this union lays out the actual on-disk log buffer.
 */
typedef union xlog_in_core2 {
	xlog_rec_header_t	hic_header;
	xlog_rec_ext_header_t	hic_xheader;
	char			hic_sector[XLOG_HEADER_SIZE];
} xlog_in_core_2_t;

/*
 * - A log record header is 512 bytes. There is plenty of room to grow the
 *	xlog_rec_header_t into the reserved space.
 * - ic_data follows, so a write to disk can start at the beginning of
 *	the iclog.
 * - ic_force_wait is used to implement synchronous forcing of the iclog to disk.
 * - ic_next is the pointer to the next iclog in the ring.
 * - ic_bp is a pointer to the buffer used to write this incore log to disk.
 * - ic_log is a pointer back to the global log structure.
 * - ic_callback is a linked list of callback function/argument pairs to be
 *	called after an iclog finishes writing.
 * - ic_size is the full size of the header plus data.
 * - ic_offset is the current number of bytes written to in this iclog.
 * - ic_refcnt is bumped when someone is writing to the log.
 * - ic_state is the state of the iclog.
 *
 * Because of cacheline contention on large machines, we need to separate
 * various resources onto different cachelines. To start with, make the
 * structure cacheline aligned. The following fields can be contended on
 * by independent processes:
 *
 *	- ic_callback_*
 *	- ic_refcnt
 *	- fields protected by the global l_icloglock
 *
 * so we need to ensure that these fields are located in separate cachelines.
 * We'll put all the read-only and l_icloglock fields in the first cacheline,
 * and move everything else out to subsequent cachelines.
 */
typedef struct xlog_in_core {
	wait_queue_head_t	ic_force_wait;
	wait_queue_head_t	ic_write_wait;
	struct xlog_in_core	*ic_next;
	struct xlog_in_core	*ic_prev;
	struct xfs_buf		*ic_bp;
	struct xlog		*ic_log;
	int			ic_size;
	int			ic_offset;
	int			ic_bwritecnt;
	unsigned short		ic_state;
	char			*ic_datap;	/* pointer to iclog data */

	/* Callback structures need their own cacheline */
	spinlock_t		ic_callback_lock ____cacheline_aligned_in_smp;
	xfs_log_callback_t	*ic_callback;
	xfs_log_callback_t	**ic_callback_tail;

	/* reference counts need their own cacheline */
	atomic_t		ic_refcnt ____cacheline_aligned_in_smp;
	xlog_in_core_2_t	*ic_data;
#define ic_header	ic_data->hic_header
} xlog_in_core_t;

/*
 * The CIL context is used to aggregate per-transaction details as well as be
 * passed to the iclog for checkpoint post-commit processing. After being
 * passed to the iclog, another context needs to be allocated for tracking the
 * next set of transactions to be aggregated into a checkpoint.
 */
struct xfs_cil;

struct xfs_cil_ctx {
	struct xfs_cil		*cil;
	xfs_lsn_t		sequence;	/* chkpt sequence # */
	xfs_lsn_t		start_lsn;	/* first LSN of chkpt commit */
	xfs_lsn_t		commit_lsn;	/* chkpt commit record lsn */
	struct xlog_ticket	*ticket;	/* chkpt ticket */
	int			nvecs;		/* number of regions */
	int			space_used;	/* aggregate size of regions */
	struct list_head	busy_extents;	/* busy extents in chkpt */
	struct xfs_log_vec	*lv_chain;	/* logvecs being pushed */
	xfs_log_callback_t	log_cb;		/* completion callback hook */
	struct list_head	committing;	/* ctx committing list */
};

/*
 * Committed Item List structure
 *
 * This structure is used to track log items that have been committed but not
 * yet written into the log. It is used only when the delayed logging mount
 * option is enabled.
 *
 * This structure tracks the list of committing checkpoint contexts so
 * we can avoid the problem of having to hold out new transactions during a
 * flush until we have the commit record LSN of the checkpoint. We can
 * traverse the list of committing contexts in xlog_cil_push_lsn() to find a
 * sequence match and extract the commit LSN directly from there. If the
 * checkpoint is still in the process of committing, we can block waiting for
 * the commit LSN to be determined as well. This should make synchronous
 * operations almost as efficient as the old logging methods.
 */
struct xfs_cil {
	struct xlog		*xc_log;
	struct list_head	xc_cil;
	spinlock_t		xc_cil_lock;
	struct xfs_cil_ctx	*xc_ctx;
	struct rw_semaphore	xc_ctx_lock;
	struct list_head	xc_committing;
	wait_queue_head_t	xc_commit_wait;
	xfs_lsn_t		xc_current_sequence;
	struct work_struct	xc_push_work;
	xfs_lsn_t		xc_push_seq;
};
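/*
 * Illustration only (a sketch of the lookup the comment above describes;
 * the real logic lives in xlog_cil_force_lsn(), with the locking and the
 * sleep on xc_commit_wait elided here): walk the committing list to find
 * the commit LSN for a given checkpoint sequence.
 */
#if 0
static xfs_lsn_t
xlog_cil_lookup_commit_lsn(struct xfs_cil *cil, xfs_lsn_t sequence)
{
	struct xfs_cil_ctx *ctx;

	list_for_each_entry(ctx, &cil->xc_committing, committing) {
		if (ctx->sequence != sequence)
			continue;
		/* if commit_lsn is not yet set, the real code sleeps on
		 * xc_commit_wait until the commit record is written */
		return ctx->commit_lsn;
	}
	return 0;	/* sequence not committing: already complete */
}
#endif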

/*
 * The amount of log space we allow the CIL to aggregate is difficult to size.
 * Whatever we choose, we have to make sure we can get a reservation for the
 * log space effectively, that it is large enough to capture sufficient
 * relogging to reduce log buffer IO significantly, but it is not too large for
 * the log or induces too much latency when writing out through the iclogs. We
 * track both space consumed and the number of vectors in the checkpoint
 * context, so we need to decide which to use for limiting.
 *
 * Every log buffer we write out during a push needs a header reserved, which
 * is at least one sector and more for v2 logs. Hence we need a reservation of
 * at least 512 bytes per 32k of log space just for the LR headers. That means
 * 16KB of reservation per megabyte of delayed logging space we will consume,
 * plus various headers. The number of headers will vary based on the num of
 * io vectors, so limiting on a specific number of vectors is going to result
 * in transactions of varying size. IOWs, it is more consistent to track and
 * limit space consumed in the log rather than by the number of objects being
 * logged in order to prevent checkpoint ticket overruns.
 *
 * Further, use of static reservations through the log grant mechanism is
 * problematic. It introduces a lot of complexity (e.g. reserve grant vs write
 * grant) and a significant deadlock potential because regranting write space
 * can block on log pushes. Hence if we have to regrant log space during a log
 * push, we can deadlock.
 *
 * However, we can avoid this by use of a dynamic "reservation stealing"
 * technique during transaction commit whereby unused reservation space in the
 * transaction ticket is transferred to the CIL ctx commit ticket to cover the
 * space needed by the checkpoint transaction. This means that we never need to
 * specifically reserve space for the CIL checkpoint transaction, nor do we
 * need to regrant space once the checkpoint completes. This also means the
 * checkpoint transaction ticket is specific to the checkpoint context, rather
 * than the CIL itself.
 *
 * With dynamic reservations, we can effectively make up arbitrary limits for
 * the checkpoint size so long as they don't violate any other size rules.
 * Recovery imposes a rule that no transaction exceed half the log, so we are
 * limited by that. Furthermore, the log transaction reservation subsystem
 * tries to keep 25% of the log free, so we need to keep below that limit or we
 * risk running out of free log space to start any new transactions.
 *
 * In order to keep background CIL push efficient, we will set a lower
 * threshold at which background pushing is attempted without blocking current
 * transaction commits. A separate, higher bound defines when CIL pushes are
 * enforced to ensure we stay within our maximum checkpoint size bounds; the
 * gap between the two keeps background pushes cheap yet gives us plenty of
 * space for aggregation on large logs.
 */
#define XLOG_CIL_SPACE_LIMIT(log)	(log->l_logsize >> 3)
#define XLOG_CIL_HARD_SPACE_LIMIT(log)	(3 * (log->l_logsize >> 4))
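/*
 * Worked example (illustration only, hypothetical log size): for a 128 MB
 * log, XLOG_CIL_SPACE_LIMIT is 1/8th of the log (16 MB) and
 * XLOG_CIL_HARD_SPACE_LIMIT is 3/16ths (24 MB).
 */
#if 0
#include <stdio.h>

int main(void)
{
	int l_logsize = 128 << 20;	/* stand-in for log->l_logsize */

	printf("soft limit: %d MB\n", (l_logsize >> 3) >> 20);		/* 16 */
	printf("hard limit: %d MB\n", (3 * (l_logsize >> 4)) >> 20);	/* 24 */
	return 0;
}
#endif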

/*
 * Ticket grant locks, queues and accounting have their own cachelines
 * as these are quite hot and can be operated on concurrently.
 */
struct xlog_grant_head {
	spinlock_t		lock ____cacheline_aligned_in_smp;
	struct list_head	waiters;
	atomic64_t		grant;
};

/*
 * The reservation head lsn is not made up of a cycle number and block number.
 * Instead, it uses a cycle number and byte number. Logs don't expect to
 * overflow 31 bits worth of byte offset, so using a byte number will mean
 * that round off problems won't occur when releasing partial reservations.
 */
struct xlog {
	/* The following fields don't need locking */
	struct xfs_mount	*l_mp;		/* mount point */
	struct xfs_ail		*l_ailp;	/* AIL log is working with */
	struct xfs_cil		*l_cilp;	/* CIL log is working with */
	struct xfs_buf		*l_xbuf;	/* extra buffer for log
						 * wrapping */
	struct xfs_buftarg	*l_targ;	/* buftarg of log */
	uint			l_flags;
	uint			l_quotaoffs_flag; /* XFS_DQ_*, for QUOTAOFFs */
	struct list_head	*l_buf_cancel_table;
	int			l_iclog_hsize;	/* size of iclog header */
	int			l_iclog_heads;	/* # of iclog header sectors */
	uint			l_sectBBsize;	/* sector size in BBs (2^n) */
	int			l_iclog_size;	/* size of log in bytes */
	int			l_iclog_size_log; /* log power size of log */
	int			l_iclog_bufs;	/* number of iclog buffers */
	xfs_daddr_t		l_logBBstart;	/* start block of log */
	int			l_logsize;	/* size of log in bytes */
	int			l_logBBsize;	/* size of log in BB chunks */

	/* The following block of fields are changed while holding icloglock */
	wait_queue_head_t	l_flush_wait ____cacheline_aligned_in_smp;
						/* waiting for iclog flush */
	int			l_covered_state;/* state of "covering disk
						 * log entries" */
	xlog_in_core_t		*l_iclog;	/* head log queue */
	spinlock_t		l_icloglock;	/* grab to change iclog state */
	int			l_curr_cycle;	/* Cycle number of log writes */
	int			l_prev_cycle;	/* Cycle number before last
						 * block increment */
	int			l_curr_block;	/* current logical log block */
	int			l_prev_block;	/* previous logical log block */

	/*
	 * l_last_sync_lsn and l_tail_lsn are atomics so they can be set and
	 * read without needing to hold specific locks. To avoid operations
	 * contending with other hot objects, place each of them on a separate
	 * cacheline.
	 */
	/* lsn of last LR on disk */
	atomic64_t		l_last_sync_lsn ____cacheline_aligned_in_smp;
	/* lsn of 1st LR with unflushed buffers */
	atomic64_t		l_tail_lsn ____cacheline_aligned_in_smp;

	struct xlog_grant_head	l_reserve_head;
	struct xlog_grant_head	l_write_head;

	/* The following fields are used for debugging; need to hold icloglock */
#ifdef DEBUG
	char			*l_iclog_bak[XLOG_MAX_ICLOGS];
#endif

};

#define XLOG_BUF_CANCEL_BUCKET(log, blkno) \
	((log)->l_buf_cancel_table + ((__uint64_t)blkno % XLOG_BC_TABLE_SIZE))

#define XLOG_FORCED_SHUTDOWN(log)	((log)->l_flags & XLOG_IO_ERROR)

/* common routines */
extern int
xlog_recover(
	struct xlog		*log);
extern int
xlog_recover_finish(
	struct xlog		*log);
extern void
xlog_pack_data(
	struct xlog		*log,
	struct xlog_in_core	*iclog,
	int);

extern kmem_zone_t *xfs_log_ticket_zone;
struct xlog_ticket *
xlog_ticket_alloc(
	struct xlog		*log,
	int			unit_bytes,
	int			count,
	char			client,
	bool			permanent,
	xfs_km_flags_t		alloc_flags);

static inline void
xlog_write_adv_cnt(void **ptr, int *len, int *off, size_t bytes)
{
	*ptr += bytes;
	*len -= bytes;
	*off += bytes;
}
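/*
 * Illustration only (standalone sketch): xlog_write_adv_cnt() advances a
 * cursor through iclog data space, shrinking the remaining length and
 * growing the offset in lockstep. Note the kernel's *ptr += bytes relies on
 * GCC's void-pointer arithmetic extension; the sketch casts explicitly.
 */
#if 0
#include <assert.h>
#include <stddef.h>

static void adv_cnt(void **ptr, int *len, int *off, size_t bytes)
{
	*ptr = (char *)*ptr + bytes;
	*len -= bytes;
	*off += bytes;
}

int main(void)
{
	char buf[64];
	void *p = buf;
	int len = sizeof(buf), off = 0;

	adv_cnt(&p, &len, &off, 16);
	assert(p == buf + 16 && len == 48 && off == 16);
	return 0;
}
#endif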

void	xlog_print_tic_res(struct xfs_mount *mp, struct xlog_ticket *ticket);
int
xlog_write(
	struct xlog		*log,
	struct xfs_log_vec	*log_vector,
	struct xlog_ticket	*tic,
	xfs_lsn_t		*start_lsn,
	struct xlog_in_core	**commit_iclog,
	uint			flags);

/*
 * When we crack an atomic LSN, we sample it first so that the value will not
 * change while we are cracking it into the component values. This means we
 * will always get consistent component values to work from. This should always
 * be used to sample and crack LSNs that are stored and updated in atomic
 * variables.
 */
static inline void
xlog_crack_atomic_lsn(atomic64_t *lsn, uint *cycle, uint *block)
{
	xfs_lsn_t val = atomic64_read(lsn);

	*cycle = CYCLE_LSN(val);
	*block = BLOCK_LSN(val);
}

/*
 * Calculate and assign a value to an atomic LSN variable from component
 * pieces.
 */
static inline void
xlog_assign_atomic_lsn(atomic64_t *lsn, uint cycle, uint block)
{
	atomic64_set(lsn, xlog_assign_lsn(cycle, block));
}

/*
 * When we crack the grant head, we sample it first so that the value will not
 * change while we are cracking it into the component values. This means we
 * will always get consistent component values to work from.
 */
static inline void
xlog_crack_grant_head_val(int64_t val, int *cycle, int *space)
{
	*cycle = val >> 32;
	*space = val & 0xffffffff;
}

static inline void
xlog_crack_grant_head(atomic64_t *head, int *cycle, int *space)
{
	xlog_crack_grant_head_val(atomic64_read(head), cycle, space);
}

static inline int64_t
xlog_assign_grant_head_val(int cycle, int space)
{
	return ((int64_t)cycle << 32) | space;
}

static inline void
xlog_assign_grant_head(atomic64_t *head, int cycle, int space)
{
	atomic64_set(head, xlog_assign_grant_head_val(cycle, space));
}
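/*
 * Illustration only (standalone sketch): the grant head packs a cycle in
 * the high 32 bits and a byte offset (not a block number, per the comment
 * on the reservation head above) in the low 32, so assigning and then
 * cracking a value returns the original components. Sample values are
 * hypothetical.
 */
#if 0
#include <stdint.h>
#include <assert.h>

int main(void)
{
	int cycle = 3, space = 123456;			/* byte offset */
	int64_t head = ((int64_t)cycle << 32) | space;	/* assign_grant_head_val */

	int c = head >> 32;				/* crack_grant_head_val */
	int s = head & 0xffffffff;

	assert(c == cycle && s == space);
	return 0;
}
#endif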

/*
 * Committed Item List interfaces
 */
int
xlog_cil_init(struct xlog *log);
void
xlog_cil_init_post_recovery(struct xlog *log);
void
xlog_cil_destroy(struct xlog *log);

/*
 * CIL force routines
 */
xfs_lsn_t
xlog_cil_force_lsn(
	struct xlog	*log,
	xfs_lsn_t	sequence);

static inline void
xlog_cil_force(struct xlog *log)
{
	xlog_cil_force_lsn(log, log->l_cilp->xc_current_sequence);
}

/*
 * Unmount record type is used as a pseudo transaction type for the ticket.
 * Its value must be outside the range of XFS_TRANS_* values.
 */
#define XLOG_UNMOUNT_REC_TYPE	(-1U)

/*
 * Wrapper function for waiting on a wait queue serialised against wakeups
 * by a spinlock. This matches the semantics of all the wait queues used in the
 * log code.
 */
static inline void xlog_wait(wait_queue_head_t *wq, spinlock_t *lock)
{
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue_exclusive(wq, &wait);
	__set_current_state(TASK_UNINTERRUPTIBLE);
	spin_unlock(lock);
	schedule();
	remove_wait_queue(wq, &wait);
}
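/*
 * Typical usage pattern (a sketch with hypothetical caller state, not a
 * quote of any one call site): the caller samples state under the lock,
 * then lets xlog_wait() drop that lock before sleeping.
 *
 *	spin_lock(&log->l_icloglock);
 *	if (iclog->ic_state & XLOG_STATE_SYNCING)
 *		xlog_wait(&iclog->ic_force_wait, &log->l_icloglock);
 *	else
 *		spin_unlock(&log->l_icloglock);
 */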
#endif	/* __KERNEL__ */

#endif	/* __XFS_LOG_PRIV_H__ */