/*
 * Stub that expands to nothing — presumably the checking variant is
 * supplied under a DEBUG-style #ifdef elsewhere in the file.
 * NOTE(review): the alternate definition is outside this view; confirm.
 */
59 #define xlog_recover_check_summary(log)
84 xlog_buf_bbcount_valid(
88 return bbcount > 0 && bbcount <= log->l_logBBsize;
103 if (!xlog_buf_bbcount_valid(log, nbblks)) {
104 xfs_warn(log->l_mp,
"Invalid block length (0x%x) for buffer",
126 if (nbblks > 1 && log->l_sectBBsize > 1)
127 nbblks += log->l_sectBBsize;
128 nbblks =
round_up(nbblks, log->l_sectBBsize);
154 xfs_daddr_t
offset = blk_no & ((xfs_daddr_t)log->l_sectBBsize - 1);
173 if (!xlog_buf_bbcount_valid(log, nbblks)) {
174 xfs_warn(log->l_mp,
"Invalid block length (0x%x) for buffer",
180 blk_no =
round_down(blk_no, log->l_sectBBsize);
181 nbblks =
round_up(nbblks, log->l_sectBBsize);
212 *offset =
xlog_align(log, blk_no, nbblks, bp);
228 xfs_caddr_t orig_offset = bp->
b_addr;
259 if (!xlog_buf_bbcount_valid(log, nbblks)) {
260 xfs_warn(log->l_mp,
"Invalid block length (0x%x) for buffer",
266 blk_no =
round_down(blk_no, log->l_sectBBsize);
267 nbblks =
round_up(nbblks, log->l_sectBBsize);
295 xfs_debug(mp,
"%s: SB : uuid = %pU, fmt = %d\n",
296 __func__, &mp->m_sb.sb_uuid,
XLOG_FMT);
297 xfs_debug(mp,
" log : uuid = %pU, fmt = %d\n",
/*
 * Stub that expands to nothing — the xfs_debug() dump of SB/log uuid and
 * format (see the nearby xfs_debug calls) is presumably compiled in only
 * under a debug #ifdef branch not visible here.  TODO(review): confirm.
 */
301 #define xlog_header_check_dump(mp, head)
321 "dirty log written in incompatible format - can't recover");
328 "dirty log entry has mismatched uuid - can't recover");
353 xfs_warn(mp,
"nil uuid in log - IRIX style log");
355 xfs_warn(mp,
"log has mismatched uuid - can't recover");
375 SHUTDOWN_META_IO_ERROR);
391 xfs_daddr_t first_blk,
392 xfs_daddr_t *last_blk,
402 mid_blk =
BLK_AVG(first_blk, end_blk);
403 while (mid_blk != first_blk && mid_blk != end_blk) {
404 error =
xlog_bread(log, mid_blk, 1, bp, &offset);
407 mid_cycle = xlog_get_cycle(offset);
408 if (mid_cycle == cycle)
412 mid_blk =
BLK_AVG(first_blk, end_blk);
414 ASSERT((mid_blk == first_blk && mid_blk+1 == end_blk) ||
415 (mid_blk == end_blk && mid_blk-1 == first_blk));
433 xfs_daddr_t start_blk,
435 uint stop_on_cycle_no,
436 xfs_daddr_t *new_blk)
451 bufblks = 1 <<
ffs(nbblks);
452 while (bufblks > log->l_logBBsize)
456 if (bufblks < log->l_sectBBsize)
460 for (i = start_blk; i < start_blk + nbblks; i += bufblks) {
463 bcount =
min(bufblks, (start_blk + nbblks - i));
469 for (j = 0; j <
bcount; j++) {
470 cycle = xlog_get_cycle(buf);
471 if (cycle == stop_on_cycle_no) {
502 xfs_daddr_t start_blk,
503 xfs_daddr_t *last_blk,
512 int num_blks = *last_blk - start_blk;
515 ASSERT(start_blk != 0 || *last_blk != start_blk);
522 error =
xlog_bread(log, start_blk, num_blks, bp, &offset);
525 offset += ((num_blks - 1) <<
BBSHIFT);
528 for (i = (*last_blk) - 1; i >= 0; i--) {
532 "Log inconsistent (didn't find previous header)");
577 if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
587 if (*last_blk - i + extra_bblks !=
612 xfs_daddr_t *return_head_blk)
616 xfs_daddr_t new_blk, first_blk, start_blk, last_blk, head_blk;
618 uint first_half_cycle, last_half_cycle;
620 int error, log_bbnum = log->l_logBBsize;
624 *return_head_blk = first_blk;
632 xfs_warn(log->l_mp,
"totally zeroed log");
637 xfs_warn(log->l_mp,
"empty log check failed");
650 first_half_cycle = xlog_get_cycle(offset);
652 last_blk = head_blk = log_bbnum - 1;
653 error =
xlog_bread(log, last_blk, 1, bp, &offset);
657 last_half_cycle = xlog_get_cycle(offset);
658 ASSERT(last_half_cycle != 0);
671 if (first_half_cycle == last_half_cycle) {
697 head_blk = log_bbnum;
698 stop_on_cycle = last_half_cycle - 1;
722 stop_on_cycle = last_half_cycle;
724 &head_blk, last_half_cycle)))
736 if (head_blk >= num_scan_bblks) {
741 start_blk = head_blk - num_scan_bblks;
743 start_blk, num_scan_bblks,
744 stop_on_cycle, &new_blk)))
777 (xfs_daddr_t) num_scan_bblks >= head_blk);
778 start_blk = log_bbnum - (num_scan_bblks - head_blk);
780 num_scan_bblks - (
int)head_blk,
781 (stop_on_cycle - 1), &new_blk)))
796 start_blk, (
int)head_blk,
797 stop_on_cycle, &new_blk)))
809 if (head_blk >= num_scan_bblks) {
810 start_blk = head_blk - num_scan_bblks;
814 &head_blk, 0)) == -1) {
823 &head_blk, 0)) == -1) {
825 start_blk = log_bbnum - (num_scan_bblks - head_blk);
828 (xfs_daddr_t) log_bbnum-start_blk >= 0);
832 (
int)head_blk)) == -1) {
837 if (new_blk != log_bbnum)
844 if (head_blk == log_bbnum)
845 *return_head_blk = 0;
847 *return_head_blk = head_blk;
860 xfs_warn(log->l_mp,
"failed to find log head");
883 xfs_daddr_t *head_blk,
884 xfs_daddr_t *tail_blk)
891 xfs_daddr_t umount_data_blk;
892 xfs_daddr_t after_umount_blk;
907 if (*head_blk == 0) {
912 if (xlog_get_cycle(offset) == 0) {
923 for (i = (
int)(*head_blk) - 1; i >= 0; i--) {
940 for (i = log->l_logBBsize - 1; i >= (
int)(*head_blk); i--) {
953 xfs_warn(log->l_mp,
"%s: couldn't find sync record", __func__);
972 log->l_prev_block =
i;
973 log->l_curr_block = (
int)*head_blk;
979 xlog_assign_grant_head(&log->l_reserve_head.grant, log->l_curr_cycle,
980 BBTOB(log->l_curr_block));
981 xlog_assign_grant_head(&log->l_write_head.grant, log->l_curr_cycle,
982 BBTOB(log->l_curr_block));
995 if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
1010 after_umount_blk = (i + hblks + (
int)
1013 if (*head_blk == after_umount_blk &&
1015 umount_data_blk = (i + hblks) % log->l_logBBsize;
1016 error =
xlog_bread(log, umount_data_blk, 1, bp, &offset);
1027 xlog_assign_atomic_lsn(&log->l_tail_lsn,
1028 log->l_curr_cycle, after_umount_blk);
1029 xlog_assign_atomic_lsn(&log->l_last_sync_lsn,
1030 log->l_curr_cycle, after_umount_blk);
1031 *tail_blk = after_umount_blk;
1039 log->l_mp->m_flags |= XFS_MOUNT_WAS_CLEAN;
1069 xfs_warn(log->l_mp,
"failed to locate log tail");
1092 xfs_daddr_t *blk_no)
1096 uint first_cycle, last_cycle;
1097 xfs_daddr_t new_blk, last_blk, start_blk;
1098 xfs_daddr_t num_scan_bblks;
1099 int error, log_bbnum = log->l_logBBsize;
1111 first_cycle = xlog_get_cycle(offset);
1112 if (first_cycle == 0) {
1119 error =
xlog_bread(log, log_bbnum-1, 1, bp, &offset);
1123 last_cycle = xlog_get_cycle(offset);
1124 if (last_cycle != 0) {
1127 }
else if (first_cycle != 1) {
1134 "Log inconsistent or not a log (last==0, first!=1)");
1139 last_blk = log_bbnum-1;
1152 if (last_blk < num_scan_bblks)
1153 num_scan_bblks = last_blk;
1154 start_blk = last_blk - num_scan_bblks;
1163 (
int)num_scan_bblks, 0, &new_blk)))
1173 &last_blk, 0)) == -1) {
1207 xfs_sb_version_haslogv2(&log->l_mp->m_sb) ? 2 : 1);
1226 int sectbb = log->l_sectBBsize;
1238 bufblks = 1 <<
ffs(blocks);
1239 while (bufblks > log->l_logBBsize)
1243 if (bufblks < sectbb)
1252 if (balign != start_block) {
1257 j = start_block - balign;
1260 for (i = start_block; i <
end_block; i += bufblks) {
1263 bcount =
min(bufblks, end_block - start_block);
1264 endcount = bcount -
j;
1271 if (j == 0 && (start_block + endcount > ealign)) {
1272 offset = bp->
b_addr +
BBTOB(ealign - start_block);
1280 offset =
xlog_align(log, start_block, endcount, bp);
1281 for (; j < endcount; j++) {
1283 tail_cycle, tail_block);
1286 error =
xlog_bwrite(log, start_block, endcount, bp);
1289 start_block += endcount;
1319 int tail_cycle, head_cycle;
1320 int tail_block, head_block;
1321 int tail_distance, max_distance;
1327 head_cycle = log->l_curr_cycle;
1328 head_block = log->l_curr_block;
1336 if (head_cycle == tail_cycle) {
1344 if (
unlikely(head_block < tail_block || head_block >= log->l_logBBsize)) {
1349 tail_distance = tail_block + (log->l_logBBsize - head_block);
1356 if (
unlikely(head_block >= tail_block || head_cycle != (tail_cycle + 1))){
1361 tail_distance = tail_block - head_block;
1368 if (tail_distance <= 0) {
1369 ASSERT(tail_distance == 0);
1381 max_distance =
MIN(max_distance, tail_distance);
1383 if ((head_block + max_distance) <= log->l_logBBsize) {
1392 head_block, max_distance, tail_cycle,
1404 distance = log->l_logBBsize - head_block;
1406 head_block, distance, tail_cycle,
1420 distance = max_distance - (log->l_logBBsize - head_block);
1422 tail_cycle, tail_block);
1463 INIT_LIST_HEAD(&trans->
r_itemq);
1465 INIT_HLIST_NODE(&trans->
r_list);
1466 hlist_add_head(&trans->
r_list, head);
1476 INIT_LIST_HEAD(&item->
ri_list);
1488 xfs_caddr_t
ptr, old_ptr;
1491 if (list_empty(&trans->
r_itemq)) {
1506 memcpy(&ptr[old_len], dp, len);
1509 trace_xfs_log_recover_item_add_cont(log, trans, item, 0);
1539 if (list_empty(&trans->
r_itemq)) {
1542 xfs_warn(log->l_mp,
"%s: bad header magic number",
1571 "bad number of regions (%d) in inode log format",
1587 trace_xfs_log_recover_item_add(log, trans, item, 0);
1606 list_splice_init(&trans->
r_itemq, &sort_list);
1613 trace_xfs_log_recover_item_reorder_head(log,
1623 trace_xfs_log_recover_item_reorder_tail(log,
1629 "%s: unrecognized type of log operation",
1635 ASSERT(list_empty(&sort_list));
1664 trace_xfs_log_recover_buf_not_cancel(log, buf_f);
1672 bucket = XLOG_BUF_CANCEL_BUCKET(log, buf_f->
blf_blkno);
1677 trace_xfs_log_recover_buf_cancel_ref_inc(log, buf_f);
1688 trace_xfs_log_recover_buf_cancel_add(log, buf_f);
1715 if (log->l_buf_cancel_table ==
NULL) {
1727 bucket = XLOG_BUF_CANCEL_BUCKET(log, blkno);
1747 if (flags & XFS_BLF_CANCEL) {
1770 struct xfs_mount *mp,
1779 int reg_buf_offset = 0;
1780 int reg_buf_bytes = 0;
1781 int next_unlinked_offset;
1786 trace_xfs_log_recover_buf_inode_buf(mp->m_log, buf_f);
1789 for (i = 0; i < inodes_per_buf; i++) {
1790 next_unlinked_offset = (i * mp->m_sb.sb_inodesize) +
1793 while (next_unlinked_offset >=
1794 (reg_buf_offset + reg_buf_bytes)) {
1825 if (next_unlinked_offset < reg_buf_offset)
1830 ASSERT((reg_buf_offset + reg_buf_bytes) <=
1839 next_unlinked_offset - reg_buf_offset;
1840 if (
unlikely(*logged_nextp == 0)) {
1842 "Bad inode buffer log record (ptr = 0x%p, bp = 0x%p). "
1843 "Trying to replay bad (0) inode di_next_unlinked field.",
1851 next_unlinked_offset);
1852 *buffer_nextp = *logged_nextp;
1866 struct xfs_mount *mp,
1876 trace_xfs_log_recover_buf_reg_buf(mp->m_log, buf_f);
1903 "XFS: NULL dquot in %s.", __func__);
1908 "XFS: dquot too small (%d) in %s.",
1914 "dquot_buf_recover");
1937 struct xfs_mount *mp,
1965 "%s : XFS dquot ID 0x%x, magic 0x%x != 0x%x",
1972 "%s : XFS dquot ID 0x%x, version 0x%x != 0x%x",
1982 "%s : XFS dquot ID 0x%x, unknown flags 0x%x",
1990 "%s : ondisk-dquot 0x%p, ID mismatch: "
1991 "0x%x expected, found id 0x%x",
1996 if (!errs && ddq->
d_id) {
2003 "%s : Dquot ID 0x%x (0x%p) BLK TIMER NOT STARTED",
2014 "%s : Dquot ID 0x%x (0x%p) INODE TIMER NOT STARTED",
2025 "%s : Dquot ID 0x%x (0x%p) RTBLK TIMER NOT STARTED",
2036 xfs_notice(mp,
"Re-initializing dquot ID 0x%x",
id);
2042 ASSERT(flags & XFS_QMOPT_DQREPAIR);
2061 struct xfs_mount *mp,
2069 trace_xfs_log_recover_buf_dquot_buf(log, buf_f);
2074 if (mp->m_qflags == 0) {
2088 if (log->l_quotaoffs_flag & type)
2124 xfs_mount_t *mp = log->l_mp;
2135 trace_xfs_log_recover_buf_cancel(log, buf_f);
2139 trace_xfs_log_recover_buf_recover(log, buf_f);
2205 xfs_mount_t *mp = log->l_mp;
2234 trace_xfs_log_recover_inode_cancel(log, in_f);
2237 trace_xfs_log_recover_inode_recover(log, in_f);
2260 "%s: Bad inode magic number, dip = 0x%p, dino bp = 0x%p, ino = %Ld",
2261 __func__, dip, bp, in_f->
ilf_ino);
2271 "%s: Bad inode log record, rec ptr 0x%p, ino %Ld",
2272 __func__, item, in_f->
ilf_ino);
2290 trace_xfs_log_recover_inode_skip(log, in_f);
2305 "%s: Bad regular inode log record, rec ptr 0x%p, "
2306 "ino ptr = 0x%p, ino bp = 0x%p, ino %Ld",
2307 __func__, item, dip, bp, in_f->
ilf_ino);
2319 "%s: Bad dir inode log record, rec ptr 0x%p, "
2320 "ino ptr = 0x%p, ino bp = 0x%p, ino %Ld",
2321 __func__, item, dip, bp, in_f->
ilf_ino);
2331 "%s: Bad inode log record, rec ptr 0x%p, dino ptr 0x%p, "
2332 "dino bp 0x%p, ino %Ld, total extents = %d, nblocks = %Ld",
2333 __func__, item, dip, bp, in_f->
ilf_ino,
2344 "%s: Bad inode log record, rec ptr 0x%p, dino ptr 0x%p, "
2345 "dino bp 0x%p, ino %Ld, forkoff 0x%x", __func__,
2355 "%s: Bad inode log record length %d, rec ptr 0x%p",
2384 goto write_inode_buffer;
2392 switch (fields & XFS_ILOG_DFORK) {
2408 ASSERT((fields & XFS_ILOG_DFORK) == 0);
2443 xfs_warn(log->l_mp,
"%s: Invalid flag", __func__);
2498 xfs_mount_t *mp = log->l_mp;
2509 if (mp->m_qflags == 0)
2513 if (recddq ==
NULL) {
2514 xfs_alert(log->l_mp,
"NULL dquot in %s.", __func__);
2518 xfs_alert(log->l_mp,
"dquot too small (%d) in %s.",
2528 if (log->l_quotaoffs_flag & type)
2544 "xlog_recover_dquot_pass2 (log copy)");
2549 error = xfs_trans_read_buf(mp,
NULL, mp->m_ddev_targp, dq_f->
qlf_blkno,
2563 "xlog_recover_dquot_pass2");
2594 xfs_mount_t *mp = log->l_mp;
2595 xfs_efi_log_item_t *efip;
2602 &(efip->efi_format)))) {
2608 spin_lock(&log->l_ailp->xa_lock);
2612 xfs_trans_ail_update(log->l_ailp, &efip->efi_item, lsn);
2631 xfs_efi_log_item_t *efip =
NULL;
2632 xfs_log_item_t *
lip;
2635 struct xfs_ail *ailp = log->l_ailp;
2639 ((efd_formatp->
efd_nextents - 1) *
sizeof(xfs_extent_32_t)))) ||
2650 while (lip !=
NULL) {
2652 efip = (xfs_efi_log_item_t *)lip;
2653 if (efip->efi_format.efi_id == efi_id) {
2658 xfs_trans_ail_delete(ailp, lip,
2659 SHUTDOWN_CORRUPT_INCORE);
2688 for (i = 0; i < item->
ri_cnt; i++)
2718 xfs_warn(log->l_mp,
"%s: invalid item type (%d)",
2749 xfs_warn(log->l_mp,
"%s: invalid item type (%d)",
2768 int error = 0, error2;
2772 hlist_del(&trans->
r_list);
2785 &buffer_list, item);
2799 return error ? error : error2;
2808 xfs_warn(log->l_mp,
"%s: Unmount LR", __func__);
2845 while ((dp < lp) && num_logops) {
2851 xfs_warn(log->l_mp,
"%s: bad clientid 0x%x",
2859 if (trans ==
NULL) {
2865 xfs_warn(log->l_mp,
"%s: bad length 0x%x",
2866 __func__, be32_to_cpu(ohead->
oh_len));
2881 case XLOG_WAS_CONT_TRANS:
2887 xfs_warn(log->l_mp,
"%s: bad transaction",
2898 xfs_warn(log->l_mp,
"%s: bad flag 0x%x",
2920 xfs_efi_log_item_t *efip)
2922 xfs_efd_log_item_t *efdp;
2936 for (i = 0; i < efip->efi_format.efi_nextents; i++) {
2937 extp = &(efip->efi_format.efi_extents[
i]);
2940 if ((startblock_fsb == 0) ||
2942 (startblock_fsb >= mp->m_sb.sb_dblocks) ||
2943 (extp->
ext_len >= mp->m_sb.sb_agblocks)) {
2959 for (i = 0; i < efip->efi_format.efi_nextents; i++) {
2960 extp = &(efip->efi_format.efi_extents[
i]);
2968 set_bit(XFS_EFI_RECOVERED, &efip->efi_flags);
2999 xfs_log_item_t *
lip;
3000 xfs_efi_log_item_t *efip;
3008 while (lip !=
NULL) {
3024 efip = (xfs_efi_log_item_t *)lip;
3025 if (
test_bit(XFS_EFI_RECOVERED, &efip->efi_flags)) {
3084 xfs_warn(mp,
"%s: failed to clear agi %d. Continuing.", __func__, agno);
3090 struct xfs_mount *mp,
3097 struct xfs_inode *
ip;
3113 ASSERT(ip->i_d.di_nlink == 0);
3114 ASSERT(ip->i_d.di_mode != 0);
3124 ip->i_d.di_dmevmask = 0;
3174 mp_dmevmask = mp->m_dmevmask;
3177 for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
3207 agno, agino, bucket);
3213 mp->m_dmevmask = mp_dmevmask;
3221 struct xlog_in_core *iclog,
3228 up = (
__be32 *)iclog->ic_datap;
3230 for (i = 0; i < (size >> 2); i++) {
3237 #define xlog_pack_data_checksum(log, iclog, size)
3246 struct xlog_in_core *iclog,
3250 int size = iclog->ic_offset + roundoff;
3258 dp = iclog->ic_datap;
3259 for (i = 0; i <
BTOBB(size) &&
3261 iclog->ic_header.h_cycle_data[
i] = *(
__be32 *)dp;
3262 *(
__be32 *)dp = cycle_lsn;
3266 if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
3267 xlog_in_core_2_t *xhdr = iclog->ic_data;
3269 for ( ; i <
BTOBB(size); i++) {
3272 xhdr[
j].hic_xheader.xh_cycle_data[
k] = *(
__be32 *)dp;
3273 *(
__be32 *)dp = cycle_lsn;
3277 for (i = 1; i < log->l_iclog_heads; i++) {
3278 xhdr[
i].hic_xheader.xh_cycle = cycle_lsn;
3297 if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
3298 xlog_in_core_2_t *xhdr = (xlog_in_core_2_t *)rhead;
3302 *(
__be32 *)dp = xhdr[j].hic_xheader.xh_cycle_data[k];
3324 xfs_warn(log->l_mp,
"%s: unrecognised log version (%d).",
3336 if (
unlikely( blkno > log->l_logBBsize || blkno > INT_MAX )) {
3355 xfs_daddr_t head_blk,
3356 xfs_daddr_t tail_blk,
3363 int error = 0, h_size;
3364 int bblks, split_bblks;
3365 int hblks, split_hblks, wrapped_hblks;
3368 ASSERT(head_blk != tail_blk);
3374 if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
3384 error =
xlog_bread(log, tail_blk, 1, hbp, &offset);
3404 ASSERT(log->l_sectBBsize == 1);
3418 memset(rhash, 0,
sizeof(rhash));
3419 if (tail_blk <= head_blk) {
3420 for (blk_no = tail_blk; blk_no < head_blk; ) {
3421 error =
xlog_bread(log, blk_no, hblks, hbp, &offset);
3432 error =
xlog_bread(log, blk_no + hblks, bblks, dbp,
3439 rhash, rhead, offset, pass)))
3441 blk_no += bblks + hblks;
3450 while (blk_no < log->l_logBBsize) {
3457 if (blk_no + hblks <= log->l_logBBsize) {
3465 if (blk_no != log->l_logBBsize) {
3468 split_hblks = log->l_logBBsize - (
int)blk_no;
3489 wrapped_hblks = hblks - split_hblks;
3492 offset +
BBTOB(split_hblks));
3498 split_hblks ? blk_no : 0);
3506 if (blk_no + bblks <= log->l_logBBsize) {
3516 if (blk_no != log->l_logBBsize) {
3522 log->l_logBBsize - (
int)blk_no;
3544 bblks - split_bblks, dbp,
3545 offset +
BBTOB(split_bblks));
3551 rhead, offset, pass)))
3556 ASSERT(blk_no >= log->l_logBBsize);
3557 blk_no -= log->l_logBBsize;
3560 while (blk_no < head_blk) {
3561 error =
xlog_bread(log, blk_no, hblks, hbp, &offset);
3571 error =
xlog_bread(log, blk_no+hblks, bblks, dbp,
3578 rhead, offset, pass)))
3580 blk_no += bblks + hblks;
3607 xfs_daddr_t head_blk,
3608 xfs_daddr_t tail_blk)
3612 ASSERT(head_blk != tail_blk);
3622 INIT_LIST_HEAD(&log->l_buf_cancel_table[i]);
3628 log->l_buf_cancel_table =
NULL;
3642 ASSERT(list_empty(&log->l_buf_cancel_table[i]));
3647 log->l_buf_cancel_table =
NULL;
3658 xfs_daddr_t head_blk,
3659 xfs_daddr_t tail_blk)
3675 if (XFS_FORCED_SHUTDOWN(log->l_mp)) {
3709 sbp = &log->l_mp->m_sb;
3712 ASSERT(xfs_sb_good_version(sbp));
3716 xfs_icsb_reinit_counters(log->l_mp);
3734 xfs_daddr_t head_blk, tail_blk;
3741 if (tail_blk != head_blk) {
3757 xfs_notice(log->l_mp,
"Starting recovery (logdev: %s)",
3758 log->l_mp->m_logname ? log->l_mp->m_logname
3792 xfs_alert(log->l_mp,
"Failed to recover EFIs");
3807 xfs_notice(log->l_mp,
"Ending recovery (logdev: %s)",
3808 log->l_mp->m_logname ? log->l_mp->m_logname
3812 xfs_info(log->l_mp,
"Ending clean mount");
3832 __uint64_t freeblks;
3842 for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
3845 xfs_alert(mp,
"%s agf read failed agno %d error %d",
3846 __func__, agno, error);
3851 xfs_buf_relse(agfbp);
3856 xfs_alert(mp,
"%s agi read failed agno %d error %d",
3857 __func__, agno, error);
3863 xfs_buf_relse(agibp);