44 struct xlog_ticket *ticket,
45 struct xlog_in_core **iclog,
52 xfs_daddr_t blk_offset,
61 struct xlog_in_core *iclog);
72 struct xlog_in_core *iclog);
77 struct xlog_in_core **iclog,
78 struct xlog_ticket *ticket,
84 struct xlog_in_core *iclog);
88 struct xlog_in_core *iclog,
93 struct xlog_in_core *iclog);
102 struct xlog_ticket *ticket);
106 struct xlog_ticket *ticket);
119 struct xlog_in_core *iclog,
125 struct xlog_in_core *iclog,
128 #define xlog_verify_dest_ptr(a,b)
129 #define xlog_verify_grant_tail(a)
130 #define xlog_verify_iclog(a,b,c,d)
131 #define xlog_verify_tail_lsn(a,b,c)
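/*
 * xlog_grant_sub_space()/xlog_grant_add_space(): each grant head packs a
 * cycle/byte-offset pair into a single 64-bit value so it can be updated
 * without a lock - crack the value, adjust the space (wrapping the cycle when
 * the offset crosses the end of the log), and retry until the
 * compare-and-exchange sees no concurrent update (the "while (head_val != old)"
 * loops below).
 */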
139 xlog_grant_sub_space(
150 xlog_crack_grant_head_val(head_val, &cycle, &space);
154 space += log->l_logsize;
159 new = xlog_assign_grant_head_val(cycle, space);
161 } while (head_val != old);
165 xlog_grant_add_space(
177 xlog_crack_grant_head_val(head_val, &cycle, &space);
179 tmp = log->l_logsize - space;
188 new = xlog_assign_grant_head_val(cycle, space);
190 } while (head_val != old);
195 struct xlog_grant_head *head)
197 xlog_assign_grant_head(&head->grant, 1, 0);
198 INIT_LIST_HEAD(&head->waiters);
204 struct xlog_grant_head *head)
206 struct xlog_ticket *tic;
208 spin_lock(&head->lock);
211 spin_unlock(&head->lock);
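/*
 * How many bytes this ticket still needs from the given grant head: the write
 * head only ever needs a single unit, while a permanent reservation against
 * the reserve head needs one unit per remaining transaction count.
 */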
215 xlog_ticket_reservation(
217 struct xlog_grant_head *head,
218 struct xlog_ticket *tic)
220 if (head == &log->l_write_head) {
221 ASSERT(tic->t_flags & XLOG_TIC_PERM_RESERV);
222 return tic->t_unit_res;
224 if (tic->t_flags & XLOG_TIC_PERM_RESERV)
225 return tic->t_unit_res * tic->t_cnt;
227 return tic->t_unit_res;
234 struct xlog_grant_head *head,
237 struct xlog_ticket *tic;
241 need_bytes = xlog_ticket_reservation(log, head, tic);
242 if (*free_bytes < need_bytes)
245 *free_bytes -= need_bytes;
246 trace_xfs_log_grant_wake_up(log, tic);
256 struct xlog_grant_head *head,
257 struct xlog_ticket *tic,
263 if (XLOG_FORCED_SHUTDOWN(log))
268 spin_unlock(&head->lock);
272 trace_xfs_log_grant_sleep(log, tic);
274 trace_xfs_log_grant_wake(log, tic);
276 spin_lock(&head->lock);
277 if (XLOG_FORCED_SHUTDOWN(log))
281 list_del_init(&tic->t_queue);
284 list_del_init(&tic->t_queue);
308 struct xlog_grant_head *head,
309 struct xlog_ticket *tic,
323 *need_bytes = xlog_ticket_reservation(log, head, tic);
325 if (!list_empty_careful(&head->waiters)) {
326 spin_lock(&head->lock);
328 free_bytes < *need_bytes) {
332 spin_unlock(&head->lock);
333 } else if (free_bytes < *need_bytes) {
334 spin_lock(&head->lock);
336 spin_unlock(&head->lock);
343 xlog_tic_reset_res(xlog_ticket_t *tic)
346 tic->t_res_arr_sum = 0;
347 tic->t_res_num_ophdrs = 0;
351 xlog_tic_add_region(xlog_ticket_t *tic, uint len, uint type)
353 if (tic->t_res_num == XLOG_TIC_LEN_MAX) {
355 tic->t_res_o_flow += tic->t_res_arr_sum;
357 tic->t_res_arr_sum = 0;
360 tic->t_res_arr[tic->t_res_num].r_len = len;
361 tic->t_res_arr[tic->t_res_num].r_type = type;
362 tic->t_res_arr_sum += len;
371 struct xfs_mount *mp,
372 struct xlog_ticket *tic)
374 struct xlog *log = mp->m_log;
378 if (XLOG_FORCED_SHUTDOWN(log))
393 tic->t_curr_res = tic->t_unit_res;
394 xlog_tic_reset_res(tic);
399 trace_xfs_log_regrant(log, tic);
406 xlog_grant_add_space(log, &log->l_write_head.grant, need_bytes);
407 trace_xfs_log_regrant_exit(log, tic);
432 struct xfs_mount *mp,
435 struct xlog_ticket **ticp,
440 struct xlog *log = mp->m_log;
441 struct xlog_ticket *tic;
447 if (XLOG_FORCED_SHUTDOWN(log))
458 tic->t_trans_type = t_type;
463 trace_xfs_log_reserve(log, tic);
470 xlog_grant_add_space(log, &log->l_reserve_head.grant, need_bytes);
471 xlog_grant_add_space(log, &log->l_write_head.grant, need_bytes);
472 trace_xfs_log_reserve_exit(log, tic);
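/*
 * xfs_log_done(): called when a transaction commits - release the ticket's
 * remaining reservation, regranting a permanent ticket for its next use unless
 * XFS_LOG_REL_PERM_RESERV asks for it to be ungranted outright.
 */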
511 struct xfs_mount *mp,
512 struct xlog_ticket *ticket,
513 struct xlog_in_core **iclog,
516 struct xlog *log = mp->m_log;
519 if (XLOG_FORCED_SHUTDOWN(log) ||
524 (((ticket->t_flags & XLOG_TIC_INITED) == 0) &&
527 if (ticket->t_flags & XLOG_TIC_PERM_RESERV) {
528 flags |= XFS_LOG_REL_PERM_RESERV;
533 if ((ticket->t_flags & XLOG_TIC_PERM_RESERV) == 0 ||
534 (flags & XFS_LOG_REL_PERM_RESERV)) {
535 trace_xfs_log_done_nonperm(log, ticket);
544 trace_xfs_log_done_perm(log, ticket);
551 ticket->t_flags |= XLOG_TIC_INITED;
565 struct xfs_mount *mp,
566 struct xlog_in_core *iclog,
571 spin_lock(&iclog->ic_callback_lock);
572 abortflg = (iclog->ic_state & XLOG_STATE_IOERROR);
575 (iclog->ic_state == XLOG_STATE_WANT_SYNC));
577 *(iclog->ic_callback_tail) = cb;
578 iclog->ic_callback_tail = &(cb->cb_next);
580 spin_unlock(&iclog->ic_callback_lock);
586 struct xfs_mount *mp,
587 struct xlog_in_core *iclog)
590 xfs_force_shutdown(mp, SHUTDOWN_LOG_IO_ERROR);
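/*
 * Mount-time log initialisation: allocate the in-core log, attach the AIL,
 * and replay the log unless the "norecovery" mount option was used (which is
 * only permitted on read-only mounts).
 */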
611 xfs_daddr_t blk_offset,
616 if (!(mp->m_flags & XFS_MOUNT_NORECOVERY))
617 xfs_notice(mp, "Mounting Filesystem");
620 "Mounting filesystem in no-recovery mode. Filesystem will be inconsistent.");
621 ASSERT(mp->m_flags & XFS_MOUNT_RDONLY);
624 mp->m_log = xlog_alloc_log(mp, log_target, blk_offset, num_bblks);
625 if (IS_ERR(mp->m_log)) {
626 error = -PTR_ERR(mp->m_log);
635 xfs_warn(mp, "AIL initialisation failed: error %d", error);
638 mp->m_log->l_ailp = mp->m_ail;
644 if (!(mp->m_flags & XFS_MOUNT_NORECOVERY)) {
645 int readonly = (mp->m_flags & XFS_MOUNT_RDONLY);
648 mp->m_flags &= ~XFS_MOUNT_RDONLY;
653 mp->m_flags |= XFS_MOUNT_RDONLY;
655 xfs_warn(mp, "log mount/recovery failed: error %d",
657 goto out_destroy_ail;
694 if (!(mp->m_flags & XFS_MOUNT_NORECOVERY))
698 ASSERT(mp->m_flags & XFS_MOUNT_RDONLY);
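/*
 * Unmount path: write an unmount record so the next mount finds a clean log,
 * then wait for the iclogs to drain to disk.
 */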
723 struct xlog *log = mp->m_log;
724 xlog_in_core_t *iclog;
726 xlog_in_core_t *first_iclog;
728 xlog_ticket_t *tic = NULL;
736 if (mp->m_flags & XFS_MOUNT_RDONLY)
740 ASSERT(error || !(XLOG_FORCED_SHUTDOWN(log)));
743 first_iclog = iclog = log->l_iclog;
745 if (!(iclog->ic_state & XLOG_STATE_IOERROR)) {
746 ASSERT(iclog->ic_state & XLOG_STATE_ACTIVE);
747 ASSERT(iclog->ic_offset == 0);
749 iclog = iclog->ic_next;
750 } while (iclog != first_iclog);
752 if (! (XLOG_FORCED_SHUTDOWN(log))) {
754 XFS_LOG, 0, XLOG_UNMOUNT_REC_TYPE);
766 .i_len = sizeof(magic),
776 tic->t_curr_res -= sizeof(magic);
787 xfs_alert(mp, "%s: unmount record failed", __func__);
790 spin_lock(&log->l_icloglock);
791 iclog = log->l_iclog;
794 spin_unlock(&log->l_icloglock);
797 spin_lock(&log->l_icloglock);
798 if (!(iclog->ic_state == XLOG_STATE_ACTIVE ||
799 iclog->ic_state == XLOG_STATE_DIRTY)) {
800 if (!XLOG_FORCED_SHUTDOWN(log)) {
801 xlog_wait(&iclog->ic_force_wait,
804 spin_unlock(&log->l_icloglock);
807 spin_unlock(&log->l_icloglock);
810 trace_xfs_log_umount_write(log, tic);
828 spin_lock(&log->l_icloglock);
829 iclog = log->l_iclog;
833 spin_unlock(&log->l_icloglock);
836 spin_lock(&log->l_icloglock);
838 if ( ! ( iclog->ic_state == XLOG_STATE_ACTIVE
839 || iclog->ic_state == XLOG_STATE_DIRTY
840 || iclog->ic_state == XLOG_STATE_IOERROR) ) {
842 xlog_wait(&iclog->ic_force_wait,
845 spin_unlock(&log->l_icloglock);
868 struct xfs_mount *mp,
869 struct xfs_log_item *item,
871 const struct xfs_item_ops *ops)
873 item->li_mountp = mp;
874 item->li_ailp = mp->m_ail;
875 item->li_type = type;
879 INIT_LIST_HEAD(&item->li_ail);
880 INIT_LIST_HEAD(&item->li_cil);
888 struct xfs_mount *mp)
890 struct xlog *log = mp->m_log;
893 if (XLOG_FORCED_SHUTDOWN(log))
896 if (!list_empty_careful(&log->l_write_head.waiters)) {
899 spin_lock(&log->l_write_head.lock);
902 spin_unlock(&log->l_write_head.lock);
905 if (!list_empty_careful(&log->l_reserve_head.waiters)) {
908 spin_lock(&log->l_reserve_head.lock);
911 spin_unlock(&log->l_reserve_head.lock);
932 struct xlog *log = mp->m_log;
937 spin_lock(&log->l_icloglock);
938 switch (log->l_covered_state) {
939 case XLOG_STATE_COVER_DONE:
940 case XLOG_STATE_COVER_DONE2:
941 case XLOG_STATE_COVER_IDLE:
943 case XLOG_STATE_COVER_NEED:
944 case XLOG_STATE_COVER_NEED2:
947 if (log->l_covered_state == XLOG_STATE_COVER_NEED)
948 log->l_covered_state = XLOG_STATE_COVER_DONE;
950 log->l_covered_state = XLOG_STATE_COVER_DONE2;
957 spin_unlock(&log->l_icloglock);
966 struct xfs_mount *mp)
968 struct xlog *log = mp->m_log;
969 struct xfs_log_item *lip;
981 tail_lsn = lip->li_lsn;
990 struct xfs_mount *mp)
994 spin_lock(&mp->m_ail->xa_lock);
996 spin_unlock(&mp->m_ail->xa_lock);
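/*
 * xlog_space_left(): free space is the distance from the grant head back
 * around to the tail. The head and tail may be in the same cycle or in
 * adjacent cycles; anything else means the head has passed the tail, which is
 * reported below and answered with a full log's worth of space so the
 * tail-pushing code does not deadlock.
 */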
1026 xlog_crack_grant_head(head, &head_cycle, &head_bytes);
1027 xlog_crack_atomic_lsn(&log->l_tail_lsn, &tail_cycle, &tail_bytes);
1028 tail_bytes = BBTOB(tail_bytes);
1029 if (tail_cycle == head_cycle && head_bytes >= tail_bytes)
1030 free_bytes = log->l_logsize - (head_bytes - tail_bytes);
1031 else if (tail_cycle + 1 < head_cycle)
1033 else if (tail_cycle < head_cycle) {
1034 ASSERT(tail_cycle == (head_cycle - 1));
1035 free_bytes = tail_bytes - head_bytes;
1042 xfs_alert(log->l_mp,
1043 "xlog_space_left: head behind tail\n"
1044 " tail_cycle = %d, tail_bytes = %d\n"
1045 " GH cycle = %d, GH bytes = %d",
1046 tail_cycle, tail_bytes, head_cycle, head_bytes);
1048 free_bytes = log->l_logsize;
1063 struct xlog_in_core *iclog = bp->b_fspriv;
1064 struct xlog *l = iclog->ic_log;
1074 xfs_force_shutdown(l->l_mp, SHUTDOWN_LOG_IO_ERROR);
1080 aborted = XFS_LI_ABORTED;
1081 } else if (iclog->ic_state & XLOG_STATE_IOERROR) {
1082 aborted = XFS_LI_ABORTED;
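/*
 * Choose the number and size of the in-core log buffers from the logbufs/
 * logbsize mount options (or the defaults), and work out how many header
 * sectors each iclog needs on a version 2 log.
 */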
1107 struct xfs_mount *mp,
1113 if (mp->m_logbufs <= 0)
1116 log->l_iclog_bufs = mp->m_logbufs;
1121 if (mp->m_logbsize > 0) {
1122 size = log->l_iclog_size = mp->m_logbsize;
1123 log->l_iclog_size_log = 0;
1125 log->l_iclog_size_log++;
1129 if (xfs_sb_version_haslogv2(&mp->m_sb)) {
1137 log->l_iclog_hsize = xhdrs << BBSHIFT;
1138 log->l_iclog_heads = xhdrs;
1141 log->l_iclog_hsize = BBSIZE;
1142 log->l_iclog_heads = 1;
1152 log->l_iclog_hsize = BBSIZE;
1153 log->l_iclog_heads = 1;
1157 if (mp->m_logbufs == 0)
1158 mp->m_logbufs = log->l_iclog_bufs;
1159 if (mp->m_logbsize == 0)
1160 mp->m_logbsize = log->l_iclog_size;
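/*
 * xlog_alloc_log(): allocate the struct xlog, validate the log sector size,
 * and build the circular ring of iclogs with their backing buffers.
 */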
1171 struct xfs_mount *mp,
1173 xfs_daddr_t blk_offset,
1178 xlog_in_core_t **iclogp;
1179 xlog_in_core_t *iclog, *prev_iclog = NULL;
1187 xfs_warn(mp, "Log allocation failed: No memory!");
1192 log->l_targ = log_target;
1193 log->l_logsize = BBTOB(num_bblks);
1194 log->l_logBBstart = blk_offset;
1195 log->l_logBBsize = num_bblks;
1196 log->l_covered_state = XLOG_STATE_COVER_IDLE;
1199 log->l_prev_block = -1;
1201 xlog_assign_atomic_lsn(&log->l_tail_lsn, 1, 0);
1202 xlog_assign_atomic_lsn(&log->l_last_sync_lsn, 1, 0);
1203 log->l_curr_cycle = 1;
1209 if (xfs_sb_version_hassector(&mp->m_sb)) {
1210 log2_size = mp->m_sb.sb_logsectlog;
1212 xfs_warn(mp, "Log sector size too small (0x%x < 0x%x)",
1218 if (log2_size > mp->m_sectbb_log) {
1219 xfs_warn(mp, "Log sector size too large (0x%x > 0x%x)",
1220 log2_size, mp->m_sectbb_log);
1225 if (log2_size && log->l_logBBstart > 0 &&
1226 !xfs_sb_version_haslogv2(&mp->m_sb)) {
1228 "log sector size (0x%x) invalid for configuration.",
1233 log->l_sectBBsize = 1 << log2_size;
1238 bp = xfs_buf_alloc(mp->m_logdev_targp, 0, BTOBB(log->l_iclog_size), 0);
1248 iclogp = &log->l_iclog;
1256 ASSERT(log->l_iclog_size >= 4096);
1257 for (i=0; i < log->l_iclog_bufs; i++) {
1260 goto out_free_iclog;
1263 iclog->ic_prev = prev_iclog;
1267 BTOBB(log->l_iclog_size), 0);
1269 goto out_free_iclog;
1273 iclog->ic_data = bp->b_addr;
1275 log->l_iclog_bak[i] = (xfs_caddr_t)&(iclog->ic_header);
1277 head = &iclog->ic_header;
1281 xfs_sb_version_haslogv2(&log->l_mp->m_sb) ? 2 : 1);
1288 iclog->ic_state = XLOG_STATE_ACTIVE;
1289 iclog->ic_log = log;
1292 iclog->ic_callback_tail = &(iclog->ic_callback);
1293 iclog->ic_datap = (char *)iclog->ic_data + log->l_iclog_hsize;
1299 iclogp = &iclog->ic_next;
1301 *iclogp = log->l_iclog;
1302 log->l_iclog->ic_prev = prev_iclog;
1306 goto out_free_iclog;
1310 for (iclog = log->l_iclog; iclog; iclog = prev_iclog) {
1311 prev_iclog = iclog->ic_next;
1321 return ERR_PTR(-error);
1332 struct xlog_ticket *ticket,
1333 struct xlog_in_core **iclog,
1336 struct xfs_mount *mp = log->l_mp;
1349 error = xlog_write(log, &vec, ticket, commitlsnp, iclog,
1352 xfs_force_shutdown(mp, SHUTDOWN_LOG_IO_ERROR);
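/*
 * xlog_grant_push_ail(): if free log space has dropped below a threshold
 * (the largest of the new reservation, a quarter of the log, and 256 blocks),
 * compute a threshold LSN and push the AIL towards it so the log tail can
 * move forward and free space.
 */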
1373 int threshold_cycle;
1379 free_blocks = BTOBBT(free_bytes);
1386 free_threshold = BTOBB(need_bytes);
1387 free_threshold = MAX(free_threshold, (log->l_logBBsize >> 2));
1388 free_threshold = MAX(free_threshold, 256);
1389 if (free_blocks >= free_threshold)
1392 xlog_crack_atomic_lsn(&log->l_tail_lsn, &threshold_cycle,
1394 threshold_block += free_threshold;
1395 if (threshold_block >= log->l_logBBsize) {
1396 threshold_block -= log->l_logBBsize;
1397 threshold_cycle += 1;
1399 threshold_lsn = xlog_assign_lsn(threshold_cycle,
1407 if (XFS_LSN_CMP(threshold_lsn, last_sync_lsn) > 0)
1408 threshold_lsn = last_sync_lsn;
1415 if (!XLOG_FORCED_SHUTDOWN(log))
1431 struct xlog_in_core *iclog = bp->b_fspriv;
1433 if (iclog->ic_state & XLOG_STATE_IOERROR) {
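/*
 * xlog_sync(): write a WANT_SYNC iclog to disk. The write is rounded up to
 * the log stripe unit (or sector size) and the roundoff is added back to both
 * grant heads; a write that wraps the physical end of the log is issued as
 * two buffer I/Os (hence ic_bwritecnt of 1 or 2).
 */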
1477 struct xlog_in_core *iclog)
1487 int v2 = xfs_sb_version_haslogv2(&log->l_mp->m_sb);
1493 count_init = log->l_iclog_hsize + iclog->ic_offset;
1496 if (v2 && log->l_mp->m_sb.sb_logsunit > 1) {
1502 roundoff = count - count_init;
1504 ASSERT((v2 && log->l_mp->m_sb.sb_logsunit > 1 &&
1505 roundoff < log->l_mp->m_sb.sb_logsunit)
1507 (log->l_mp->m_sb.sb_logsunit <= 1 &&
1508 roundoff < BBTOB(1)));
1511 xlog_grant_add_space(log, &log->l_reserve_head.grant, roundoff);
1512 xlog_grant_add_space(log, &log->l_write_head.grant, roundoff);
1519 iclog->ic_header.h_len =
1522 iclog->ic_header.h_len =
1535 iclog->ic_bwritecnt = 2;
1537 iclog->ic_bwritecnt = 1;
1545 if (log->l_mp->m_flags & XFS_MOUNT_BARRIER) {
1557 if (log->l_mp->m_logdev_targp != log->l_mp->m_ddev_targp)
1582 bp = iclog->ic_log->l_xbuf;
1585 (char *)&iclog->ic_header + count, split);
1590 if (log->l_mp->m_flags & XFS_MOUNT_BARRIER)
1600 be32_add_cpu((__be32 *)dptr, 1);
1602 be32_add_cpu((__be32 *)dptr, 1);
1629 xlog_in_core_t *iclog, *next_iclog;
1641 iclog = log->l_iclog;
1642 for (i=0; i<log->l_iclog_bufs; i++) {
1644 next_iclog = iclog->ic_next;
1650 log->l_mp->m_log = NULL;
1659 xlog_state_finish_copy(
1661 struct xlog_in_core *iclog,
1665 spin_lock(&log->l_icloglock);
1667 be32_add_cpu(&iclog->ic_header.h_num_logops, record_cnt);
1668 iclog->ic_offset += copy_bytes;
1670 spin_unlock(&log->l_icloglock);
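/*
 * xlog_print_tic_res(): dump a ticket's reservation accounting region by
 * region when a transaction overruns its reservation, then shut the
 * filesystem down since the in-core log is now suspect.
 */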
1682 struct xfs_mount *mp,
1683 struct xlog_ticket *ticket)
1754 "xlog_write: reservation summary:\n"
1755 " trans type = %s (%u)\n"
1756 " unit res = %d bytes\n"
1757 " current res = %d bytes\n"
1758 " total reg = %u bytes (o/flow = %u bytes)\n"
1759 " ophdrs = %u (ophdr space = %u bytes)\n"
1760 " ophdr + reg = %u bytes\n"
1761 " num regions = %u\n",
1762 ((ticket->t_trans_type <= 0 ||
1764 "bad-trans-type" : trans_type_str[ticket->t_trans_type-1]),
1765 ticket->t_trans_type,
1768 ticket->t_res_arr_sum, ticket->t_res_o_flow,
1769 ticket->t_res_num_ophdrs, ophdr_spc,
1770 ticket->t_res_arr_sum +
1771 ticket->t_res_o_flow + ophdr_spc,
1774 for (i = 0; i < ticket->t_res_num; i++) {
1775 uint r_type = ticket->t_res_arr[i].r_type;
1776 xfs_warn(mp, "region[%u]: %s - %u bytes\n", i,
1778 "bad-rtype" : res_type_str[r_type-1]),
1779 ticket->t_res_arr[i].r_len);
1783 "xlog_write: reservation ran out. Need to up reservation");
1784 xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
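/*
 * Sum the payload of every region in the log vector chain and account one op
 * header per region (plus the start-record header when this is the first
 * write for the ticket), so xlog_write() knows how much iclog space to ask
 * for.
 */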
1792 xlog_write_calc_vec_length(
1793 struct xlog_ticket *ticket,
1802 if (ticket->t_flags & XLOG_TIC_INITED)
1805 for (lv = log_vector; lv; lv = lv->lv_next) {
1812 xlog_tic_add_region(ticket, vecp->i_len, vecp->i_type);
1816 ticket->t_res_num_ophdrs += headers;
1827 xlog_write_start_rec(
1829 struct xlog_ticket *ticket)
1831 if (!(ticket->t_flags & XLOG_TIC_INITED))
1840 ticket->t_flags &= ~XLOG_TIC_INITED;
1846 xlog_write_setup_ophdr(
1849 struct xlog_ticket *ticket,
1871 "Bad XFS transaction clientid 0x%x in ticket 0x%p",
1886 xlog_write_setup_copy(
1887 struct xlog_ticket *ticket,
1889 int space_available,
1893 int *last_was_partial_copy,
1894 int *bytes_consumed)
1898 still_to_copy = space_required - *bytes_consumed;
1899 *copy_off = *bytes_consumed;
1901 if (still_to_copy <= space_available) {
1903 *copy_len = still_to_copy;
1905 if (*last_was_partial_copy)
1907 *last_was_partial_copy = 0;
1908 *bytes_consumed = 0;
1913 *copy_len = space_available;
1916 if (*last_was_partial_copy)
1918 *bytes_consumed += *copy_len;
1919 (*last_was_partial_copy)++;
1923 ticket->t_res_num_ophdrs++;
1929 xlog_write_copy_finish(
1931 struct xlog_in_core *iclog,
1936 int *partial_copy_len,
1938 struct xlog_in_core **commit_iclog)
1940 if (*partial_copy) {
1945 xlog_state_finish_copy(log, iclog, *record_cnt, *data_cnt);
1952 *partial_copy_len = 0;
1956 xlog_state_finish_copy(log, iclog, *record_cnt, *data_cnt);
1960 spin_lock(&log->l_icloglock);
1962 spin_unlock(&log->l_icloglock);
1967 *commit_iclog = iclog;
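/*
 * xlog_write(): the workhorse of the log - copy the chain of log vectors into
 * iclogs, writing an op header ahead of each region and carrying regions that
 * do not fit in the current iclog over into continuation ops in the next one.
 */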
2017 struct xlog_ticket *ticket,
2019 struct xlog_in_core **commit_iclog,
2022 struct xlog_in_core *iclog = NULL;
2027 int partial_copy = 0;
2028 int partial_copy_len = 0;
2036 len = xlog_write_calc_vec_length(ticket, log_vector);
2043 if (ticket->t_flags & XLOG_TIC_INITED)
2053 if (ticket->t_curr_res < 0)
2064 &contwr, &log_offset);
2068 ASSERT(log_offset <= iclog->ic_size - 1);
2069 ptr = iclog->ic_datap + log_offset;
2079 while (lv && index < lv->lv_niovecs) {
2087 ASSERT((unsigned long)ptr % sizeof(__int32_t) == 0);
2089 start_rec_copy = xlog_write_start_rec(ptr, ticket);
2090 if (start_rec_copy) {
2092 xlog_write_adv_cnt(&ptr, &len, &log_offset,
2096 ophdr = xlog_write_setup_ophdr(log, ptr, ticket, flags);
2100 xlog_write_adv_cnt(&ptr, &len, &log_offset,
2103 len += xlog_write_setup_copy(ticket, ophdr,
2104 iclog->ic_size-log_offset,
2106 &copy_off, &copy_len,
2114 xlog_write_adv_cnt(&ptr, &len, &log_offset, copy_len);
2118 data_cnt += contwr ? copy_len : 0;
2120 error = xlog_write_copy_finish(log, iclog, flags,
2121 &record_cnt, &data_cnt,
2150 if (record_cnt == 0) {
2160 xlog_state_finish_copy(log, iclog, record_cnt, data_cnt);
2165 *commit_iclog = iclog;
2189 xlog_in_core_t *iclog;
2192 iclog = log->l_iclog;
2194 if (iclog->ic_state == XLOG_STATE_DIRTY) {
2195 iclog->ic_state = XLOG_STATE_ACTIVE;
2196 iclog->ic_offset = 0;
2218 iclog->ic_header.h_num_logops = 0;
2219 memset(iclog->ic_header.h_cycle_data, 0,
2220 sizeof(iclog->ic_header.h_cycle_data));
2221 iclog->ic_header.h_lsn = 0;
2222 } else if (iclog->ic_state == XLOG_STATE_ACTIVE)
2226 iclog = iclog->ic_next;
2227 } while (iclog != log->l_iclog);
2238 switch (log->l_covered_state) {
2239 case XLOG_STATE_COVER_IDLE:
2240 case XLOG_STATE_COVER_NEED:
2241 case XLOG_STATE_COVER_NEED2:
2242 log->l_covered_state = XLOG_STATE_COVER_NEED;
2245 case XLOG_STATE_COVER_DONE:
2247 log->l_covered_state = XLOG_STATE_COVER_NEED2;
2249 log->l_covered_state = XLOG_STATE_COVER_NEED;
2252 case XLOG_STATE_COVER_DONE2:
2254 log->l_covered_state = XLOG_STATE_COVER_IDLE;
2256 log->l_covered_state = XLOG_STATE_COVER_NEED;
2269 xlog_in_core_t *lsn_log;
2272 lsn_log = log->l_iclog;
2275 if (!(lsn_log->ic_state & (XLOG_STATE_ACTIVE|XLOG_STATE_DIRTY))) {
2277 if ((lsn && !lowest_lsn) ||
2278 (XFS_LSN_CMP(lsn, lowest_lsn) < 0)) {
2282 lsn_log = lsn_log->ic_next;
2283 } while (lsn_log != log->l_iclog);
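/*
 * xlog_state_do_callback(): walk the iclog ring and, for each iclog whose I/O
 * has completed in LSN order, advance l_last_sync_lsn, run its callback list
 * and mark it DIRTY; repeat until a full pass over the ring makes no progress.
 */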
2292 struct xlog_in_core *ciclog)
2294 xlog_in_core_t *iclog;
2295 xlog_in_core_t *first_iclog;
2301 int loopdidcallbacks;
2302 int funcdidcallbacks;
2307 spin_lock(&log->l_icloglock);
2308 first_iclog = iclog = log->l_iclog;
2310 funcdidcallbacks = 0;
2322 first_iclog = log->l_iclog;
2323 iclog = log->l_iclog;
2324 loopdidcallbacks = 0;
2330 if (iclog->ic_state &
2331 (XLOG_STATE_ACTIVE|XLOG_STATE_DIRTY)) {
2332 iclog = iclog->ic_next;
2343 if (!(iclog->ic_state & XLOG_STATE_IOERROR)) {
2354 if (!(iclog->ic_state &
2355 (XLOG_STATE_DONE_SYNC |
2356 XLOG_STATE_DO_CALLBACK))) {
2357 if (ciclog && (ciclog->ic_state ==
2358 XLOG_STATE_DONE_SYNC)) {
2359 ciclog->ic_state = XLOG_STATE_DO_CALLBACK;
2379 XFS_LSN_CMP(lowest_lsn,
2381 iclog = iclog->ic_next;
2386 iclog->ic_state = XLOG_STATE_CALLBACK;
2408 if (iclog->ic_callback)
2415 spin_unlock(&log->l_icloglock);
2424 spin_lock(&iclog->ic_callback_lock);
2425 cb = iclog->ic_callback;
2427 iclog->ic_callback_tail = &(iclog->ic_callback);
2428 iclog->ic_callback = NULL;
2429 spin_unlock(&iclog->ic_callback_lock);
2432 for (; cb; cb = cb_next) {
2436 spin_lock(&iclog->ic_callback_lock);
2437 cb = iclog->ic_callback;
2443 spin_lock(&log->l_icloglock);
2445 spin_unlock(&iclog->ic_callback_lock);
2446 if (!(iclog->ic_state & XLOG_STATE_IOERROR))
2447 iclog->ic_state = XLOG_STATE_DIRTY;
2458 iclog = iclog->ic_next;
2459 } while (first_iclog != iclog);
2461 if (repeats > 5000) {
2462 flushcnt += repeats;
2465 "%s: possible infinite loop (%d iterations)",
2466 __func__, flushcnt);
2468 } while (!ioerrors && loopdidcallbacks);
2475 if (funcdidcallbacks) {
2476 first_iclog = iclog = log->l_iclog;
2478 ASSERT(iclog->ic_state != XLOG_STATE_DO_CALLBACK);
2488 if (iclog->ic_state == XLOG_STATE_WANT_SYNC ||
2489 iclog->ic_state == XLOG_STATE_SYNCING ||
2490 iclog->ic_state == XLOG_STATE_DONE_SYNC ||
2491 iclog->ic_state == XLOG_STATE_IOERROR )
2493 iclog = iclog->ic_next;
2494 } while (first_iclog != iclog);
2498 if (log->l_iclog->ic_state & (XLOG_STATE_ACTIVE|XLOG_STATE_IOERROR))
2500 spin_unlock(&log->l_icloglock);
2522 xlog_in_core_t *iclog,
2525 struct xlog *log = iclog->ic_log;
2527 spin_lock(&log->l_icloglock);
2529 ASSERT(iclog->ic_state == XLOG_STATE_SYNCING ||
2530 iclog->ic_state == XLOG_STATE_IOERROR);
2532 ASSERT(iclog->ic_bwritecnt == 1 || iclog->ic_bwritecnt == 2);
2541 if (iclog->ic_state != XLOG_STATE_IOERROR) {
2542 if (--iclog->ic_bwritecnt == 1) {
2543 spin_unlock(&log->l_icloglock);
2546 iclog->ic_state = XLOG_STATE_DONE_SYNC;
2555 spin_unlock(&log->l_icloglock);
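/*
 * Find an ACTIVE iclog with room for the caller's copy (sleeping on
 * l_flush_wait if every iclog is busy). The first writer into an iclog also
 * pays for the log record header out of its ticket. Returns the iclog and the
 * byte offset at which to copy.
 */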
2582 struct xlog_in_core **iclogp,
2583 struct xlog_ticket *ticket,
2584 int *continued_write,
2589 xlog_in_core_t *iclog;
2593 spin_lock(&log->l_icloglock);
2594 if (XLOG_FORCED_SHUTDOWN(log)) {
2595 spin_unlock(&log->l_icloglock);
2599 iclog = log->l_iclog;
2600 if (iclog->ic_state != XLOG_STATE_ACTIVE) {
2604 xlog_wait(&log->l_flush_wait, &log->l_icloglock);
2608 head = &iclog->ic_header;
2611 log_offset = iclog->ic_offset;
2618 if (log_offset == 0) {
2619 ticket->t_curr_res -= log->l_iclog_hsize;
2620 xlog_tic_add_region(ticket,
2625 xlog_assign_lsn(log->l_curr_cycle, log->l_curr_block));
2626 ASSERT(log->l_curr_block >= 0);
2648 if (!atomic_add_unless(&iclog->ic_refcnt, -1, 1)) {
2650 spin_unlock(&log->l_icloglock);
2655 spin_unlock(&log->l_icloglock);
2666 if (len <= iclog->ic_size - iclog->ic_offset) {
2667 *continued_write = 0;
2668 iclog->ic_offset += len;
2670 *continued_write = 1;
2675 ASSERT(iclog->ic_offset <= iclog->ic_size);
2676 spin_unlock(&log->l_icloglock);
2678 *logoffsetp = log_offset;
2692 struct xlog_ticket *ticket)
2694 trace_xfs_log_regrant_reserve_enter(log, ticket);
2696 if (ticket->t_cnt > 0)
2699 xlog_grant_sub_space(log, &log->l_reserve_head.grant,
2700 ticket->t_curr_res);
2701 xlog_grant_sub_space(log, &log->l_write_head.grant,
2702 ticket->t_curr_res);
2703 ticket->t_curr_res = ticket->t_unit_res;
2704 xlog_tic_reset_res(ticket);
2706 trace_xfs_log_regrant_reserve_sub(log, ticket);
2709 if (ticket->t_cnt > 0)
2712 xlog_grant_add_space(log, &log->l_reserve_head.grant,
2713 ticket->t_unit_res);
2715 trace_xfs_log_regrant_reserve_exit(log, ticket);
2717 ticket->t_curr_res = ticket->t_unit_res;
2718 xlog_tic_reset_res(ticket);
2739 struct xlog_ticket *ticket)
2743 if (ticket->t_cnt > 0)
2746 trace_xfs_log_ungrant_enter(log, ticket);
2747 trace_xfs_log_ungrant_sub(log, ticket);
2753 bytes = ticket->t_curr_res;
2754 if (ticket->t_cnt > 0) {
2755 ASSERT(ticket->t_flags & XLOG_TIC_PERM_RESERV);
2756 bytes += ticket->t_unit_res*ticket->t_cnt;
2759 xlog_grant_sub_space(log, &log->l_reserve_head.grant, bytes);
2760 xlog_grant_sub_space(log, &log->l_write_head.grant, bytes);
2762 trace_xfs_log_ungrant_exit(log, ticket);
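/*
 * Drop a reference to an iclog; when the last reference goes away on an iclog
 * that is WANT_SYNC, stamp the current tail LSN into its header, move it to
 * SYNCING and push it to disk via xlog_sync().
 */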
2779 struct xlog_in_core *iclog)
2783 if (iclog->ic_state & XLOG_STATE_IOERROR)
2790 if (iclog->ic_state & XLOG_STATE_IOERROR) {
2791 spin_unlock(&log->l_icloglock);
2794 ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE ||
2795 iclog->ic_state == XLOG_STATE_WANT_SYNC);
2797 if (iclog->ic_state == XLOG_STATE_WANT_SYNC) {
2801 iclog->ic_state = XLOG_STATE_SYNCING;
2802 iclog->ic_header.h_tail_lsn = cpu_to_be64(tail_lsn);
2806 spin_unlock(&log->l_icloglock);
2831 struct xlog_in_core *iclog,
2834 ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE);
2836 eventual_size = iclog->ic_offset;
2837 iclog->ic_state = XLOG_STATE_WANT_SYNC;
2838 iclog->ic_header.h_prev_block = cpu_to_be32(log->l_prev_block);
2839 log->l_prev_block = log->l_curr_block;
2840 log->l_prev_cycle = log->l_curr_cycle;
2843 log->l_curr_block += BTOBB(eventual_size) + BTOBB(log->l_iclog_hsize);
2846 if (xfs_sb_version_haslogv2(&log->l_mp->m_sb) &&
2847 log->l_mp->m_sb.sb_logsunit > 1) {
2848 __uint32_t sunit_bb = BTOBB(log->l_mp->m_sb.sb_logsunit);
2849 log->l_curr_block = roundup(log->l_curr_block, sunit_bb);
2852 if (log->l_curr_block >= log->l_logBBsize) {
2853 log->l_curr_cycle++;
2855 log->l_curr_cycle++;
2856 log->l_curr_block -= log->l_logBBsize;
2857 ASSERT(log->l_curr_block >= 0);
2859 ASSERT(iclog == log->l_iclog);
2860 log->l_iclog = iclog->ic_next;
2892 struct xfs_mount *mp,
2896 struct xlog *log = mp->m_log;
2897 struct xlog_in_core *iclog;
2902 xlog_cil_force(log);
2904 spin_lock(&log->l_icloglock);
2906 iclog = log->l_iclog;
2907 if (iclog->ic_state & XLOG_STATE_IOERROR) {
2908 spin_unlock(&log->l_icloglock);
2915 if (iclog->ic_state == XLOG_STATE_ACTIVE ||
2916 iclog->ic_state == XLOG_STATE_DIRTY) {
2924 if (iclog->ic_state == XLOG_STATE_DIRTY ||
2926 && iclog->ic_offset == 0)) {
2927 iclog = iclog->ic_prev;
2928 if (iclog->ic_state == XLOG_STATE_ACTIVE ||
2929 iclog->ic_state == XLOG_STATE_DIRTY)
2944 spin_unlock(&log->l_icloglock);
2951 spin_lock(&log->l_icloglock);
2953 iclog->ic_state != XLOG_STATE_DIRTY)
2974 if (flags & XFS_LOG_SYNC) {
2981 if (iclog->ic_state & XLOG_STATE_IOERROR) {
2982 spin_unlock(&log->l_icloglock);
2986 xlog_wait(&iclog->ic_force_wait, &log->l_icloglock);
2992 if (iclog->ic_state & XLOG_STATE_IOERROR)
2999 spin_unlock(&log->l_icloglock);
3016 trace_xfs_log_force(mp, 0);
3019 xfs_warn(mp, "%s: error %d returned.", __func__, error);
3039 struct xfs_mount *mp,
3044 struct xlog *log = mp->m_log;
3045 struct xlog_in_core *iclog;
3046 int already_slept = 0;
3057 spin_lock(&log->l_icloglock);
3058 iclog = log->l_iclog;
3059 if (iclog->ic_state & XLOG_STATE_IOERROR) {
3060 spin_unlock(&log->l_icloglock);
3066 iclog = iclog->ic_next;
3070 if (iclog->ic_state == XLOG_STATE_DIRTY) {
3071 spin_unlock(&log->l_icloglock);
3075 if (iclog->ic_state == XLOG_STATE_ACTIVE) {
3094 if (!already_slept &&
3095 (iclog->ic_prev->ic_state &
3096 (XLOG_STATE_WANT_SYNC | XLOG_STATE_SYNCING))) {
3097 ASSERT(!(iclog->ic_state & XLOG_STATE_IOERROR));
3101 xlog_wait(&iclog->ic_prev->ic_write_wait,
3110 spin_unlock(&log->l_icloglock);
3115 spin_lock(&log->l_icloglock);
3118 if ((flags & XFS_LOG_SYNC) &&
3120 (XLOG_STATE_ACTIVE | XLOG_STATE_DIRTY))) {
3125 if (iclog->ic_state & XLOG_STATE_IOERROR) {
3126 spin_unlock(&log->l_icloglock);
3130 xlog_wait(&iclog->ic_force_wait, &log->l_icloglock);
3136 if (iclog->ic_state & XLOG_STATE_IOERROR)
3142 spin_unlock(&log->l_icloglock);
3146 } while (iclog != log->l_iclog);
3148 spin_unlock(&log->l_icloglock);
3165 trace_xfs_log_force(mp, lsn);
3168 xfs_warn(mp, "%s: error %d returned.", __func__, error);
3178 struct xlog_in_core *iclog)
3182 if (iclog->ic_state == XLOG_STATE_ACTIVE) {
3186 (XLOG_STATE_WANT_SYNC|XLOG_STATE_IOERROR));
3203 xlog_ticket_t *ticket)
3212 xlog_ticket_t *ticket)
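/*
 * xlog_ticket_alloc(): compute the worst-case "unit" reservation - the
 * caller's payload plus op headers, a log record header for every iclog the
 * transaction might span, and stripe-unit roundoff - and initialise the
 * ticket with it.
 */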
3222 struct xlog_ticket *
3231 struct xlog_ticket *tic;
3294 iclog_space = log->l_iclog_size - log->l_iclog_hsize;
3295 num_headers = howmany(unit_bytes, iclog_space);
3301 while (!num_headers ||
3302 howmany(unit_bytes, iclog_space) > num_headers) {
3306 unit_bytes += log->l_iclog_hsize * num_headers;
3309 unit_bytes += log->l_iclog_hsize;
3312 if (xfs_sb_version_haslogv2(&log->l_mp->m_sb) &&
3313 log->l_mp->m_sb.sb_logsunit > 1) {
3315 unit_bytes += 2*log->l_mp->m_sb.sb_logsunit;
3323 INIT_LIST_HEAD(&tic->t_queue);
3324 tic->t_unit_res = unit_bytes;
3325 tic->t_curr_res = unit_bytes;
3329 tic->t_clientid = client;
3330 tic->t_flags = XLOG_TIC_INITED;
3331 tic->t_trans_type = 0;
3333 tic->t_flags |= XLOG_TIC_PERM_RESERV;
3335 xlog_tic_reset_res(tic);
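/*
 * DEBUG-only sanity checks: make sure write pointers land inside an iclog,
 * that the grant head has not overtaken the tail, and that an iclog's
 * contents look sane before it is written.
 */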
3361 for (i = 0; i < log->l_iclog_bufs; i++) {
3362 if (ptr >= log->l_iclog_bak[i] &&
3363 ptr <= log->l_iclog_bak[i] + log->l_iclog_size)
3368 xfs_emerg(log->l_mp, "%s: invalid ptr", __func__);
3386 int tail_cycle, tail_blocks;
3389 xlog_crack_grant_head(&log->l_write_head.grant, &cycle, &space);
3390 xlog_crack_atomic_lsn(&log->l_tail_lsn, &tail_cycle, &tail_blocks);
3391 if (tail_cycle != cycle) {
3392 if (cycle - 1 != tail_cycle &&
3395 "%s: cycle - 1 != tail_cycle", __func__);
3399 if (space > BBTOB(tail_blocks) &&
3402 "%s: space > BBTOB(tail_blocks)", __func__);
3412 struct xlog_in_core *iclog,
3417 if (CYCLE_LSN(tail_lsn) == log->l_prev_cycle) {
3419 log->l_logBBsize - (log->l_prev_block - BLOCK_LSN(tail_lsn));
3420 if (blocks < BTOBB(iclog->ic_offset) + BTOBB(log->l_iclog_hsize))
3421 xfs_emerg(log->l_mp, "%s: ran out of log space", __func__);
3425 if (BLOCK_LSN(tail_lsn) == log->l_prev_block)
3426 xfs_emerg(log->l_mp, "%s: tail wrapped", __func__);
3428 blocks = BLOCK_LSN(tail_lsn) - log->l_prev_block;
3429 if (blocks < BTOBB(iclog->ic_offset) + 1)
3430 xfs_emerg(log->l_mp, "%s: ran out of log space", __func__);
3452 struct xlog_in_core *iclog,
3457 xlog_in_core_t *icptr;
3458 xlog_in_core_2_t *xhdr;
3460 xfs_caddr_t base_ptr;
3461 __psint_t field_offset;
3463 int len, i, j, k, op_len;
3467 spin_lock(&log->l_icloglock);
3468 icptr = log->l_iclog;
3469 for (i=0; i < log->l_iclog_bufs; i++) {
3471 xfs_emerg(log->l_mp, "%s: invalid ptr", __func__);
3472 icptr = icptr->ic_next;
3474 if (icptr != log->l_iclog)
3475 xfs_emerg(log->l_mp, "%s: corrupt iclog ring", __func__);
3476 spin_unlock(&log->l_icloglock);
3480 xfs_emerg(log->l_mp, "%s: invalid magic num", __func__);
3482 ptr = (xfs_caddr_t) &iclog->ic_header;
3483 for (ptr += BBSIZE; ptr < ((xfs_caddr_t)&iclog->ic_header) + count;
3486 xfs_emerg(log->l_mp, "%s: unexpected magic num",
3492 ptr = iclog->ic_datap;
3495 xhdr = iclog->ic_data;
3496 for (i = 0; i < len; i++) {
3500 field_offset = (__psint_t)
3502 if (syncing == B_FALSE || (field_offset & 0x1ff)) {
3509 clientid = xlog_get_client_id(
3510 xhdr[j].hic_xheader.xh_cycle_data[k]);
3512 clientid = xlog_get_client_id(
3513 iclog->ic_header.h_cycle_data[idx]);
3518 "%s: invalid clientid %d op 0x%p offset 0x%lx",
3519 __func__, clientid, ophead,
3520 (unsigned long)field_offset);
3523 field_offset = (__psint_t)
3524 ((xfs_caddr_t)&(ophead->oh_len) - base_ptr);
3525 if (syncing == B_FALSE || (field_offset & 0x1ff)) {
3529 (__psint_t)iclog->ic_datap);
3533 op_len = be32_to_cpu(xhdr[j].hic_xheader.xh_cycle_data[k]);
3535 op_len = be32_to_cpu(iclog->ic_header.h_cycle_data[idx]);
3550 xlog_in_core_t *iclog, *ic;
3552 iclog = log->l_iclog;
3553 if (! (iclog->ic_state & XLOG_STATE_IOERROR)) {
3560 ic->ic_state = XLOG_STATE_IOERROR;
3562 } while (ic != iclog);
3590 struct xfs_mount *mp,
3604 mp->m_flags |= XFS_MOUNT_FS_SHUTDOWN;
3614 if (logerror && log->l_iclog->ic_state & XLOG_STATE_IOERROR) {
3615 ASSERT(XLOG_FORCED_SHUTDOWN(log));
3627 xlog_cil_force(log);
3633 spin_lock(&log->l_icloglock);
3634 mp->m_flags |= XFS_MOUNT_FS_SHUTDOWN;
3651 spin_unlock(&log->l_icloglock);
3663 if (!(log->l_iclog->ic_state & XLOG_STATE_IOERROR)) {
3671 spin_lock(&log->l_icloglock);
3673 spin_unlock(&log->l_icloglock);
3682 #ifdef XFSERRORDEBUG
3684 xlog_in_core_t *iclog;
3686 spin_lock(&log->l_icloglock);
3687 iclog = log->l_iclog;
3689 ASSERT(iclog->ic_callback == 0);
3690 iclog = iclog->ic_next;
3691 } while (iclog != log->l_iclog);
3692 spin_unlock(&log->l_icloglock);
3703 xlog_in_core_t *iclog;
3705 iclog = log->l_iclog;
3710 if (iclog->ic_header.h_num_logops)
3712 iclog = iclog->ic_next;
3713 } while (iclog != log->l_iclog);