#define XFS_ITRUNC_MAX_EXTENTS 2
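/*
 * Extent size hint lookup: prefer the inode's own di_extsize hint;
 * realtime inodes without one fall back to the filesystem-wide
 * realtime extent size (sb_rextsize).
 */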
return ip->i_d.di_extsize;
return ip->i_mount->m_sb.sb_rextsize;
for (i = 0; i < nrecs; i++) {
#define xfs_validate_extents(ifp, nrecs, fmt)
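/*
 * Debug check of an in-core inode cluster buffer: walk every inode
 * in the cluster and assert that di_next_unlinked is never zero.
 * Unlinked lists are terminated with NULLAGINO, not 0.
 */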
j = mp->m_inode_cluster_size >> mp->m_sb.sb_inodelog;
for (i = 0; i < j; i++) {
i * mp->m_sb.sb_inodesize);
if (!dip->di_next_unlinked) {
"Detected bogus zero next_unlinked field in incore inode buffer 0x%p.",
ASSERT(dip->di_next_unlinked);
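/*
 * xfs_imap_to_bp: read the inode cluster buffer containing the
 * mapped inode and validate the magic number and version of each
 * dinode in it before returning the buffer to the caller.
 */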
struct xfs_mount *mp,
struct xfs_trans *tp,
error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, imap->im_blkno,
(int)imap->im_len, buf_flags, &bp);
"%s: xfs_trans_read_buf() returned error %d.",
for (i = 0; i < ni; i++) {
(i << mp->m_sb.sb_inodelog));
"bad inode magic/vsn daddr %lld #%d (magic=%x)",
(unsigned long long)imap->im_blkno, i,
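/*
 * xfs_iformat: convert the on-disk inode to in-core format. The
 * dinode's extent totals, fork offset and realtime flag are
 * cross-checked before the data and attr fork formats are trusted.
 */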
xfs_warn(ip->i_mount,
"corrupt dinode %Lu, extent total = %d, nblocks = %Lu.",
(unsigned long long)ip->i_ino,
if (unlikely(dip->di_forkoff > ip->i_mount->m_sb.sb_inodesize)) {
xfs_warn(ip->i_mount, "corrupt dinode %Lu, forkoff = 0x%x.",
(unsigned long long)ip->i_ino,
!ip->i_mount->m_rtdev_targp)) {
xfs_warn(ip->i_mount,
"corrupt dinode %Lu, has realtime flag set.",
switch (ip->i_d.di_mode & S_IFMT) {
ip->i_df.if_u2.if_rdev = xfs_dinode_get_rdev(dip);
switch (dip->di_format) {
xfs_warn(ip->i_mount,
"corrupt inode %Lu (local format for regular file).",
(unsigned long long) ip->i_ino);
xfs_warn(ip->i_mount,
"corrupt inode %Lu (bad size %Ld for local inode).",
(unsigned long long) ip->i_ino,
(long long) di_size);
switch (dip->di_aformat) {
if (unlikely(size < sizeof(struct xfs_attr_sf_hdr))) {
xfs_warn(ip->i_mount,
"corrupt inode %Lu (bad attr fork size %Ld).",
(unsigned long long) ip->i_ino,
xfs_warn(ip->i_mount,
"corrupt inode %Lu (bad size %d for local fork, size = %d).",
(unsigned long long) ip->i_ino, size,
xfs_warn(ip->i_mount, "corrupt inode %Lu ((a)extents = %d).",
(unsigned long long) ip->i_ino, nex);
for (i = 0; i < nex; i++, dp++) {
xfs_warn(ip->i_mount, "corrupt inode %Lu (btree).",
(unsigned long long) ip->i_ino);
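/*
 * xfs_iread: map the inode, read its cluster buffer, verify the
 * on-disk magic number, then copy the dinode core into the in-core
 * inode. Version 1 inodes keep the link count in di_onlink; it is
 * moved to di_nlink and the project ID is forced to zero.
 */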
error = xfs_imap(mp, tp, ip->i_ino, &ip->i_imap, iget_flags);
error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &bp, 0, iget_flags);
"%s: dip->di_magic (0x%x) != XFS_DINODE_MAGIC (0x%x)",
xfs_alert(mp, "%s: xfs_iformat() returned error %d",
ip->i_d.di_version = dip->di_version;
ip->i_d.di_flushiter = be16_to_cpu(dip->di_flushiter);
if (ip->i_d.di_version == 1) {
ip->i_d.di_nlink = ip->i_d.di_onlink;
ip->i_d.di_onlink = 0;
xfs_set_projid(ip, 0);
ip->i_delayed_blks = 0;
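/*
 * xfs_ialloc: call xfs_dialloc() to allocate an on-disk inode.
 * xfs_dialloc() may return an ialloc_context instead of an inode
 * number, in which case the caller must commit the current
 * transaction and retry; on success the new in-core inode is
 * initialized field by field below.
 */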
error = xfs_dialloc(tp, pip ? pip->i_ino : 0, mode, okalloc,
ialloc_context, &ino);
if (*ialloc_context || ino == NULLFSINO) {
XFS_ILOCK_EXCL, &ip);
ip->i_d.di_mode = mode;
ip->i_d.di_onlink = 0;
ip->i_d.di_nlink = nlink;
ASSERT(ip->i_d.di_nlink == nlink);
xfs_set_projid(ip, prid);
memset(&(ip->i_d.di_pad[0]), 0, sizeof(ip->i_d.di_pad));
if (xfs_sb_version_hasnlink(&tp->t_mountp->m_sb) &&
ip->i_d.di_version == 1) {
ip->i_d.di_version = 2;
if ((prid != 0) && (ip->i_d.di_version == 1))
if (pip && XFS_INHERIT_GID(pip)) {
ip->i_d.di_gid = pip->i_d.di_gid;
ip->i_d.di_nextents = 0;
ASSERT(ip->i_d.di_nblocks == 0);
ip->i_d.di_mtime.t_sec = (__int32_t)tv.tv_sec;
ip->i_d.di_mtime.t_nsec = (__int32_t)tv.tv_nsec;
ip->i_d.di_atime = ip->i_d.di_mtime;
ip->i_d.di_ctime = ip->i_d.di_mtime;
ip->i_d.di_extsize = 0;
ip->i_d.di_dmevmask = 0;
ip->i_d.di_dmstate = 0;
ip->i_d.di_flags = 0;
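/*
 * Mode-dependent initialization: device special files store the
 * device number in the data fork, while regular files and
 * directories may inherit the filestream association and extent
 * size hints from the parent inode.
 */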
ip->i_df.if_u2.if_rdev = rdev;
ip->i_df.if_flags = 0;
if (pip && xfs_inode_is_filestream(pip))
ip->i_d.di_extsize = pip->i_d.di_extsize;
ip->i_d.di_extsize = pip->i_d.di_extsize;
ip->i_df.if_bytes = ip->i_df.if_real_bytes = 0;
ip->i_df.if_u1.if_extents = NULL;
ip->i_d.di_anextents = 0;
xfs_iflags_set(ip, XFS_IFILESTREAM);
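/*
 * xfs_itruncate_extents: unmap all blocks of the given fork beyond
 * new_size, at most XFS_ITRUNC_MAX_EXTENTS extents per call to
 * xfs_bunmapi(), rolling the transaction between iterations.
 */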
struct xfs_trans **tpp,
struct xfs_inode *ip,
struct xfs_mount *mp = ip->i_mount;
struct xfs_trans *tp = *tpp;
struct xfs_trans *ntp;
ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
xfs_isilocked(ip, XFS_IOLOCK_EXCL));
ASSERT(new_size <= XFS_ISIZE(ip));
ASSERT(ip->i_itemp->ili_lock_flags == 0);
ASSERT(!XFS_NOT_DQATTACHED(mp, ip));
trace_xfs_itruncate_extents_start(ip, new_size);
last_block = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
if (first_unmap_block == last_block)
ASSERT(first_unmap_block < last_block);
unmap_len = last_block - first_unmap_block + 1;
xfs_bmap_init(&free_list, &first_block);
first_unmap_block, unmap_len,
xfs_bmapi_aflag(whichfork),
&first_block, &free_list,
goto out_bmap_cancel;
goto out_bmap_cancel;
trace_xfs_itruncate_extents_end(ip, new_size);
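/*
 * xfs_iunlink: add the inode to the head of its AGI unlinked bucket
 * by pointing di_next_unlinked at the current bucket head and then
 * logging the changed region of the inode buffer.
 */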
ASSERT(ip->i_d.di_nlink == 0);
ASSERT(ip->i_d.di_mode != 0);
dip->di_next_unlinked = agi->agi_unlinked[bucket_index];
offset = ip->i_imap.im_boffset +
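/*
 * xfs_iunlink_remove: walk the AGI unlinked bucket until the
 * predecessor of this inode is found, then splice the inode out by
 * copying its di_next_unlinked into the predecessor's field.
 */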
xfs_dinode_t *last_dip = NULL;
int offset, last_offset = 0;
xfs_warn(mp, "%s: xfs_imap_to_bp returned error %d.",
offset = ip->i_imap.im_boffset +
ASSERT(next_agino != agino);
while (next_agino != agino) {
error = xfs_imap(mp, tp, next_ino, &imap, 0);
"%s: xfs_imap returned error %d.",
"%s: xfs_imap_to_bp returned error %d.",
next_agino = be32_to_cpu(last_dip->di_next_unlinked);
xfs_warn(mp, "%s: xfs_imap_to_bp(2) returned error %d.",
ASSERT(next_agino != agino);
offset = ip->i_imap.im_boffset +
last_dip->di_next_unlinked = cpu_to_be32(next_agino);
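/*
 * xfs_ifree_cluster: when an inode cluster is freed, mark every
 * in-core inode and every logged inode item in that cluster
 * XFS_ISTALE so a later flush cannot write freed inodes to disk.
 */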
xfs_inode_t *free_ip,
xfs_mount_t *mp = free_ip->i_mount;
int blks_per_cluster;
xfs_inode_log_item_t *iip;
xfs_log_item_t *lip;
blks_per_cluster = 1;
ninodes = mp->m_sb.sb_inopblock;
mp->m_sb.sb_blocksize;
ninodes = blks_per_cluster * mp->m_sb.sb_inopblock;
for (j = 0; j < nbufs; j++, inum += ninodes) {
bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, blkno,
mp->m_bsize * blks_per_cluster,
iip = (xfs_inode_log_item_t *)lip;
ASSERT(iip->ili_logged == 1);
xfs_trans_ail_copy_lsn(mp->m_ail,
&iip->ili_flush_lsn,
&iip->ili_item.li_lsn);
xfs_iflags_set(iip->ili_inode, XFS_ISTALE);
lip = lip->li_bio_list;
for (i = 0; i < ninodes; i++) {
spin_lock(&ip->i_flags_lock);
if (ip->i_ino != inum + i ||
__xfs_iflags_test(ip, XFS_ISTALE)) {
spin_unlock(&ip->i_flags_lock);
spin_unlock(&ip->i_flags_lock);
if (ip != free_ip &&
xfs_iflags_set(ip, XFS_ISTALE);
if (!iip || xfs_inode_clean(ip)) {
iip->ili_last_fields = iip->ili_fields;
iip->ili_fields = 0;
iip->ili_logged = 1;
xfs_trans_ail_copy_lsn(mp->m_ail, &iip->ili_flush_lsn,
&iip->ili_item.li_lsn);
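/*
 * xfs_ifree: return a fully truncated, zero-linked inode to the
 * free pool via xfs_difree(), then zero di_mode to mark the in-core
 * inode free.
 */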
xfs_ino_t first_ino;
ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
ASSERT(ip->i_d.di_nlink == 0);
ASSERT(ip->i_d.di_nextents == 0);
ASSERT(ip->i_d.di_anextents == 0);
ASSERT(ip->i_d.di_nblocks == 0);
error = xfs_difree(tp, ip->i_ino, flist, &delete, &first_ino);
ip->i_d.di_mode = 0;
ip->i_d.di_flags = 0;
ip->i_d.di_dmevmask = 0;
ip->i_d.di_forkoff = 0;
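/*
 * Fork memory reallocation helpers: grow or shrink the in-core
 * btree root and inline data buffers. Inline data allocations are
 * rounded up to a 4-byte multiple.
 */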
struct xfs_mount *mp = ip->i_mount;
if (rec_diff == 0) {
new_max = cur_max + rec_diff;
new_max = cur_max + rec_diff;
if (byte_diff == 0) {
if (new_size == 0) {
real_size = roundup(new_size, 4);
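/*
 * Inode pinning: an inode cannot be written back while the log
 * holds it pinned; xfs_iunpin_wait() triggers a log force and waits
 * for the pin count to drop to zero.
 */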
struct xfs_inode *ip)
ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
trace_xfs_inode_unpin_nowait(ip, _RET_IP_);
struct xfs_inode *ip)
struct xfs_inode *ip)
__xfs_iunpin_wait(ip);
ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
for (i = 0; i < nrecs; i++) {
if (isnullstartblock(start_block)) {
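/*
 * Extents with a null start block are delayed allocations and are
 * skipped when copying extents to on-disk format. The flag tables
 * below are indexed by fork so xfs_iflush_fork() can test the
 * matching ili_fields bits for either the data or the attr fork.
 */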
xfs_inode_log_item_t *iip,
#ifdef XFS_TRANS_DEBUG
static const short brootflag[2] =
static const short dataflag[2] =
static const short extflag[2] =
if ((iip->ili_fields & dataflag[whichfork]) &&
!(iip->ili_fields & extflag[whichfork]));
if ((iip->ili_fields & extflag[whichfork]) &&
if ((iip->ili_fields & brootflag[whichfork]) &&
xfs_dinode_put_rdev(dip, ip->i_df.if_u2.if_rdev);
&ip->i_df.if_u2.if_uuid,
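/*
 * xfs_iflush_cluster: look up every other in-core inode that lives
 * in the same cluster and flush the ones that are dirty and
 * flushable with a single buffer write. Corruption found here
 * shuts the filesystem down.
 */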
xfs_mount_t *mp = ip->i_mount;
unsigned long first_index, mask;
unsigned long inodes_per_cluster;
xfs_inode_t **ilist;
ilist_size = inodes_per_cluster * sizeof(xfs_inode_t *);
first_index, inodes_per_cluster);
for (i = 0; i < nr_found; i++) {
spin_lock(&ip->i_flags_lock);
spin_unlock(&ip->i_flags_lock);
spin_unlock(&ip->i_flags_lock);
if (!xfs_iflock_nowait(iq)) {
if (!xfs_inode_clean(iq)) {
goto cluster_corrupt_out;
cluster_corrupt_out:
xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
if (!bufwasdelwri) {
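/*
 * xfs_iflush: write a dirty in-core inode into its backing cluster
 * buffer. Stale inodes and forced-shutdown filesystems abort the
 * flush early; a pinned cluster buffer triggers a log force.
 */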
struct xfs_inode *ip,
struct xfs_mount *mp = ip->i_mount;
ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
ASSERT(xfs_isiflocked(ip));
if (xfs_iflags_test(ip, XFS_ISTALE)) {
if (XFS_FORCED_SHUTDOWN(mp)) {
if (xfs_buf_ispinned(bp))
goto cluster_corrupt_out;
xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
cluster_corrupt_out:
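/*
 * xfs_iflush_int: before copying the in-core inode into the dinode,
 * cross-check the magic numbers, the mode-specific fork format, the
 * extent totals and the fork offset; any mismatch marks the inode
 * corrupt and fails the flush.
 */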
xfs_inode_log_item_t *iip;
#ifdef XFS_TRANS_DEBUG
ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
ASSERT(xfs_isiflocked(ip));
"%s: Bad inode %Lu magic number 0x%x, ptr 0x%p",
__func__, ip->i_ino, be16_to_cpu(dip->di_magic), dip);
"%s: Bad inode %Lu, ptr 0x%p, magic number 0x%x",
__func__, ip->i_ino, ip, ip->i_d.di_magic);
if (S_ISREG(ip->i_d.di_mode)) {
"%s: Bad regular inode %Lu, ptr 0x%p",
__func__, ip->i_ino, ip);
} else if (S_ISDIR(ip->i_d.di_mode)) {
"%s: Bad directory inode %Lu, ptr 0x%p",
__func__, ip->i_ino, ip);
"%s: detected corrupt incore inode %Lu, "
"total extents = %d, nblocks = %Ld, ptr 0x%p",
__func__, ip->i_ino,
ip->i_d.di_nextents + ip->i_d.di_anextents,
ip->i_d.di_nblocks, ip);
"%s: bad inode %Lu, forkoff 0x%x, ptr 0x%p",
__func__, ip->i_ino, ip->i_d.di_forkoff, ip);
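/*
 * di_flushiter lets log recovery tell which on-disk copy of the
 * inode is newer; it is bumped on every flush and wraps back to
 * zero at DI_MAX_FLUSH so recovery always replays in that case.
 */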
ip->i_d.di_flushiter++;
ip->i_d.di_flushiter = 0;
ASSERT(ip->i_d.di_version == 1 || xfs_sb_version_hasnlink(&mp->m_sb));
if (ip->i_d.di_version == 1) {
if (!xfs_sb_version_hasnlink(&mp->m_sb)) {
ip->i_d.di_version = 2;
dip->di_version = 2;
ip->i_d.di_onlink = 0;
memset(&(ip->i_d.di_pad[0]), 0, sizeof(ip->i_d.di_pad));
memset(&(dip->di_pad[0]), 0, sizeof(dip->di_pad));
ASSERT(xfs_get_projid(ip) == 0);
if (iip != NULL && iip->ili_fields != 0) {
iip->ili_last_fields = iip->ili_fields;
iip->ili_fields = 0;
iip->ili_logged = 1;
xfs_trans_ail_copy_lsn(mp->m_ail, &iip->ili_flush_lsn,
&iip->ili_item.li_lsn);
ASSERT(iip->ili_logged == 0);
ASSERT(iip->ili_last_fields == 0);
ASSERT((iip->ili_item.li_flags & XFS_LI_IN_AIL) == 0);
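/*
 * In-core extent list (xfs_iext_*) helpers: the extent list lives
 * either inline, in a single contiguous buffer, or in a set of
 * indirection-array pages of XFS_IEXT_BUFSZ bytes each.
 */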
trace_xfs_iext_insert(ip, idx, new, state, _RET_IP_);
for (i = idx; i < idx + count; i++, new++)
ASSERT((idx >= 0) && (idx <= nextents));
new_size = ifp->if_bytes + byte_diff;
if (idx < nextents) {
if (idx < nextents) {
if (page_idx < erp->er_extcount) {
erp_idx, page_idx, ext_diff);
int count = ext_diff;
ext_cnt -= ext_diff;
ext_cnt -= ext_diff;
if (nex2 <= ext_avail) {
else if ((erp_idx < nlists - 1) &&
trace_xfs_iext_remove(ip, idx, state, _RET_IP_);
if (new_size == 0) {
ASSERT(((nextents - ext_diff) > 0) &&
if (idx + ext_diff < nextents) {
(nextents - (idx + ext_diff)) *
if (new_size == 0) {
if (idx + ext_diff < nextents) {
(nextents - (idx + ext_diff)) *
ext_cnt -= ext_diff;
ASSERT(erp_idx < ifp->if_real_bytes /
ext_cnt -= ext_diff;
rnew_size = new_size;
if (new_size == 0) {
ASSERT((new_size >= 0) && (new_size != size));
if (new_size == 0) {
for (erp_idx = nlists - 1; erp_idx >= 0; erp_idx--) {
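/*
 * Binary search for the extent record containing a given file
 * block: probe the midpoint (low + high) >> 1 and narrow the range
 * until bno falls inside a record's [startoff, startoff + blockcount).
 */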
if (nextents == 0) {
high = nextents - 1;
while (low <= high) {
idx = (low + high) >> 1;
if (bno < startoff) {
} else if (bno >= startoff + blockcount) {
if (bno >= startoff + blockcount) {
if (++idx == nextents) {
while (low <= high) {
erp_idx = (low + high) >> 1;
erp_next = erp_idx < nlists - 1 ? erp + 1 : NULL;
} else if (erp_next && bno >=
*erp_idxp = erp_idx;
while (low <= high) {
erp_idx = (low + high) >> 1;
prev = erp_idx > 0 ? erp - 1 : NULL;
if (page_idx < erp->er_extoff || (page_idx == erp->er_extoff &&
erp = erp_idx < nlists ? erp + 1 : NULL;
*erp_idxp = erp_idx;
if (nextents == 0) {
for (i = nlists - 1; i > erp_idx; i--) {
memset(erp[erp_idx].er_extbuf, 0, XFS_IEXT_BUFSZ);
return (&erp[erp_idx]);
for (i = erp_idx; i < nlists - 1; i++) {
while (erp_idx < nlists - 1) {
for (i = erp_idx; i < nlists; i++) {