65 #define xfs_bmap_check_leaf_extents(cur, ip, whichfork) do { } while (0)
192 #define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
213 struct xfs_mount *mp,
253 static inline bool xfs_bmap_needs_btree(
struct xfs_inode *ip,
int whichfork)
263 static inline bool xfs_bmap_wants_extents(
struct xfs_inode *ip,
int whichfork)
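/*
 * Editor's note: the two inline predicates above decide when an
 * extents-format fork holds more records than fit inline and must be
 * converted to a btree, and when a btree fork has shrunk enough to go
 * back to extents format.  A minimal user-space sketch (hypothetical
 * names, not the kernel API verbatim):
 */
#include <stdbool.h>

static inline bool bmap_needs_btree_model(unsigned nextents, unsigned maxext)
{
	return nextents > maxext;	/* too many records for the inline fork */
}

static inline bool bmap_wants_extents_model(unsigned nextents, unsigned maxext)
{
	return nextents <= maxext;	/* few enough to drop the btree */
}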
376 if (S_ISDIR(ip->i_d.di_mode)) {
378 memset(&dargs, 0, sizeof(dargs));
382 dargs.total = mp->m_dirblkfsbs;
420 ASSERT(!isnullstartblock(new->br_startblock));
435 new_endoff = new->br_startoff + new->br_blockcount;
439 da_old = startblockval(PREV.br_startblock);
446 if (PREV.br_startoff == new->br_startoff)
448 if (PREV.br_startoff + PREV.br_blockcount == new_endoff)
459 if (isnullstartblock(LEFT.br_startblock))
464 LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff &&
465 LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock &&
466 LEFT.br_state == new->br_state &&
479 if (isnullstartblock(RIGHT.br_startblock))
484 new_endoff == RIGHT.br_startoff &&
485 new->br_startblock + new->br_blockcount == RIGHT.br_startblock &&
486 new->br_state == RIGHT.br_state &&
492 LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount
511 LEFT.br_blockcount + PREV.br_blockcount +
512 RIGHT.br_blockcount);
516 bma->ip->i_d.di_nextents--;
523 RIGHT.br_blockcount, &i);
554 LEFT.br_blockcount + PREV.br_blockcount);
563 LEFT.br_startblock, LEFT.br_blockcount,
585 PREV.br_blockcount + RIGHT.br_blockcount);
595 RIGHT.br_blockcount, &i);
618 bma->ip->i_d.di_nextents++;
624 new->br_startblock, new->br_blockcount,
642 trace_xfs_bmap_pre_update(bma->ip, bma->idx - 1, state, _THIS_IP_);
644 LEFT.br_blockcount + new->br_blockcount);
646 PREV.br_startoff + new->br_blockcount);
647 trace_xfs_bmap_post_update(bma->ip, bma->idx - 1, state, _THIS_IP_);
649 temp = PREV.br_blockcount - new->br_blockcount;
657 LEFT.br_startblock, LEFT.br_blockcount,
671 startblockval(PREV.br_startblock));
685 temp = PREV.br_blockcount - new->br_blockcount;
688 bma->ip->i_d.di_nextents++;
694 new->br_startblock, new->br_blockcount,
715 startblockval(PREV.br_startblock) -
716 (bma->cur ? bma->cur->bc_private.b.allocated : 0));
719 trace_xfs_bmap_post_update(bma->ip, bma->idx + 1, state, _THIS_IP_);
727 temp = PREV.br_blockcount - new->br_blockcount;
728 trace_xfs_bmap_pre_update(bma->ip, bma->idx + 1, state, _THIS_IP_);
731 new->br_startoff, new->br_startblock,
732 new->br_blockcount + RIGHT.br_blockcount,
734 trace_xfs_bmap_post_update(bma->ip, bma->idx + 1, state, _THIS_IP_);
741 RIGHT.br_blockcount, &i);
755 startblockval(PREV.br_startblock));
768 temp = PREV.br_blockcount - new->br_blockcount;
772 bma->ip->i_d.di_nextents++;
778 new->br_startblock, new->br_blockcount,
799 startblockval(PREV.br_startblock) -
800 (bma->cur ? bma->cur->bc_private.b.allocated : 0));
829 temp = new->br_startoff - PREV.br_startoff;
830 temp2 = PREV.br_startoff + PREV.br_blockcount - new_endoff;
835 RIGHT.br_startblock = nullstartblock(
837 RIGHT.br_startoff = new_endoff;
841 bma->ip->i_d.di_nextents++;
847 new->br_startblock, new->br_blockcount,
869 diff = (int)(temp + temp2 - startblockval(PREV.br_startblock) -
870 (bma->cur ? bma->cur->bc_private.b.allocated : 0));
872 error = xfs_icsb_modify_counters(bma->ip->i_mount,
874 -((int64_t)diff), 0);
883 trace_xfs_bmap_pre_update(bma->ip, bma->idx + 2, state, _THIS_IP_);
885 nullstartblock((int)temp2));
886 trace_xfs_bmap_post_update(bma->ip, bma->idx + 2, state, _THIS_IP_);
889 da_new = temp + temp2;
919 if (da_old || da_new) {
922 temp += bma->cur->bc_private.b.allocated;
925 xfs_icsb_modify_counters(bma->ip->i_mount,
927 (int64_t)(da_old - temp), 0);
932 bma->cur->bc_private.b.allocated = 0;
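/*
 * Editor's sketch of the contiguity tests above (lines 459-492 and the
 * merges at 511-512): a delayed extent being converted to real can be
 * folded into its neighbours only when file offsets, disk blocks and
 * written/unwritten state all line up.  Hypothetical stand-alone model:
 */
#include <stdbool.h>
#include <stdint.h>

struct irec_model {
	uint64_t br_startoff;	/* file offset, in blocks */
	uint64_t br_startblock;	/* disk block */
	uint64_t br_blockcount;	/* length, in blocks */
	int	 br_state;	/* written vs. unwritten */
};

static bool left_contig_model(const struct irec_model *left,
			      const struct irec_model *new)
{
	return left->br_startoff + left->br_blockcount == new->br_startoff &&
	       left->br_startblock + left->br_blockcount == new->br_startblock &&
	       left->br_state == new->br_state;
}

static bool right_contig_model(const struct irec_model *new,
			       const struct irec_model *right)
{
	return new->br_startoff + new->br_blockcount == right->br_startoff &&
	       new->br_startblock + new->br_blockcount == right->br_startblock &&
	       new->br_state == right->br_state;
}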
948 struct xfs_trans *tp,
977 ASSERT(!isnullstartblock(new->br_startblock));
991 newext = new->br_state;
995 new_endoff = new->br_startoff + new->br_blockcount;
1003 if (PREV.br_startoff == new->br_startoff)
1005 if (PREV.br_startoff + PREV.br_blockcount == new_endoff)
1016 if (isnullstartblock(LEFT.br_startblock))
1021 LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff &&
1022 LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock &&
1023 LEFT.br_state == newext &&
1035 if (isnullstartblock(RIGHT.br_startblock))
1040 new_endoff == RIGHT.br_startoff &&
1041 new->br_startblock + new->br_blockcount == RIGHT.br_startblock &&
1042 newext == RIGHT.br_state &&
1048 LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount
1065 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
1067 LEFT.br_blockcount + PREV.br_blockcount +
1068 RIGHT.br_blockcount);
1069 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
1072 ip->i_d.di_nextents -= 2;
1078 RIGHT.br_startblock,
1079 RIGHT.br_blockcount, &i)))
1096 LEFT.br_blockcount + PREV.br_blockcount +
1109 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
1111 LEFT.br_blockcount + PREV.br_blockcount);
1112 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
1115 ip->i_d.di_nextents--;
1121 PREV.br_startblock, PREV.br_blockcount,
1133 LEFT.br_blockcount + PREV.br_blockcount,
1144 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
1146 PREV.br_blockcount + RIGHT.br_blockcount);
1148 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
1150 ip->i_d.di_nextents--;
1156 RIGHT.br_startblock,
1157 RIGHT.br_blockcount, &i)))
1168 new->br_blockcount + RIGHT.br_blockcount,
1180 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
1182 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
1189 new->br_startblock, new->br_blockcount,
1194 new->br_startblock, new->br_blockcount,
1205 trace_xfs_bmap_pre_update(ip, *idx - 1, state, _THIS_IP_);
1207 LEFT.br_blockcount + new->br_blockcount);
1209 PREV.br_startoff + new->br_blockcount);
1210 trace_xfs_bmap_post_update(ip, *idx - 1, state, _THIS_IP_);
1212 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
1214 new->br_startblock + new->br_blockcount);
1216 PREV.br_blockcount - new->br_blockcount);
1217 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
1226 PREV.br_startblock, PREV.br_blockcount,
1231 PREV.br_startoff + new->br_blockcount,
1232 PREV.br_startblock + new->br_blockcount,
1233 PREV.br_blockcount - new->br_blockcount,
1240 LEFT.br_blockcount + new->br_blockcount,
1252 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
1256 PREV.br_blockcount - new->br_blockcount);
1258 new->br_startblock + new->br_blockcount);
1259 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
1262 ip->i_d.di_nextents++;
1268 PREV.br_startblock, PREV.br_blockcount,
1273 PREV.br_startoff + new->br_blockcount,
1274 PREV.br_startblock + new->br_blockcount,
1275 PREV.br_blockcount - new->br_blockcount,
1290 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
1292 PREV.br_blockcount - new->br_blockcount);
1293 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
1297 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
1299 new->br_startoff, new->br_startblock,
1300 new->br_blockcount + RIGHT.br_blockcount, newext);
1301 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
1309 PREV.br_blockcount, &i)))
1314 PREV.br_blockcount - new->br_blockcount,
1321 new->br_blockcount + RIGHT.br_blockcount,
1332 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
1334 PREV.br_blockcount - new->br_blockcount);
1335 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
1340 ip->i_d.di_nextents++;
1346 PREV.br_startblock, PREV.br_blockcount,
1352 PREV.br_blockcount - new->br_blockcount,
1356 new->br_startblock, new->br_blockcount,
1373 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
1375 new->br_startoff - PREV.br_startoff);
1376 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
1381 PREV.br_startoff + PREV.br_blockcount - new_endoff;
1382 r[1].br_startblock = new->br_startblock + new->br_blockcount;
1388 ip->i_d.di_nextents += 2;
1394 PREV.br_startblock, PREV.br_blockcount,
1406 new->br_startoff - PREV.br_startoff;
1416 new->br_startblock, new->br_blockcount,
1448 *logflagsp |= tmp_logflags;
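/*
 * Editor's sketch of the middle-split case above (lines 1373-1416):
 * converting the middle of an extent to a new state leaves three
 * pieces: the head of PREV, the converted range, and a tail r[1]
 * starting at new_endoff.  Hypothetical model of the arithmetic:
 */
#include <stdint.h>

struct split_irec_model { uint64_t off, bno, len; };

static void split_middle_model(const struct split_irec_model *prev,
			       const struct split_irec_model *new,
			       struct split_irec_model *head,
			       struct split_irec_model *tail)
{
	head->off = prev->off;
	head->bno = prev->bno;
	head->len = new->off - prev->off;		/* cf. lines 1375, 1406 */

	tail->off = new->off + new->len;		/* new_endoff */
	tail->bno = new->bno + new->len;		/* cf. line 1382 */
	tail->len = prev->off + prev->len - tail->off;	/* cf. line 1381 */
}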
1487 ASSERT(isnullstartblock(new->br_startblock));
1522 new->br_startoff + new->br_blockcount == right.br_startoff &&
1543 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
1546 startblockval(new->br_startblock) +
1550 nullstartblock((int)newlen));
1551 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
1556 case BMAP_LEFT_CONTIG:
1565 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
1568 startblockval(new->br_startblock);
1571 nullstartblock((int)newlen));
1572 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
1581 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
1583 oldlen = startblockval(new->br_startblock) +
1588 nullstartblock((int)newlen), temp, right.br_state);
1589 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
1598 oldlen = newlen = 0;
1602 if (oldlen != newlen) {
1605 (int64_t)(oldlen - newlen), 0);
1633 ASSERT(!isnullstartblock(new->br_startblock));
1676 new->br_startoff + new->br_blockcount == right.br_startoff &&
1677 new->br_startblock + new->br_blockcount == right.br_startblock &&
1728 new->br_blockcount +
1736 case BMAP_LEFT_CONTIG:
1749 rval = xfs_ilog_fext(whichfork);
1776 new->br_startoff, new->br_startblock,
1782 rval = xfs_ilog_fext(whichfork);
1794 new->br_blockcount +
1818 new->br_blockcount, &i);
1822 bma->cur->bc_rec.b.br_state = new->br_state;
1832 if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
1838 0, &tmp_logflags, whichfork);
1846 bma->cur->bc_private.b.allocated = 0;
1882 orig_off = align_off = *offp;
1883 orig_alen = align_alen = *lenp;
1884 orig_end = orig_off + orig_alen;
1890 if (!delay && !eof &&
1903 temp = do_mod(orig_off, extsz);
1911 if ((temp = (align_alen % extsz))) {
1912 align_alen += extsz - temp;
1925 if (align_off != orig_off && align_off < prevo)
1944 align_off + align_alen != orig_end &&
1945 align_off + align_alen > nexto)
1946 align_off = nexto > align_alen ? nexto - align_alen : 0;
1953 if (align_off != orig_off && align_off < prevo)
1955 if (align_off + align_alen != orig_end &&
1956 align_off + align_alen > nexto &&
1959 align_alen = nexto - align_off;
1966 if (rt && (temp = (align_alen % mp->m_sb.sb_rextsize))) {
1971 if (orig_off < align_off ||
1972 orig_end > align_off + align_alen ||
1973 align_alen - temp < orig_alen)
1978 if (align_off + temp <= orig_off) {
1985 else if (align_off + align_alen - temp >= orig_end)
1991 align_alen -= orig_off - align_off;
1992 align_off = orig_off;
1993 align_alen -= align_alen % mp->m_sb.sb_rextsize;
1998 if (orig_off < align_off || orig_end > align_off + align_alen)
2001 ASSERT(orig_off >= align_off);
2002 ASSERT(orig_end <= align_off + align_alen);
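/*
 * Editor's sketch of the extent-size alignment above (lines 1903-2002):
 * the start offset is rounded down and the length rounded up to
 * multiples of the extent size hint, and the result must still cover
 * the original request.  Simplified stand-alone form (the kernel
 * version also honours neighbouring extents and realtime geometry):
 */
#include <assert.h>
#include <stdint.h>

static void extsize_align_model(uint64_t extsz, uint64_t *offp, uint64_t *lenp)
{
	uint64_t orig_off = *offp, orig_end = *offp + *lenp, temp;

	if ((temp = orig_off % extsz))		/* cf. line 1903 */
		*offp -= temp;			/* round offset down */
	*lenp = orig_end - *offp;
	if ((temp = *lenp % extsz))		/* cf. lines 1911-1912 */
		*lenp += extsz - temp;		/* round length up */

	assert(orig_off >= *offp);		/* cf. lines 2001-2002 */
	assert(orig_end <= *offp + *lenp);
}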
2017 #define XFS_ALLOC_GAP_UNITS 4
2029 #define ISVALID(x,y) \
2031 (x) < mp->m_sb.sb_rblocks : \
2032 XFS_FSB_TO_AGNO(mp, x) == XFS_FSB_TO_AGNO(mp, y) && \
2033 XFS_FSB_TO_AGNO(mp, x) < mp->m_sb.sb_agcount && \
2034 XFS_FSB_TO_AGBNO(mp, x) < mp->m_sb.sb_agblocks)
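/*
 * Editor's note on ISVALID (lines 2029-2034): a candidate block x is a
 * usable allocation hint near y only if it lies inside the realtime
 * device (for realtime files), or in the same allocation group as y,
 * with that AG existing.  Sketch with hypothetical flat geometry (the
 * kernel decodes agno/agbno from bit fields, which is why its
 * in-range checks are not redundant there):
 */
#include <stdbool.h>
#include <stdint.h>

struct geom_model { uint64_t agblocks, agcount, rblocks; };

static bool isvalid_model(const struct geom_model *g, bool rt,
			  uint64_t x, uint64_t y)
{
	if (rt)
		return x < g->rblocks;			/* realtime: flat space */
	return x / g->agblocks == y / g->agblocks &&	/* same AG */
	       x / g->agblocks < g->agcount;		/* AG exists */
}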
2036 mp = ap->ip->i_mount;
2045 !isnullstartblock(ap->prev.br_startblock) &&
2047 ap->prev.br_startblock)) {
2048 ap->blkno = ap->prev.br_startblock + ap->prev.br_blockcount;
2053 (ap->prev.br_startoff + ap->prev.br_blockcount);
2056 ap->blkno += adjust;
2063 else if (!ap->eof) {
2074 !isnullstartblock(ap->prev.br_startblock) &&
2075 (prevbno = ap->prev.br_startblock +
2076 ap->prev.br_blockcount) &&
2081 adjust = prevdiff = ap->offset -
2082 (ap->prev.br_startoff +
2083 ap->prev.br_blockcount);
2092 if (prevdiff <= XFS_ALLOC_GAP_UNITS * ap->length &&
2094 ap->prev.br_startblock))
2102 if (!rt && !nullfb &&
2115 if (!isnullstartblock(ap->got.br_startblock)) {
2119 adjust = gotdiff = ap->got.br_startoff - ap->offset;
2124 gotbno = ap->got.br_startblock;
2132 if (gotdiff <= XFS_ALLOC_GAP_UNITS * ap->length &&
2133 ISVALID(gotbno - gotdiff, gotbno))
2137 gotdiff += adjust - ap->length;
2144 if (!rt && !nullfb &&
2158 ap->blkno = prevdiff <= gotdiff ? prevbno : gotbno;
2160 ap->blkno = prevbno;
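/*
 * Editor's sketch of the placement heuristic above (lines 2045-2160):
 * the end of the previous extent and the start of the next one are
 * candidate targets, each discarded if the gap to it exceeds
 * XFS_ALLOC_GAP_UNITS (4) times the requested length; if both survive,
 * the closer one wins.  Hypothetical stand-alone form:
 */
#include <stdbool.h>
#include <stdint.h>

static bool pick_blkno_model(uint64_t length,
			     bool have_prev, uint64_t prevbno, uint64_t prevdiff,
			     bool have_got, uint64_t gotbno, uint64_t gotdiff,
			     uint64_t *blkno)
{
	if (have_prev && prevdiff > 4 * length)
		have_prev = false;		/* previous extent too far away */
	if (have_got && gotdiff > 4 * length)
		have_got = false;		/* next extent too far away */
	if (have_prev && have_got)
		*blkno = prevdiff <= gotdiff ? prevbno : gotbno; /* cf. 2158 */
	else if (have_prev)
		*blkno = prevbno;		/* cf. line 2160 */
	else if (have_got)
		*blkno = gotbno;
	else
		return false;			/* no useful hint */
	return true;
}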
2179 mp = ap->ip->i_mount;
2181 prod = align / mp->m_sb.sb_rextsize;
2183 align, 1, ap->eof, 0,
2199 ralen = ap->length / mp->m_sb.sb_rextsize;
2207 if (ralen * mp->m_sb.sb_rextsize >= MAXEXTLEN)
2208 ralen = MAXEXTLEN / mp->m_sb.sb_rextsize;
2226 ap->blkno = rtx * mp->m_sb.sb_rextsize;
2241 &ralen, atype, ap->wasdel, prod, &rtb)))
2245 ap->length, &ralen, atype,
2250 ap->blkno *= mp->m_sb.sb_rextsize;
2251 ralen *= mp->m_sb.sb_rextsize;
2253 ap->ip->i_d.di_nblocks += ralen;
2256 ap->ip->i_delayed_blks -= ralen;
2276 struct xfs_mount *mp = ap->ip->i_mount;
2282 if (ap->userdata && xfs_inode_is_filestream(ap->ip))
2298 while (*blen < args->maxlen) {
2314 if (*blen < longest)
2319 if (xfs_inode_is_filestream(ap->ip)) {
2320 if (*blen >= args->maxlen)
2347 if (++ag == mp->m_sb.sb_agcount)
2360 if (notinit || *blen < ap->minlen)
2366 else if (*blen < args->maxlen)
2379 if (xfs_inode_is_filestream(ap->ip))
2404 mp = ap->ip->i_mount;
2408 align, 0, ap->eof, 0, ap->conv,
2416 if (ap->userdata && xfs_inode_is_filestream(ap->ip)) {
2439 tryagain = isaligned = 0;
2440 memset(&args, 0, sizeof(args));
2453 } else if (ap->flist->xbf_low) {
2454 if (xfs_inode_is_filestream(ap->ip))
2513 if (blen > mp->m_dalign && blen <= args.maxlen)
2514 nextminlen = blen - mp->m_dalign;
2516 nextminlen = args.minlen;
2517 if (nextminlen + mp->m_dalign > args.minlen + 1)
2519 nextminlen + mp->m_dalign -
2542 args.minlen = nextminlen;
2574 ap->flist->xbf_low = 1;
2584 (ap->flist->xbf_low &&
2592 (ap->flist->xbf_low && fb_agno < args.agno));
2594 ap->ip->i_d.di_nblocks += args.len;
2597 ap->ip->i_delayed_blks -= args.len;
2672 ip->i_d.di_nblocks--;
2729 ASSERT((*idx >= 0) && (*idx < ifp->if_bytes /
2737 ASSERT(got_endoff >= del_endoff);
2756 mp->m_sb.sb_rextsize) == 0);
2758 mp->m_sb.sb_rextsize) == 0);
2761 do_div(bno, mp->m_sb.sb_rextsize);
2762 do_div(len, mp->m_sb.sb_rextsize);
2767 nblks = len * mp->m_sb.sb_rextsize;
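/*
 * Editor's sketch of the realtime conversion above (lines 2756-2767):
 * on a realtime file, a deleted range must start and end on realtime
 * extent boundaries; block numbers and counts are then expressed in
 * units of realtime extents before the rt bitmap is updated.
 */
#include <assert.h>
#include <stdint.h>

static void rt_extents_model(uint64_t bno, uint64_t len, uint32_t rextsize,
			     uint64_t *rtx, uint64_t *rtxlen)
{
	assert(bno % rextsize == 0);	/* cf. lines 2756-2758 */
	assert(len % rextsize == 0);
	*rtx = bno / rextsize;		/* cf. lines 2761-2762 */
	*rtxlen = len / rextsize;
}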
2789 da_old = da_new = 0;
2801 (got_endoff == del_endoff)) {
2816 flags |= xfs_ilog_fext(whichfork);
2828 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2836 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2841 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2843 flags |= xfs_ilog_fext(whichfork);
2857 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2863 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2867 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2869 flags |= xfs_ilog_fext(whichfork);
2884 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2886 new.br_startoff = del_endoff;
2887 temp2 = got_endoff - del_endoff;
2888 new.br_blockcount = temp2;
2891 new.br_startblock = del_endblock;
2903 if (error && error != ENOSPC)
2944 flags |= xfs_ilog_fext(whichfork);
2952 new.br_startblock = nullstartblock((int)temp2);
2953 da_new = temp + temp2;
2954 while (da_new > da_old) {
2959 nullstartblock((int)temp));
2961 if (da_new == da_old)
2967 nullstartblock((int)temp2);
2971 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2986 ip->i_d.di_nblocks -= nblks;
2997 ASSERT(da_old >= da_new);
2998 if (da_old > da_new) {
3000 (int64_t)(da_old - da_new), 0);
3086 memset(&args, 0, sizeof(args));
3095 args.fsbno = *firstblock;
3098 args.fsbno = *firstblock;
3120 ip->i_d.di_nblocks++;
3133 for (cnt = i = 0; i < nextents; i++) {
3142 xfs_btree_set_numrecs(ablock, cnt);
3171 struct xfs_inode *ip)
3173 struct xfs_mount *mp = ip->i_mount;
3176 if (mp->m_sb.sb_inodesize == 256) {
3204 if (dfl_forkoff > ip->i_d.di_forkoff)
3205 ip->i_d.di_forkoff = dfl_forkoff;
3242 memset(&args, 0, sizeof(args));
3244 args.mp = ip->i_mount;
3256 args.fsbno = *firstblock;
3270 *firstblock = args.fsbno;
3279 trace_xfs_bmap_post_update(ip, 0,
3283 ip->i_d.di_nblocks = 1;
3286 flags |= xfs_ilog_fext(whichfork);
3379 "Access to block zero in inode %llu "
3380 "start_block: %llx start_off: %llx "
3381 "blkcnt: %llx extent-state: %x lastx: %x\n",
3382 (unsigned long long)ip->i_ino,
3409 maxrecs = mp->m_bmap_dmxr[0];
3410 for (level = 0, rval = 0;
3420 maxrecs = mp->m_bmap_dmxr[1];
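/*
 * Editor's sketch of the worst-case indirect estimate above (lines
 * 3409-3420): for a delayed allocation of `len` blocks, each btree
 * level needs ceil(previous level / records-per-block) blocks, summed
 * over all levels; leaves use m_bmap_dmxr[0], nodes m_bmap_dmxr[1].
 * Hypothetical stand-alone form:
 */
#include <stdint.h>

static uint64_t worst_indlen_model(uint64_t len, uint64_t leafrecs,
				   uint64_t noderecs, int maxlevels)
{
	uint64_t rval = 0, maxrecs = leafrecs;
	int level;

	for (level = 0; level < maxlevels; level++) {
		len = (len + maxrecs - 1) / maxrecs;	/* blocks this level */
		rval += len;
		if (len == 1)		/* one block per remaining level */
			return rval + maxlevels - level - 1;
		if (level == 0)
			maxrecs = noderecs;
	}
	return rval;
}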
3448 ASSERT(!XFS_NOT_DQATTACHED(mp, ip));
3471 ASSERT(ip->i_d.di_aformat == 0);
3474 ASSERT(ip->i_d.di_anextents == 0);
3479 switch (ip->i_d.di_format) {
3481 ip->i_d.di_forkoff = roundup(sizeof(xfs_dev_t), 8) >> 3;
3490 if (!ip->i_d.di_forkoff)
3492 else if (mp->m_flags & XFS_MOUNT_ATTR2)
3505 xfs_bmap_init(&flist, &firstblock);
3506 switch (ip->i_d.di_format) {
3527 if (!xfs_sb_version_hasattr(&mp->m_sb) ||
3528 (!xfs_sb_version_hasattr2(&mp->m_sb) && version == 2)) {
3529 __int64_t sbfields = 0;
3531 spin_lock(&mp->m_sb_lock);
3532 if (!xfs_sb_version_hasattr(&mp->m_sb)) {
3533 xfs_sb_version_addattr(&mp->m_sb);
3536 if (!xfs_sb_version_hasattr2(&mp->m_sb) && version == 2) {
3537 xfs_sb_version_addattr2(&mp->m_sb);
3541 spin_unlock(&mp->m_sb_lock);
3544 spin_unlock(&mp->m_sb_lock);
3582 ASSERT(!isnullstartblock(bno));
3585 ASSERT(agno < mp->m_sb.sb_agcount);
3586 ASSERT(agbno < mp->m_sb.sb_agblocks);
3587 ASSERT(len < mp->m_sb.sb_agblocks);
3588 ASSERT(agbno + len <= mp->m_sb.sb_agblocks);
3592 new->xbfi_startblock = bno;
3647 minleafrecs = mp->m_bmap_dmnr[0];
3648 minnoderecs = mp->m_bmap_dmnr[1];
3649 maxblocks = (maxleafents + minleafrecs - 1) / minleafrecs;
3650 for (level = 1; maxblocks > 1; level++) {
3651 if (maxblocks <= maxrootrecs)
3654 maxblocks = (maxblocks + minnoderecs - 1) / minnoderecs;
3656 mp->m_bm_maxlevels[whichfork] = level;
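/*
 * Editor's sketch of the level computation above (lines 3647-3656):
 * start from the worst-case number of leaf blocks and repeatedly
 * divide by the minimum node fan-out until everything fits in the
 * inode root, counting one level per pass.
 */
#include <stdint.h>

static int bmap_maxlevels_model(uint64_t maxleafents, uint64_t minleafrecs,
				uint64_t minnoderecs, uint64_t maxrootrecs)
{
	uint64_t maxblocks = (maxleafents + minleafrecs - 1) / minleafrecs;
	int level;

	for (level = 1; maxblocks > 1; level++) {	/* cf. line 3650 */
		if (maxblocks <= maxrootrecs)		/* cf. line 3651 */
			maxblocks = 1;
		else
			maxblocks = (maxblocks + minnoderecs - 1) / minnoderecs;
	}
	return level;
}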
3674 xfs_efd_log_item_t *efd;
3675 xfs_efi_log_item_t *efi;
3678 unsigned int logres;
3679 unsigned int logcount;
3694 logres = ntp->t_log_res;
3695 logcount = ntp->t_log_count;
3730 if (!XFS_FORCED_SHUTDOWN(mp))
3731 xfs_force_shutdown(mp,
3733 SHUTDOWN_CORRUPT_INCORE :
3734 SHUTDOWN_META_IO_ERROR);
3757 for (free = flist->xbf_first; free; free = next) {
3799 lowest = *first_unused;
3801 for (idx = 0, lastaddr = 0, max = lowest; idx < nextents; idx++) {
3807 if (off >= lowest + len && off - max >= len) {
3808 *first_unused = max;
3814 *first_unused = max;
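/*
 * Editor's sketch of the hole search above (lines 3799-3814): walk the
 * sorted extent list tracking `max`, the lowest usable offset after
 * the extents seen so far, and return it as soon as the gap before the
 * next extent is at least `len` blocks.  Hypothetical model:
 */
#include <stdint.h>

struct ext_model { uint64_t off, len; };

static uint64_t first_unused_model(const struct ext_model *ext, int nextents,
				   uint64_t lowest, uint64_t len)
{
	uint64_t max = lowest, lastaddr;
	int idx;

	for (idx = 0; idx < nextents; idx++) {
		uint64_t off = ext[idx].off;
		/* does the hole before this extent fit? cf. lines 3807-3808 */
		if (off >= lowest + len && off - max >= len)
			return max;
		lastaddr = ext[idx].off + ext[idx].len;
		max = lastaddr > lowest ? lastaddr : lowest;
	}
	return max;				/* cf. line 3814 */
}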
3852 bno = *last_block - 1;
3869 struct xfs_trans *tp,
3870 struct xfs_inode *ip,
3886 if (nextents == 0) {
3917 if (error || is_empty)
3937 struct xfs_inode *ip,
3960 struct xfs_trans *tp,
3961 struct xfs_inode *ip,
3979 if (error || is_empty)
4003 return XFS_ISIZE(ip) == ip->i_mount->m_sb.sb_blocksize;
4015 ASSERT(XFS_ISIZE(ip) == ip->i_mount->m_sb.sb_blocksize);
4021 struct xfs_mount *mp,
4080 while (level-- > 0) {
4109 num_recs = xfs_btree_get_numrecs(block);
4110 if (unlikely(i + num_recs > room)) {
4111 ASSERT(i + num_recs <= room);
4112 xfs_warn(ip->i_mount,
4113 "corrupt dinode %Lu, (btree extents).",
4114 (unsigned long long) ip->i_ino);
4133 for (j = 0; j < num_recs; j++, i++, frp++) {
4145 start, num_recs))) {
4178 xfs_bmap_trace_exlist(
4182 unsigned long caller_ip)
4193 for (idx = 0; idx < cnt; idx++)
4194 trace_xfs_extlist(ip, idx, whichfork, caller_ip);
4214 ASSERT(ret_nmap <= nmap);
4216 for (i = 0; i < ret_nmap; i++) {
4217 ASSERT(mval[i].br_blockcount > 0);
4219 ASSERT(mval[i].br_startoff >= bno);
4220 ASSERT(mval[i].br_blockcount <= len);
4221 ASSERT(mval[i].br_startoff + mval[i].br_blockcount <=
4224 ASSERT(mval[i].br_startoff < bno + len);
4225 ASSERT(mval[i].br_startoff + mval[i].br_blockcount >
4229 mval[i - 1].br_startoff + mval[i - 1].br_blockcount ==
4230 mval[i].br_startoff);
4264 ASSERT((*bno >= obno) || (n == 0));
4325 } else if (*n > 0 &&
4332 } else if (!((*n == 0) &&
4346 struct xfs_inode *ip,
4353 struct xfs_mount *mp = ip->i_mount;
4378 if (XFS_FORCED_SHUTDOWN(mp))
4395 while (bno < end && n < *nmap) {
4418 if (bno >= end || n >= *nmap)
4433 struct xfs_inode *ip,
4441 struct xfs_mount *mp = ip->i_mount;
4464 1, 0, &aoff, &alen);
4469 extsz = alen / mp->m_sb.sb_rextsize;
4490 -((int64_t)extsz), 0);
4493 -((int64_t)alen), 0);
4497 goto out_unreserve_quota;
4500 -((int64_t)indlen), 0);
4502 goto out_unreserve_blocks;
4505 ip->i_delayed_blks += alen;
4525 out_unreserve_blocks:
4530 out_unreserve_quota:
4532 xfs_trans_unreserve_quota_nblks(NULL, ip, (long)alen, 0, rt ?
4542 struct xfs_inode *ip,
4549 struct xfs_mount *mp = ip->i_mount;
4572 if (XFS_FORCED_SHUTDOWN(mp))
4587 while (bno < end && n < *nmap) {
4590 &prev, &lastx, eof);
4605 if (bno >= end || n >= *nmap)
4625 struct xfs_mount *mp = bma->ip->i_mount;
4629 int tmp_logflags = 0;
4670 if (mp->m_dalign && bma->length >= mp->m_dalign &&
4684 if (bma->flist->xbf_low)
4693 bma->cur->bc_private.b.flist = bma->flist;
4702 bma->cur->bc_private.b.flags =
4706 bma->got.br_startblock = bma->blkno;
4715 xfs_sb_version_hasextflgbit(&mp->m_sb))
4735 ASSERT(bma->got.br_startoff + bma->got.br_blockcount >=
4743 xfs_bmapi_allocate_worker(
4748 unsigned long pflags;
4791 int tmp_logflags = 0;
4811 bma->ip, whichfork);
4813 bma->cur->bc_private.b.flist = bma->flist;
4855 struct xfs_trans *tp,
4856 struct xfs_inode *ip,
4867 struct xfs_mount *mp = ip->i_mount;
4911 if (XFS_FORCED_SHUTDOWN(mp))
4953 while (bno < end && n < *nmap) {
4954 inhole = eof || bma.got.br_startoff > bno;
4955 wasdelay = !inhole && isnullstartblock(bma.got.br_startblock);
4961 if (inhole || wasdelay) {
5008 if (bno >= end || n >= *nmap || bma.nallocs >= *nmap)
5024 if (xfs_bmap_wants_extents(ip, whichfork)) {
5025 int tmp_logflags = 0;
5029 &tmp_logflags, whichfork);
5044 if ((bma.logflags & xfs_ilog_fext(whichfork)) &&
5046 bma.logflags &= ~xfs_ilog_fext(whichfork);
5047 else if ((bma.logflags & xfs_ilog_fbroot(whichfork)) &&
5049 bma.logflags &= ~xfs_ilog_fbroot(whichfork);
5063 bma.cur->bc_private.b.firstblock) ||
5067 bma.cur->bc_private.b.firstblock)));
5068 *firstblock = bma.cur->bc_private.b.firstblock;
5088 struct xfs_inode *ip,
5119 trace_xfs_bunmap(ip, bno, len, flags, _RET_IP_);
5132 if (XFS_FORCED_SHUTDOWN(mp))
5142 if (nextents == 0) {
5149 bno = start + len - 1;
5181 while (bno != (xfs_fileoff_t)-1 && bno >= start && lastx >= 0 &&
5182 (nexts == 0 || extno < nexts)) {
5218 (mod = do_mod(sum, mp->m_sb.sb_rextsize))) {
5227 !xfs_sb_version_hasextflgbit(&mp->m_sb)) {
5247 ASSERT(xfs_trans_get_block_res(tp) > 0);
5259 &lastx, &cur, &del, firstblock, flist,
5271 mod = mp->m_sb.sb_rextsize - mod;
5278 xfs_trans_get_block_res(tp) == 0)) ||
5279 !xfs_sb_version_hasextflgbit(&mp->m_sb)) {
5317 ip, &lastx, &cur, &prev,
5318 firstblock, flist, &logflags);
5326 ip, &lastx, &cur, &del,
5327 firstblock, flist, &logflags);
5340 do_div(rtexts, mp->m_sb.sb_rextsize);
5342 (int64_t)rtexts, 0);
5370 if (!wasdel && xfs_trans_get_block_res(tp) == 0 &&
5381 &tmp_logflags, whichfork);
5382 logflags |= tmp_logflags;
5403 *done = bno == (xfs_fileoff_t)-1 || bno < start || lastx < 0;
5408 if (xfs_bmap_needs_btree(ip, whichfork)) {
5411 &cur, 0, &tmp_logflags, whichfork);
5412 logflags |= tmp_logflags;
5419 else if (xfs_bmap_wants_extents(ip, whichfork)) {
5423 logflags |= tmp_logflags;
5436 if ((logflags & xfs_ilog_fext(whichfork)) &&
5438 logflags &= ~xfs_ilog_fext(whichfork);
5439 else if ((logflags & xfs_ilog_fbroot(whichfork)) &&
5441 logflags &= ~xfs_ilog_fbroot(whichfork);
5514 xfs_bmap_format_t formatter,
5547 ip->i_d.di_aformat != 0 &&
5565 fixlen = mp->m_super->s_maxbytes;
5568 fixlen = XFS_ISIZE(ip);
5593 out = kmem_zalloc_large(bmv->bmv_count *
5601 if (ip->i_delayed_blks || XFS_ISIZE(ip) > ip->i_d.di_size) {
5604 goto out_unlock_iolock;
5624 bmapi_flags = xfs_bmapi_aflag(whichfork);
5635 goto out_unlock_ilock;
5640 (whichfork == XFS_ATTR_FORK || !(iflags & BMV_IF_DELALLOC))) {
5648 nmap = (nexleft > subnex) ? subnex : nexleft;
5651 map, &nmap, bmapi_flags);
5656 for (i = 0; i < nmap && nexleft && bmv->bmv_length; i++) {
5680 ASSERT((iflags & BMV_IF_DELALLOC) != 0);
5691 map[i].br_startblock))
5707 memset(&out[cur_ext], 0, sizeof(out[cur_ext]));
5715 } while (nmap && nexleft && bmv->bmv_length);
5724 for (i = 0; i < cur_ext; i++) {
5728 error = formatter(&arg, &out[i], &full);
5733 if (is_vmalloc_addr(out))
5734 kmem_free_large(out);
5761 struct xfs_buf_log_item *bip;
5762 bip = (struct xfs_buf_log_item *)lidp->lid_item;
5765 return bip->bli_buf;
5785 for( i = 1; i <= xfs_btree_get_numrecs(block); i++) {
5786 dmxr = mp->m_bmap_dmxr[0];
5808 if (*thispa == *pp) {
5809 xfs_warn(mp, "%s: thispa(%d) == pp(%d) %Ld",
5812 panic("%s: ptrs are equal in node\n",
5869 while (level-- > 0) {
5892 xfs_check_block(block, mp, 0, 0);
5915 num_recs = xfs_btree_get_numrecs(block);
5935 for (j = 1; j < num_recs; j++) {
5974 xfs_warn(mp, "%s: at error0", __func__);
5978 xfs_warn(mp, "%s: BAD after btree leaves for %d extents",
5980 panic("%s: CORRUPTED BTREE OR SOMETHING", __func__);
6048 int level = levelin;
6116 for (b = 0; b < numrecs; b++) {
6128 struct xfs_mount *mp,
6136 for (b = 1; b <= numrecs; b++) {
6152 struct xfs_inode *ip,
6159 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
6179 if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
6180 xfs_alert(ip->i_mount,
6181 "Failed delalloc mapping lookup ino %lld fsb %lld.",
6182 ip->i_ino, start_fsb);
6202 xfs_bmap_init(&flist, &firstblock);
6212 } while(remaining > 0);