/*
 * Absolute difference |a - b| computed without ever subtracting the larger
 * value from the smaller, so it is safe for unsigned operands (no wraparound).
 * Both arguments appear twice in the expansion — callers must pass
 * side-effect-free expressions.
 */
40 #define XFS_ABSDIFF(a,b) (((a) <= (b)) ? ((b) - (a)) : ((a) - (b)))
/*
 * NOTE(review): the users of these flags are not visible in this fragment;
 * presumably they report which free-space btree records (by-block-number /
 * by-count) a fixup pass verified or left consistent — confirm against the
 * full xfs_alloc.c before relying on this description.
 */
42 #define XFSA_FIXUP_BNO_OK 1
43 #define XFSA_FIXUP_CNT_OK 2
130 if (!error && *stat == 1) {
159 *resbno = aligned_bno;
160 *reslen = diff >= len ? 0 : len - diff;
187 ASSERT(freelen >= wantlen);
188 freeend = freebno + freelen;
189 wantend = wantbno + wantlen;
190 if (freebno >= wantbno) {
191 if ((newbno1 =
roundup(freebno, alignment)) >= freeend)
193 }
else if (freeend >= wantend && alignment > 1) {
194 newbno1 =
roundup(wantbno, alignment);
196 if (newbno1 >= freeend)
200 if (newbno2 < freebno)
205 if (newlen1 < newlen2 ||
206 (newlen1 == newlen2 &&
212 }
else if (freeend >= wantend) {
214 }
else if (alignment > 1) {
215 newbno1 =
roundup(freeend - wantlen, alignment);
216 if (newbno1 > freeend - wantlen &&
217 newbno1 - alignment >= freebno)
219 else if (newbno1 >= freeend)
222 newbno1 = freeend - wantlen;
243 ASSERT(rlen <= args->maxlen);
244 if (args->
prod <= 1 || rlen < args->
mod || rlen == args->
maxlen ||
245 (args->
mod == 0 && rlen < args->prod))
247 k = rlen % args->
prod;
251 if ((
int)(rlen = rlen - k - args->
mod) < (
int)args->
minlen)
254 if ((
int)(rlen = rlen - args->
prod - (args->
mod - k)) <
259 ASSERT(rlen <= args->maxlen);
320 i == 1 && nfbno1 == fbno && nflen1 == flen);
335 i == 1 && nfbno1 == fbno && nflen1 == flen);
361 if (rbno == fbno && rlen == flen)
363 else if (rbno == fbno) {
364 nfbno1 = rbno + rlen;
365 nflen1 = flen - rlen;
367 }
else if (rbno + rlen == fbno + flen) {
369 nflen1 = flen - rlen;
373 nflen1 = rbno - fbno;
374 nfbno2 = rbno + rlen;
375 nflen2 = (fbno + flen) - nfbno2;
447 error = xfs_trans_read_buf(
448 mp, tp, mp->m_ddev_targp,
453 ASSERT(!xfs_buf_geterror(bp));
461 struct xfs_trans *tp,
471 xfs_trans_agblocks_delta(tp, len);
507 switch (args->
type) {
533 -((
long)(args->
len)));
545 -((
long)(args->
len)));
599 ASSERT(fbno <= args->agbno);
610 if (tbno > args->
agbno)
612 if (tlen < args->minlen)
615 if (tend < args->agbno + args->
minlen)
651 trace_xfs_alloc_exact_done(args);
658 trace_xfs_alloc_exact_notfound(args);
663 trace_xfs_alloc_exact_error(args);
706 if (*sbnoa >= args->
agbno + gdiff)
709 if (*sbnoa <= args->agbno - gdiff)
716 if (*slena >= args->
minlen) {
785 #if defined(DEBUG) && defined(__KERNEL__)
820 if (i == 0 || ltlen == 0) {
822 trace_xfs_alloc_near_noentry(args);
845 #if defined(DEBUG) && defined(__KERNEL__)
862 if (ltlen >= args->
minlen)
872 for (j = 1, blen = 0, bdiff = 0;
873 !error && j && (blen < args->
maxlen || bdiff > 0);
884 if (ltlena < args->minlen)
889 if (args->
len < blen)
892 args->
alignment, ltbnoa, ltlena, <new);
894 (args->
len > blen || ltdiff < bdiff)) {
918 trace_xfs_alloc_near_nominleft(args);
927 ASSERT(bnew + blen <= ltbno + ltlen);
942 trace_xfs_alloc_near_first(args);
972 bno_cur_gt = bno_cur_lt;
1005 if (ltlena >= args->
minlen)
1021 if (gtlena >= args->
minlen)
1031 }
while (bno_cur_lt || bno_cur_gt);
1036 if (bno_cur_lt && bno_cur_gt) {
1037 if (ltlena >= args->
minlen) {
1044 args->
alignment, ltbnoa, ltlena, <new);
1047 &bno_cur_lt, &bno_cur_gt,
1048 ltdiff, >bno, >len,
1060 args->
alignment, gtbnoa, gtlena, >new);
1063 &bno_cur_gt, &bno_cur_lt,
1064 gtdiff, <bno, <len,
1076 if (bno_cur_lt ==
NULL && bno_cur_gt ==
NULL) {
1080 trace_xfs_alloc_near_busy(args);
1084 trace_xfs_alloc_size_neither(args);
1096 bno_cur_lt = bno_cur_gt;
1112 trace_xfs_alloc_near_nominleft(args);
1119 ltbnoa, ltlena, <new);
1121 ASSERT(ltnew + rlen <= ltbnoa + ltlena);
1123 args->
agbno = ltnew;
1130 trace_xfs_alloc_near_greater(args);
1132 trace_xfs_alloc_near_lesser(args);
1139 trace_xfs_alloc_near_error(args);
1140 if (cnt_cur !=
NULL)
1142 if (bno_cur_lt !=
NULL)
1144 if (bno_cur_gt !=
NULL)
1192 if (!i || forced > 1) {
1197 if (i == 0 || flen == 0) {
1199 trace_xfs_alloc_size_noentry(args);
1220 if (rlen >= args->
maxlen)
1239 trace_xfs_alloc_size_busy(args);
1255 (rlen <= flen && rbno + rlen <= fbno + flen), error0);
1256 if (rlen < args->maxlen) {
1275 if (flen < bestrlen)
1281 (rlen <= flen && rbno + rlen <= fbno + flen),
1283 if (rlen > bestrlen) {
1288 if (rlen == args->
maxlen)
1306 if (rlen < args->minlen) {
1309 trace_xfs_alloc_size_busy(args);
1331 cnt_cur = bno_cur =
NULL;
1338 trace_xfs_alloc_size_done(args);
1342 trace_xfs_alloc_size_error(args);
1351 trace_xfs_alloc_size_nominleft(args);
1400 args->
agno, fbno, 0);
1410 trace_xfs_alloc_small_freelist(args);
1430 if (flen < args->minlen) {
1432 trace_xfs_alloc_small_notenough(args);
1438 trace_xfs_alloc_small_done(args);
1442 trace_xfs_alloc_small_error(args);
1495 if (ltbno + ltlen < bno)
1522 if (bno + len < gtbno)
1541 if (haveleft && haveright) {
1585 i == 1 && xxbno == ltbno && xxlen == ltlen,
1593 nlen = len + ltlen + gtlen;
1601 else if (haveleft) {
1627 else if (haveright) {
1685 trace_xfs_free_extent(mp, agno, bno, len, isfl, haveleft, haveright);
1690 trace_xfs_free_extent(mp, agno, bno, len, isfl, -1, -1);
1716 maxleafents = (mp->m_sb.sb_agblocks + 1) / 2;
1717 minleafrecs = mp->m_alloc_mnr[0];
1718 minnoderecs = mp->m_alloc_mnr[1];
1719 maxblocks = (maxleafents + minleafrecs - 1) / minleafrecs;
1720 for (level = 1; maxblocks > 1; level++)
1721 maxblocks = (maxblocks + minnoderecs - 1) / minnoderecs;
1722 mp->m_ag_maxlevels =
level;
1730 struct xfs_mount *
mp,
1822 ASSERT(flags & XFS_ALLOC_FLAG_TRYLOCK);
1823 ASSERT(!(flags & XFS_ALLOC_FLAG_FREEING));
1836 if (!(flags & XFS_ALLOC_FLAG_FREEING)) {
1840 longest = (longest >
delta) ? (longest - delta) :
1869 memset(&targs, 0,
sizeof(targs));
1900 if (flags & XFS_ALLOC_FLAG_FREEING)
1909 for (bno = targs.
agbno; bno < targs.
agbno + targs.
len; bno++) {
1968 xfs_trans_agflist_delta(tp, -1);
1996 static const short offsets[] = {
2070 xfs_trans_agflist_delta(tp, 1);
2088 (
int)((xfs_caddr_t)blockp - (xfs_caddr_t)agfl),
2089 (
int)((xfs_caddr_t)blockp - (xfs_caddr_t)agfl +
2099 struct xfs_mount *
mp,
2100 struct xfs_trans *tp,
2110 error = xfs_trans_read_buf(
2111 mp, tp, mp->m_ddev_targp,
2119 ASSERT(!(*bpp)->b_error);
2133 if (xfs_sb_version_haslazysbcount(&mp->m_sb))
2152 struct xfs_mount *
mp,
2153 struct xfs_trans *tp,
2171 ASSERT(!(*bpp)->b_error);
2190 else if (!XFS_FORCED_SHUTDOWN(mp)) {
2233 agsize = mp->m_sb.sb_agblocks;
2234 if (args->
maxlen > agsize)
2248 trace_xfs_alloc_vextent_badargs(args);
2266 trace_xfs_alloc_vextent_nofix(args);
2270 trace_xfs_alloc_vextent_noagbp(args);
2283 (mp->m_flags & XFS_MOUNT_32BITINODES)) {
2285 ((mp->m_agfrotor / rotorstep) %
2286 mp->m_sb.sb_agcount), 0);
2302 args->
agno = sagno = (mp->m_agfrotor / rotorstep) %
2303 mp->m_sb.sb_agcount;
2329 if (no_min) args->
minleft = 0;
2333 trace_xfs_alloc_vextent_nofix(args);
2345 trace_xfs_alloc_vextent_loopfailed(args);
2350 if (args->
agno == sagno &&
2360 if (++(args->
agno) == mp->m_sb.sb_agcount) {
2370 if (args->
agno == sagno) {
2373 trace_xfs_alloc_vextent_allfailed(args);
2390 if (args->
agno == sagno)
2391 mp->m_agfrotor = (mp->m_agfrotor + 1) %
2392 (mp->m_sb.sb_agcount * rotorstep);
2394 mp->m_agfrotor = (args->
agno * rotorstep + 1) %
2395 (mp->m_sb.sb_agcount * rotorstep);
2438 args.
mp = tp->t_mountp;
2445 if (args.
agno >= args.
mp->m_sb.sb_agcount)
2449 if (args.
agbno >= args.
mp->m_sb.sb_agblocks)
2460 if (args.
agbno + len >