55 #define xfs_icsb_balance_counter(mp, a, b) do { } while (0)
56 #define xfs_icsb_balance_counter_locked(mp, a, b) do { } while (0)
115 static int xfs_uuid_table_size;
116 static uuid_t *xfs_uuid_table;
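/*
 * xfs_uuid_mount() records each mounted filesystem's UUID in the global
 * xfs_uuid_table so that a second mount with the same (non-nil) UUID can be
 * refused; XFS_MOUNT_NOUUID skips the check, and xfs_uuid_unmount() clears
 * the table slot again at unmount time.
 */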
124 struct xfs_mount *mp)
129 if (mp->m_flags & XFS_MOUNT_NOUUID)
133 xfs_warn(mp, "Filesystem has nil UUID - can't mount");
138 for (i = 0, hole = -1; i < xfs_uuid_table_size; i++) {
149 (xfs_uuid_table_size + 1) * sizeof(*xfs_uuid_table),
150 xfs_uuid_table_size * sizeof(*xfs_uuid_table),
152 hole = xfs_uuid_table_size++;
154 xfs_uuid_table[hole] = *uuid;
161 xfs_warn(mp, "Filesystem has duplicate UUID %pU - can't mount", uuid);
167 struct xfs_mount *mp)
172 if (mp->m_flags & XFS_MOUNT_NOUUID)
176 for (i = 0; i < xfs_uuid_table_size; i++) {
184 ASSERT(i < xfs_uuid_table_size);
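/*
 * Per-AG structures are kept in a radix tree on the mount (m_perag_tree);
 * xfs_perag_get() and xfs_perag_get_tag() look them up by AG number (or by
 * tag) and take a reference on the xfs_perag before returning it.
 */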
207 trace_xfs_perag_get(mp, agno, ref, _RET_IP_);
216 struct xfs_mount *mp,
226 (void **)&pag, first, 1, tag);
267 for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
268 spin_lock(&mp->m_perag_lock);
270 spin_unlock(&mp->m_perag_lock);
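/*
 * xfs_mount_validate_sb() sanity-checks the superblock read from disk.
 * When the caller passes XFS_MFSI_QUIET, "loud" is false and the warnings
 * below are suppressed.
 */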
308 int loud = !(flags & XFS_MFSI_QUIET);
319 xfs_warn(mp, "bad magic number");
323 if (!xfs_sb_good_version(sbp)) {
325 xfs_warn(mp, "bad version");
330 sbp->sb_logstart == 0 && mp->m_logdev_targp == mp->m_ddev_targp)) {
333 "filesystem is marked as having an external log; "
334 "specify logdev on the mount command line.");
339 sbp->sb_logstart != 0 && mp->m_logdev_targp != mp->m_ddev_targp)) {
342 "filesystem is marked as having an internal log; "
343 "do not specify logdev on the mount command line.");
387 "File system with blocksize %d bytes. "
388 "Only pagesize (%ld) or less will currently work.",
405 xfs_warn(mp, "inode size of %d bytes not supported",
414 "file system too large to be mounted on this system.");
420 xfs_warn(mp, "file system busy");
427 if (unlikely(!xfs_sb_version_hasdirv2(sbp))) {
430 "file system using version 1 directory format");
456 for (index = 0; index < agcount; index++) {
462 if (!first_initialised)
463 first_initialised = index;
479 spin_lock(&mp->m_perag_lock);
482 spin_unlock(&mp->m_perag_lock);
483 radix_tree_preload_end();
487 spin_unlock(&mp->m_perag_lock);
488 radix_tree_preload_end();
499 mp->m_flags |= XFS_MOUNT_32BITINODES;
501 mp->m_flags &= ~XFS_MOUNT_32BITINODES;
503 if (mp->m_flags & XFS_MOUNT_32BITINODES)
514 for (; index > first_initialised; index--) {
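/*
 * xfs_sb_from_disk() copies the on-disk superblock into the incore
 * mp->m_sb, converting fields from big-endian; xfs_sb_to_disk() goes the
 * other way, walking the xfs_sb_info[] offset/type table and converting
 * only the fields named in the "fields" bitmask.
 */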
523 struct xfs_mount *mp,
526 struct xfs_sb *to = &mp->m_sb;
587 xfs_caddr_t to_ptr = (xfs_caddr_t)to;
588 xfs_caddr_t from_ptr = (xfs_caddr_t)from;
599 first = xfs_sb_info[f].offset;
600 size = xfs_sb_info[f + 1].offset - first;
604 if (size == 1 || xfs_sb_info[f].type == 1) {
605 memcpy(to_ptr + first, from_ptr + first, size);
609 *(__be16 *)(to_ptr + first) =
613 *(__be32 *)(to_ptr + first) =
617 *(__be64 *)(to_ptr + first) =
625 fields &= ~(1LL << f);
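/*
 * xfs_readsb() reads the superblock using an initial sector size guess,
 * validates it, rejects the filesystem if the device's sector size is
 * larger than sb_sectsize, and re-reads the buffer at the real sb_sectsize
 * when the first read used a smaller size.
 */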
640 int loud = !(flags & XFS_MFSI_QUIET);
654 BTOBB(sector_size), 0);
657 xfs_warn(mp, "SB buffer read failed");
669 xfs_warn(mp, "SB validate failed");
676 if (sector_size > mp->m_sb.sb_sectsize) {
678 xfs_warn(mp, "device supports %u byte sectors (not %u)",
679 sector_size, mp->m_sb.sb_sectsize);
688 if (sector_size < mp->m_sb.sb_sectsize) {
690 sector_size = mp->m_sb.sb_sectsize;
695 xfs_icsb_reinit_counters(mp);
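/*
 * xfs_sb_mount_common() derives the commonly used mount geometry from the
 * superblock: allocation rotors, m_agno_log, the per-level btree max/min
 * record counts (minrecs is simply maxrecs / 2), and the inode allocation
 * unit sizes.
 */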
717 mp->m_agfrotor = mp->m_agirotor = 0;
719 mp->m_maxagi = mp->m_sb.sb_agcount;
723 mp->m_agno_log = xfs_highbit32(sbp->sb_agcount - 1) + 1;
727 mp->m_blockwmask = mp->m_blockwsize - 1;
731 mp->m_alloc_mnr[0] = mp->m_alloc_mxr[0] / 2;
732 mp->m_alloc_mnr[1] = mp->m_alloc_mxr[1] / 2;
736 mp->m_inobt_mnr[0] = mp->m_inobt_mxr[0] / 2;
737 mp->m_inobt_mnr[1] = mp->m_inobt_mxr[1] / 2;
741 mp->m_bmap_dmnr[0] = mp->m_bmap_dmxr[0] / 2;
742 mp->m_bmap_dmnr[1] = mp->m_bmap_dmxr[1] / 2;
747 mp->m_ialloc_blks = mp->m_ialloc_inos >> sbp->sb_inopblog;
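/*
 * xfs_initialize_perag_data() walks every AG to recompute the global
 * free-block and inode counts, folds the totals back into the incore
 * superblock under m_sb_lock, and reinitialises the per-cpu counters.
 */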
771 for (index = 0; index < agcount; index++) {
795 spin_lock(&mp->m_sb_lock);
799 spin_unlock(&mp->m_sb_lock);
802 xfs_icsb_reinit_counters(mp);
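/*
 * xfs_update_alignment() checks the sunit/swidth mount options against the
 * filesystem block size and AG size.  With XFS_MOUNT_RETERR the mount fails
 * on a bad value; otherwise stripe alignment is turned off with a warning.
 * If the resulting values differ from sb_unit/sb_width, the superblock
 * fields are flagged for update.
 */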
820 if ((BBTOB(mp->m_dalign) & mp->m_blockmask) ||
821 (BBTOB(mp->m_swidth) & mp->m_blockmask)) {
822 if (mp->m_flags & XFS_MOUNT_RETERR) {
823 xfs_warn(mp, "alignment check failed: "
824 "(sunit/swidth vs. blocksize)");
827 mp->m_dalign = mp->m_swidth = 0;
833 if (mp->m_dalign && (sbp->sb_agblocks % mp->m_dalign)) {
834 if (mp->m_flags & XFS_MOUNT_RETERR) {
835 xfs_warn(mp, "alignment check failed: "
836 "(sunit/swidth vs. ag size)");
840 "stripe alignment turned off: sunit(%d)/swidth(%d) "
841 "incompatible with agsize(%d)",
842 mp->m_dalign, mp->m_swidth,
847 } else if (mp->m_dalign) {
850 if (mp->m_flags & XFS_MOUNT_RETERR) {
851 xfs_warn(mp, "alignment check failed: "
852 "sunit(%d) less than bsize(%d)",
865 if (xfs_sb_version_hasdalign(sbp)) {
866 if (sbp->sb_unit != mp->m_dalign) {
870 if (sbp->sb_width != mp->m_swidth) {
875 } else if ((mp->m_flags & XFS_MOUNT_NOALIGN) != XFS_MOUNT_NOALIGN &&
876 xfs_sb_version_hasdalign(&mp->m_sb)) {
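/*
 * xfs_set_maxicount() caps the inode count at a fraction (sb_imax_pct) of
 * the data blocks, rounded down to a whole inode allocation unit.
 */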
900 do_div(icount, mp->m_ialloc_blks);
901 mp->m_maxicount = (icount * mp->m_ialloc_blks) <<
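/*
 * xfs_set_rw_sizes() picks the default preferred read/write I/O size logs
 * unless the user supplied one (XFS_MOUNT_DFLT_IOSIZE); wsync mounts get
 * the smaller XFS_WSYNC_*IO_LOG defaults.  The results are converted to
 * filesystem blocks.
 */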
918 int readio_log, writeio_log;
920 if (!(mp->m_flags & XFS_MOUNT_DFLT_IOSIZE)) {
921 if (mp->m_flags & XFS_MOUNT_WSYNC) {
922 readio_log = XFS_WSYNC_READIO_LOG;
923 writeio_log = XFS_WSYNC_WRITEIO_LOG;
925 readio_log = XFS_READIO_LOG_LARGE;
926 writeio_log = XFS_WRITEIO_LOG_LARGE;
929 readio_log = mp->m_readio_log;
930 writeio_log = mp->m_writeio_log;
936 mp->m_readio_log = readio_log;
938 mp->m_readio_blocks = 1 << (mp->m_readio_log - sbp->sb_blocklog);
942 mp->m_writeio_log = writeio_log;
944 mp->m_writeio_blocks = 1 << (mp->m_writeio_log - sbp->sb_blocklog);
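/*
 * xfs_set_low_space_thresholds() precomputes percentage-based low free
 * space thresholds (m_low_space[i]) from sb_dblocks; these are later used
 * to throttle speculative preallocation as the filesystem fills up.
 */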
952 struct xfs_mount *mp)
956 for (i = 0; i < XFS_LOWSP_MAX; i++) {
957 __uint64_t space = mp->m_sb.sb_dblocks;
960 mp->m_low_space[i] = space * (i + 1);
971 if (xfs_sb_version_hasalign(&mp->m_sb) &&
972 mp->m_sb.sb_inoalignmt >=
974 mp->m_inoalign_mask = mp->m_sb.sb_inoalignmt - 1;
976 mp->m_inoalign_mask = 0;
981 if (mp->m_dalign && mp->m_inoalign_mask &&
982 !(mp->m_dalign & mp->m_inoalign_mask))
983 mp->m_sinoalign = mp->m_dalign;
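/*
 * xfs_check_sizes() reads the last sector of the data device and, for an
 * external log, the last sector of the log device, to catch devices that
 * are smaller than the superblock claims.
 */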
999 xfs_warn(mp, "filesystem size mismatch detected");
1006 xfs_warn(mp, "last sector read failed");
1011 if (mp->m_logdev_targp != mp->m_ddev_targp) {
1014 xfs_warn(mp, "log size mismatch detected");
1021 xfs_warn(mp, "log device read failed");
1034 struct xfs_mount *mp)
1037 struct xfs_trans *tp;
1045 if (mp->m_sb.sb_qflags == 0)
1047 spin_lock(&mp->m_sb_lock);
1048 mp->m_sb.sb_qflags = 0;
1049 spin_unlock(&mp->m_sb_lock);
1055 if (mp->m_flags & XFS_MOUNT_RDONLY)
1063 xfs_alert(mp, "%s: Superblock update failed!", __func__);
1083 resblks = mp->m_sb.sb_dblocks;
1085 resblks = min_t(__uint64_t, resblks, 8192);
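/*
 * xfs_mountfs() is the main mount path: it fixes up the sb_features2
 * mismatch, applies the attr2 mount options, computes derived geometry and
 * alignment, initialises the per-AG structures, mounts the log, reads the
 * root and realtime inodes, writes back any deferred superblock changes,
 * completes log recovery and quota setup, and finally reserves the free
 * block pool on writable mounts.
 */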
1106 uint quotamount = 0;
1107 uint quotaflags = 0;
1128 if (xfs_sb_has_mismatched_features2(sbp)) {
1129 xfs_warn(mp, "correcting sb_features alignment problem");
1138 if (xfs_sb_version_hasattr2(&mp->m_sb) &&
1139 !(mp->m_flags & XFS_MOUNT_NOATTR2))
1140 mp->m_flags |= XFS_MOUNT_ATTR2;
1143 if (xfs_sb_version_hasattr2(&mp->m_sb) &&
1144 (mp->m_flags & XFS_MOUNT_NOATTR2)) {
1145 xfs_sb_version_removeattr2(&mp->m_sb);
1199 goto out_remove_uuid;
1206 xfs_warn(mp, "RT mount failed");
1207 goto out_remove_uuid;
1223 mp->m_attr_magicpct = (mp->m_sb.sb_blocksize * 37) / 100;
1237 xfs_warn(mp, "Failed per-ag init: %d", error);
1238 goto out_remove_uuid;
1242 xfs_warn(mp, "no log defined");
1245 goto out_free_perag;
1255 xfs_warn(mp, "log mount failed");
1278 if (xfs_sb_version_haslazysbcount(&mp->m_sb) &&
1279 !XFS_LAST_UNMOUNT_WAS_CLEAN(mp) &&
1280 !mp->m_sb.sb_inprogress) {
1292 xfs_warn(mp, "failed to read root inode");
1293 goto out_log_dealloc;
1299 xfs_warn(mp, "corrupted root inode %llu: not a directory",
1300 (unsigned long long)rip->i_ino);
1319 xfs_warn(mp, "failed to read RT inodes");
1328 if (mp->m_update_flags && !(mp->m_flags & XFS_MOUNT_RDONLY)) {
1331 xfs_warn(mp, "failed to write sb changes");
1352 xfs_notice(mp, "resetting quota flags");
1366 xfs_warn(mp, "log mount finish failed");
1374 ASSERT(mp->m_qflags == 0);
1375 mp->m_qflags = quotaflags;
1391 if (!(mp->m_flags & XFS_MOUNT_RDONLY)) {
1396 "Unable to allocate reserve blocks. Continuing without reserve pool.");
1408 if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp)
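/*
 * xfs_unmountfs() releases the root inode, frees the reserve block pool,
 * and writes back the superblock counters, warning (but not failing) if
 * either step cannot complete.
 */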
1425 struct xfs_mount *mp)
1432 IRELE(mp->m_rootip);
1484 xfs_warn(mp, "Unable to free reserved block pool. "
1485 "Freespace may not be correct on next mount.");
1489 xfs_warn(mp, "Unable to update superblock counters. "
1490 "Freespace may not be correct on next mount.");
1521 return !(mp->m_super->s_writers.frozen || XFS_FORCED_SHUTDOWN(mp) ||
1522 (mp->m_flags & XFS_MOUNT_RDONLY));
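/*
 * xfs_log_sbcount() folds the per-cpu counters back into the incore
 * superblock and, when lazy superblock counting is enabled and the
 * filesystem is writable, logs the result synchronously.
 */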
1543 xfs_icsb_sync_counters(mp, 0);
1549 if (!xfs_sb_version_haslazysbcount(&mp->m_sb))
1561 xfs_trans_set_sync(tp);
1597 last = xfs_sb_info[f + 1].offset - 1;
1600 ASSERT((1LL << f) & XFS_SB_MOD_BITS);
1601 first = xfs_sb_info[f].offset;
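/*
 * xfs_mod_incore_sb_unlocked() applies a signed delta to one incore
 * superblock field, failing on underflow.  The XFS_SBS_FDBLOCKS case also
 * refills the reserve pool (m_resblks_avail) before crediting free space
 * back to the superblock, and returns ENOSPC when neither has room.
 */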
1625 long long res_used, rem;
1635 lcounter = (long long)mp->m_sb.sb_icount;
1641 mp->m_sb.sb_icount = lcounter;
1644 lcounter = (long long)mp->m_sb.sb_ifree;
1650 mp->m_sb.sb_ifree = lcounter;
1653 lcounter = (long long)
1655 res_used = (long long)(mp->m_resblks - mp->m_resblks_avail);
1658 if (res_used > delta) {
1659 mp->m_resblks_avail += delta;
1661 rem = delta - res_used;
1662 mp->m_resblks_avail = mp->m_resblks;
1667 if (lcounter >= 0) {
1668 mp->m_sb.sb_fdblocks = lcounter +
1680 lcounter = (long long)mp->m_resblks_avail + delta;
1681 if (lcounter >= 0) {
1682 mp->m_resblks_avail = lcounter;
1686 "Filesystem \"%s\": reserve blocks depleted! "
1687 "Consider increasing reserve pool size.",
1695 lcounter = (long long)mp->m_sb.sb_frextents;
1700 mp->m_sb.sb_frextents = lcounter;
1703 lcounter = (long long)mp->m_sb.sb_dblocks;
1709 mp->m_sb.sb_dblocks = lcounter;
1712 scounter = mp->m_sb.sb_agcount;
1718 mp->m_sb.sb_agcount = scounter;
1721 scounter = mp->m_sb.sb_imax_pct;
1727 mp->m_sb.sb_imax_pct = scounter;
1730 scounter = mp->m_sb.sb_rextsize;
1736 mp->m_sb.sb_rextsize = scounter;
1739 scounter = mp->m_sb.sb_rbmblocks;
1745 mp->m_sb.sb_rbmblocks = scounter;
1748 lcounter = (long long)mp->m_sb.sb_rblocks;
1754 mp->m_sb.sb_rblocks = lcounter;
1757 lcounter = (long long)mp->m_sb.sb_rextents;
1763 mp->m_sb.sb_rextents = lcounter;
1766 scounter = mp->m_sb.sb_rextslog;
1772 mp->m_sb.sb_rextslog = scounter;
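/*
 * xfs_mod_incore_sb() is the m_sb_lock-taking wrapper around the unlocked
 * helper; xfs_mod_incore_sb_batch() applies an array of deltas and, if one
 * of them fails, unwinds the ones already applied before dropping the lock.
 */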
1788 struct xfs_mount *mp,
1795 #ifdef HAVE_PERCPU_SB
1798 spin_lock(&mp->m_sb_lock);
1800 spin_unlock(&mp->m_sb_lock);
1819 struct xfs_mount *mp,
1833 spin_lock(&mp->m_sb_lock);
1834 for (msbp = msb; msbp < (msb + nmsb); msbp++) {
1839 msbp->msb_delta, rsvd);
1843 spin_unlock(&mp->m_sb_lock);
1847 while (--msbp >= msb) {
1849 -msbp->msb_delta, rsvd);
1852 spin_unlock(&mp->m_sb_lock);
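/*
 * xfs_getsb() returns a referenced (and optionally locked) buffer for the
 * incore superblock, xfs_freesb() releases it, and xfs_dev_is_read_only()
 * refuses operations that would need to write to a read-only device.
 */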
1867 struct xfs_mount *mp,
1870 struct xfs_buf *bp = mp->m_sb_bp;
1888 struct xfs_mount *mp)
1890 struct xfs_buf *bp = mp->m_sb_bp;
1932 struct xfs_mount *mp,
1938 xfs_notice(mp, "%s required on read-only device.", message);
1939 xfs_notice(mp, "write access unavailable, cannot proceed.");
1945 #ifdef HAVE_PERCPU_SB
1998 #ifdef CONFIG_HOTPLUG_CPU
2008 xfs_icsb_cpu_notify(
2013 xfs_icsb_cnts_t *cntp;
2016 mp = (xfs_mount_t *)container_of(nfb, xfs_mount_t, m_icsb_notifier);
2017 cntp = (xfs_icsb_cnts_t *)
2024 memset(cntp, 0, sizeof(xfs_icsb_cnts_t));
2032 xfs_icsb_unlock(mp);
2040 spin_lock(&mp->m_sb_lock);
2045 mp->m_sb.sb_icount += cntp->icsb_icount;
2046 mp->m_sb.sb_ifree += cntp->icsb_ifree;
2047 mp->m_sb.sb_fdblocks += cntp->icsb_fdblocks;
2049 memset(cntp, 0, sizeof(xfs_icsb_cnts_t));
2054 spin_unlock(&mp->m_sb_lock);
2055 xfs_icsb_unlock(mp);
2064 xfs_icsb_init_counters(
2067 xfs_icsb_cnts_t *cntp;
2071 if (mp->m_sb_cnts == NULL)
2074 #ifdef CONFIG_HOTPLUG_CPU
2075 mp->m_icsb_notifier.notifier_call = xfs_icsb_cpu_notify;
2076 mp->m_icsb_notifier.priority = 0;
2081 cntp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, i);
2082 memset(cntp, 0, sizeof(xfs_icsb_cnts_t));
2091 mp->m_icsb_counters = -1;
2096 xfs_icsb_reinit_counters(
2104 mp->m_icsb_counters = -1;
2108 xfs_icsb_unlock(mp);
2112 xfs_icsb_destroy_counters(
2115 if (mp->m_sb_cnts) {
2124 xfs_icsb_cnts_t *icsbp)
2132 xfs_icsb_unlock_cntr(
2133 xfs_icsb_cnts_t *icsbp)
2135 clear_bit(XFS_ICSB_FLAG_LOCK, &icsbp->icsb_flags);
2140 xfs_icsb_lock_all_counters(
2143 xfs_icsb_cnts_t *cntp;
2147 cntp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, i);
2148 xfs_icsb_lock_cntr(cntp);
2153 xfs_icsb_unlock_all_counters(
2156 xfs_icsb_cnts_t *cntp;
2160 cntp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, i);
2161 xfs_icsb_unlock_cntr(cntp);
2168 xfs_icsb_cnts_t *cnt,
2171 xfs_icsb_cnts_t *cntp;
2174 memset(cnt, 0, sizeof(xfs_icsb_cnts_t));
2176 if (!(flags & XFS_ICSB_LAZY_COUNT))
2177 xfs_icsb_lock_all_counters(mp);
2180 cntp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, i);
2181 cnt->icsb_icount += cntp->icsb_icount;
2182 cnt->icsb_ifree += cntp->icsb_ifree;
2183 cnt->icsb_fdblocks += cntp->icsb_fdblocks;
2186 if (!(flags & XFS_ICSB_LAZY_COUNT))
2187 xfs_icsb_unlock_all_counters(mp);
2191 xfs_icsb_counter_disabled(
2196 return test_bit(field, &mp->m_icsb_counters);
2200 xfs_icsb_disable_counter(
2204 xfs_icsb_cnts_t cnt;
2216 if (xfs_icsb_counter_disabled(mp, field))
2219 xfs_icsb_lock_all_counters(mp);
2223 xfs_icsb_count(mp, &cnt, XFS_ICSB_LAZY_COUNT);
2226 mp->m_sb.sb_icount = cnt.icsb_icount;
2229 mp->m_sb.sb_ifree = cnt.icsb_ifree;
2232 mp->m_sb.sb_fdblocks = cnt.icsb_fdblocks;
2239 xfs_icsb_unlock_all_counters(mp);
2243 xfs_icsb_enable_counter(
2249 xfs_icsb_cnts_t *cntp;
2254 xfs_icsb_lock_all_counters(mp);
2259 cntp->icsb_icount = count + resid;
2262 cntp->icsb_ifree = count + resid;
2265 cntp->icsb_fdblocks = count + resid;
2274 xfs_icsb_unlock_all_counters(mp);
2278 xfs_icsb_sync_counters_locked(
2282 xfs_icsb_cnts_t cnt;
2284 xfs_icsb_count(mp, &cnt, flags);
2287 mp->m_sb.sb_icount = cnt.icsb_icount;
2289 mp->m_sb.sb_ifree = cnt.icsb_ifree;
2291 mp->m_sb.sb_fdblocks = cnt.icsb_fdblocks;
2298 xfs_icsb_sync_counters(
2302 spin_lock(&mp->m_sb_lock);
2303 xfs_icsb_sync_counters_locked(mp, flags);
2304 spin_unlock(&mp->m_sb_lock);
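/*
 * Balancing a counter means disabling it, summing the per-cpu values into
 * the superblock, and redistributing the total roughly evenly across the
 * online CPUs.  The *_CNTR_REENABLE thresholds keep a counter disabled once
 * the per-cpu share would be too small to be useful, which avoids
 * enable/disable thrashing as the filesystem approaches ENOSPC.
 */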
2323 #define XFS_ICSB_INO_CNTR_REENABLE (uint64_t)64
2324 #define XFS_ICSB_FDBLK_CNTR_REENABLE(mp) \
2325 (uint64_t)(512 + XFS_ALLOC_SET_ASIDE(mp))
2337 xfs_icsb_disable_counter(mp, field);
2342 count = mp->m_sb.sb_icount;
2343 resid = do_div(count, weight);
2344 if (count < max(min, XFS_ICSB_INO_CNTR_REENABLE))
2348 count = mp->m_sb.sb_ifree;
2349 resid = do_div(count, weight);
2350 if (count < max(min, XFS_ICSB_INO_CNTR_REENABLE))
2354 count = mp->m_sb.sb_fdblocks;
2355 resid = do_div(count, weight);
2356 if (count < max(min, XFS_ICSB_FDBLK_CNTR_REENABLE(mp)))
2365 xfs_icsb_enable_counter(mp, field, count, resid);
2374 spin_lock(&mp->m_sb_lock);
2376 spin_unlock(&mp->m_sb_lock);
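/*
 * xfs_icsb_modify_counters() is the fast path: it applies the delta to the
 * local CPU's counter under that counter's lock, and only falls back to the
 * global superblock under m_sb_lock (after rebalancing) when the counter is
 * disabled or the local value would go negative.
 */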
2380 xfs_icsb_modify_counters(
2386 xfs_icsb_cnts_t *icsbp;
2398 if (unlikely(xfs_icsb_counter_disabled(mp, field)))
2400 xfs_icsb_lock_cntr(icsbp);
2401 if (unlikely(xfs_icsb_counter_disabled(mp, field))) {
2402 xfs_icsb_unlock_cntr(icsbp);
2408 lcounter = icsbp->icsb_icount;
2411 goto balance_counter;
2412 icsbp->icsb_icount = lcounter;
2416 lcounter = icsbp->icsb_ifree;
2419 goto balance_counter;
2420 icsbp->icsb_ifree = lcounter;
2424 BUG_ON((mp->m_resblks - mp->m_resblks_avail) != 0);
2429 goto balance_counter;
2436 xfs_icsb_unlock_cntr(icsbp);
2456 if (!(xfs_icsb_counter_disabled(mp, field))) {
2457 xfs_icsb_unlock(mp);
2472 spin_lock(&mp->m_sb_lock);
2474 spin_unlock(&mp->m_sb_lock);
2484 xfs_icsb_unlock(mp);
2488 xfs_icsb_unlock_cntr(icsbp);
2508 xfs_icsb_unlock(mp);