/*
 * in_range(b, first, len): nonzero iff block b lies in the half-open
 * group [first, first + len), expressed via the inclusive upper bound
 * first + len - 1.  Function-like macro: arguments are evaluated more
 * than once, so callers must not pass expressions with side effects.
 */
#define in_range(b, first, len) (((first) <= (b)) && (((first) + (len) - 1) >= (b)))
39 static void ext3_get_group_no_and_offset(
struct super_block *
sb,
60 struct buffer_head ** bh)
62 unsigned long group_desc;
69 "block_group >= groups_count - "
70 "block_group = %d, groups_count = %lu",
81 "Group descriptor not loaded - "
82 "block_group = %d, group_desc = %lu, desc = %lu",
83 block_group, group_desc, offset);
96 struct buffer_head *bh)
103 group_first_block = ext3_group_first_block_no(sb, block_group);
107 offset = bitmap_blk - group_first_block;
114 offset = bitmap_blk - group_first_block;
121 offset = bitmap_blk - group_first_block;
123 offset + EXT3_SB(sb)->s_itb_per_group,
125 if (next_zero_bit >= offset + EXT3_SB(sb)->s_itb_per_group)
131 "Invalid block bitmap - "
132 "block_group = %d, block = %lu",
133 block_group, bitmap_blk);
147 static struct buffer_head *
148 read_block_bitmap(
struct super_block *sb,
unsigned int block_group)
151 struct buffer_head * bh =
NULL;
157 trace_ext3_read_block_bitmap(sb, block_group);
159 bh = sb_getblk(sb, bitmap_blk);
162 "Cannot read block bitmap - "
163 "block_group = %d, block_bitmap = %u",
173 "Cannot read block bitmap - "
174 "block_group = %d, block_bitmap = %u",
178 ext3_valid_block_bitmap(sb, desc, block_group, bh);
219 printk(
"Block Allocation Reservation Windows Map (%s):\n", fn);
223 printk(
"reservation window 0x%p "
224 "start: %lu, end: %lu\n",
225 rsv, rsv->rsv_start, rsv->rsv_end);
226 if (rsv->rsv_start && rsv->rsv_start >= rsv->rsv_end) {
227 printk(
"Bad reservation %p (start >= end)\n",
231 if (prev && prev->rsv_end >= rsv->rsv_start) {
232 printk(
"Bad reservation %p (prev->end >= start)\n",
238 printk(
"Restarting reservation walk in verbose mode\n");
246 printk(
"Window map complete.\n");
249 #define rsv_window_dump(root, verbose) \
250 __rsv_window_dump((root), (verbose), __func__)
252 #define rsv_window_dump(root, verbose) do {} while (0)
277 group_first_block = ext3_group_first_block_no(sb, group);
281 (rsv->
_rsv_end < group_first_block))
283 if ((grp_goal >= 0) && ((grp_goal + group_first_block < rsv->
_rsv_start)
284 || (grp_goal + group_first_block > rsv->
_rsv_end)))
312 else if (goal > rsv->rsv_end)
323 if (rsv->rsv_start > goal) {
340 struct rb_root *root = &EXT3_SB(sb)->s_rsv_window_root;
348 trace_ext3_rsv_window_add(sb, rsv);
354 if (start < this->rsv_start)
356 else if (start > this->
rsv_end)
364 rb_link_node(node, parent, p);
377 static void rsv_window_remove(
struct super_block *sb,
475 trace_ext3_discard_reservation(inode, rsv);
476 rsv_window_remove(inode->
i_sb, rsv);
478 spin_unlock(rsv_lock);
492 unsigned long *pdquot_freed_blocks)
494 struct buffer_head *bitmap_bh =
NULL;
495 struct buffer_head *gd_bh;
506 *pdquot_freed_blocks = 0;
510 block + count < block ||
513 "Freeing blocks not in datazone - "
514 "block = "E3FSBLK", count = %lu", block, count);
518 ext3_debug (
"freeing block(s) %lu-%lu\n", block, block + count - 1);
535 bitmap_bh = read_block_bitmap(sb, block_group);
549 "Freeing blocks in system zones - "
550 "Block = "E3FSBLK", count = %lu",
560 BUFFER_TRACE(bitmap_bh,
"getting undo access");
570 BUFFER_TRACE(gd_bh,
"get_write_access");
575 jbd_lock_bh_state(bitmap_bh);
577 for (i = 0, group_freed = 0; i <
count; i++) {
581 #ifdef CONFIG_JBD_DEBUG
582 jbd_unlock_bh_state(bitmap_bh);
584 struct buffer_head *debug_bh;
585 debug_bh = sb_find_get_block(sb, block + i);
587 BUFFER_TRACE(debug_bh,
"Deleted!");
588 if (!bh2jh(bitmap_bh)->b_committed_data)
589 BUFFER_TRACE(debug_bh,
590 "No committed data in bitmap");
591 BUFFER_TRACE2(debug_bh, bitmap_bh,
"bitmap");
595 jbd_lock_bh_state(bitmap_bh);
597 if (need_resched()) {
598 jbd_unlock_bh_state(bitmap_bh);
600 jbd_lock_bh_state(bitmap_bh);
620 BUFFER_TRACE(bitmap_bh,
"set in b_committed_data");
621 J_ASSERT_BH(bitmap_bh,
622 bh2jh(bitmap_bh)->b_committed_data !=
NULL);
624 bh2jh(bitmap_bh)->b_committed_data);
631 BUFFER_TRACE(bitmap_bh,
"clear bit");
633 bit + i, bitmap_bh->b_data)) {
634 jbd_unlock_bh_state(bitmap_bh);
636 "bit already cleared for block "E3FSBLK,
638 jbd_lock_bh_state(bitmap_bh);
639 BUFFER_TRACE(bitmap_bh,
"bit already cleared");
644 jbd_unlock_bh_state(bitmap_bh);
646 spin_lock(sb_bgl_lock(sbi, block_group));
648 spin_unlock(sb_bgl_lock(sbi, block_group));
652 BUFFER_TRACE(bitmap_bh,
"dirtied bitmap block");
656 BUFFER_TRACE(gd_bh,
"dirtied group descriptor block");
659 *pdquot_freed_blocks += group_freed;
661 if (overflow && !err) {
684 unsigned long dquot_freed_blocks;
686 trace_ext3_free_blocks(inode, block, count);
688 if (dquot_freed_blocks)
689 dquot_free_block(inode, dquot_freed_blocks);
713 static int ext3_test_allocatable(
ext3_grpblk_t nr,
struct buffer_head *bh)
721 jbd_lock_bh_state(bh);
726 jbd_unlock_bh_state(bh);
747 while (start < maxblocks) {
749 if (next >= maxblocks)
751 if (ext3_test_allocatable(next, bh))
753 jbd_lock_bh_state(bh);
757 jbd_unlock_bh_state(bh);
776 find_next_usable_block(
ext3_grpblk_t start,
struct buffer_head *bh,
792 if (end_goal > maxblocks)
793 end_goal = maxblocks;
795 if (here < end_goal && ext3_test_allocatable(here, bh))
804 p = bh->b_data + (here >> 3);
805 r =
memscan(p, 0, ((maxblocks + 7) >> 3) - (here >> 3));
806 next = (r - bh->b_data) << 3;
808 if (next < maxblocks && next >= start && ext3_test_allocatable(next, bh))
816 here = bitmap_search_next_usable_block(here, bh, maxblocks);
840 jbd_lock_bh_state(bh);
847 jbd_unlock_bh_state(bh);
882 unsigned long num = 0;
886 group_first_block = ext3_group_first_block_no(sb, group);
888 start = my_rsv->
_rsv_start - group_first_block;
892 end = my_rsv->
_rsv_end - group_first_block + 1;
896 if ((start <= grp_goal) && (grp_goal < end))
911 if (grp_goal < 0 || !ext3_test_allocatable(grp_goal, bitmap_bh)) {
912 grp_goal = find_next_usable_block(start, bitmap_bh, end);
918 for (i = 0; i < 7 && grp_goal > start &&
919 ext3_test_allocatable(grp_goal - 1,
927 if (!claim_block(sb_bgl_lock(EXT3_SB(sb), group),
928 grp_goal, bitmap_bh)) {
941 while (num < *count && grp_goal < end
942 && ext3_test_allocatable(grp_goal, bitmap_bh)
943 && claim_block(sb_bgl_lock(EXT3_SB(sb), group),
944 grp_goal, bitmap_bh)) {
949 return grp_goal - num;
990 static int find_next_reservable_window(
1011 cur = rsv->rsv_end + 1;
1022 if (cur > last_block)
1036 if (cur + size <= rsv->rsv_start) {
1055 if ((prev != my_rsv) && (!rsv_is_empty(&my_rsv->
rsv_window)))
1056 rsv_window_remove(sb, my_rsv);
1065 my_rsv->rsv_start =
cur;
1066 my_rsv->rsv_end = cur + size - 1;
1114 unsigned int group,
struct buffer_head *bitmap_bh)
1119 struct rb_root *fs_rsv_root = &EXT3_SB(sb)->s_rsv_window_root;
1122 spinlock_t *rsv_lock = &EXT3_SB(sb)->s_rsv_window_lock;
1124 group_first_block = ext3_group_first_block_no(sb, group);
1128 start_block = group_first_block;
1130 start_block = grp_goal + group_first_block;
1132 trace_ext3_alloc_new_reservation(sb, start_block);
1150 if ((my_rsv->rsv_start <= group_end_block) &&
1151 (my_rsv->rsv_end > group_end_block) &&
1152 (start_block >= my_rsv->rsv_start))
1156 (my_rsv->rsv_end - my_rsv->rsv_start + 1) / 2)) {
1170 spin_lock(rsv_lock);
1174 search_head = search_reserve_window(fs_rsv_root, start_block);
1184 ret = find_next_reservable_window(search_head, my_rsv, sb,
1185 start_block, group_end_block);
1189 rsv_window_remove(sb, my_rsv);
1190 spin_unlock(rsv_lock);
1205 spin_unlock(rsv_lock);
1206 first_free_block = bitmap_search_next_usable_block(
1207 my_rsv->rsv_start - group_first_block,
1208 bitmap_bh, group_end_block - group_first_block + 1);
1210 if (first_free_block < 0) {
1215 spin_lock(rsv_lock);
1217 rsv_window_remove(sb, my_rsv);
1218 spin_unlock(rsv_lock);
1222 start_block = first_free_block + group_first_block;
1227 if (start_block >= my_rsv->rsv_start &&
1228 start_block <= my_rsv->rsv_end) {
1229 trace_ext3_reserved(sb, start_block, my_rsv);
1238 search_head = my_rsv;
1239 spin_lock(rsv_lock);
1265 spinlock_t *rsv_lock = &EXT3_SB(sb)->s_rsv_window_lock;
1267 if (!spin_trylock(rsv_lock))
1273 my_rsv->rsv_end +=
size;
1277 if ((next_rsv->rsv_start - my_rsv->rsv_end - 1) >= size)
1278 my_rsv->rsv_end +=
size;
1280 my_rsv->rsv_end = next_rsv->rsv_start - 1;
1282 spin_unlock(rsv_lock);
1315 ext3_try_to_allocate_with_rsv(
struct super_block *sb, handle_t *handle,
1316 unsigned int group,
struct buffer_head *bitmap_bh,
1319 unsigned long *count,
int *errp)
1324 unsigned long num = *
count;
1333 BUFFER_TRACE(bitmap_bh,
"get undo access for new block");
1346 if (my_rsv ==
NULL ) {
1347 ret = ext3_try_to_allocate(sb, handle, group, bitmap_bh,
1348 grp_goal, count,
NULL);
1357 group_first_block = ext3_group_first_block_no(sb, group);
1376 if (rsv_is_empty(&my_rsv->
rsv_window) || (ret < 0) ||
1378 grp_goal, group, sb)) {
1381 ret = alloc_new_reservation(my_rsv, grp_goal, sb,
1386 if (!goal_in_my_reservation(&my_rsv->
rsv_window,
1387 grp_goal, group, sb))
1389 }
else if (grp_goal >= 0) {
1390 int curr = my_rsv->rsv_end -
1391 (grp_goal + group_first_block) + 1;
1394 try_to_extend_reservation(my_rsv, sb,
1398 if ((my_rsv->rsv_start > group_last_block) ||
1399 (my_rsv->rsv_end < group_first_block)) {
1403 ret = ext3_try_to_allocate(sb, handle, group, bitmap_bh,
1414 BUFFER_TRACE(bitmap_bh,
"journal_dirty_metadata for "
1424 BUFFER_TRACE(bitmap_bh,
"journal_release_buffer");
1425 ext3_journal_release_buffer(handle, bitmap_bh);
1435 static int ext3_has_free_blocks(
struct ext3_sb_info *sbi,
int use_reservation)
1464 if (!ext3_has_free_blocks(EXT3_SB(sb), 0) || (*retries)++ > 3)
1467 jbd_debug(1,
"%s: retrying operation after ENOSPC\n", sb->
s_id);
1489 struct buffer_head *bitmap_bh =
NULL;
1490 struct buffer_head *gdp_bh;
1498 int performed_allocation = 0;
1506 unsigned short windowsz = 0;
1508 static int goal_hits, goal_attempts;
1510 unsigned long ngroups;
1511 unsigned long num = *
count;
1519 err = dquot_alloc_block(inode, num);
1525 trace_ext3_request_blocks(inode, goal, num);
1538 block_i = EXT3_I(inode)->i_block_alloc_info;
1539 if (block_i && ((windowsz = block_i->
rsv_window_node.rsv_goal_size) > 0))
1542 if (!ext3_has_free_blocks(sbi,
IS_NOQUOTA(inode))) {
1555 goal_group = group_no;
1566 if (my_rsv && (free_blocks < windowsz)
1567 && (free_blocks > 0)
1571 if (free_blocks > 0) {
1574 bitmap_bh = read_block_bitmap(sb, group_no);
1577 grp_alloc_blk = ext3_try_to_allocate_with_rsv(sb, handle,
1578 group_no, bitmap_bh, grp_target_blk,
1579 my_rsv, &num, &fatal);
1582 if (grp_alloc_blk >= 0)
1586 ngroups = EXT3_SB(sb)->s_groups_count;
1593 for (bgi = 0; bgi < ngroups; bgi++) {
1595 if (group_no >= ngroups)
1612 if (my_rsv && (free_blocks <= (windowsz/2)))
1616 bitmap_bh = read_block_bitmap(sb, group_no);
1622 grp_alloc_blk = ext3_try_to_allocate_with_rsv(sb, handle,
1623 group_no, bitmap_bh, -1, my_rsv,
1627 if (grp_alloc_blk >= 0)
1640 group_no = goal_group;
1652 BUFFER_TRACE(gdp_bh,
"get_write_access");
1657 ret_block = grp_alloc_blk + ext3_group_first_block_no(sb, group_no);
1662 EXT3_SB(sb)->s_itb_per_group) ||
1664 EXT3_SB(sb)->s_itb_per_group)) {
1666 "Allocating block in system zone - "
1667 "blocks from "E3FSBLK", length %lu",
1676 performed_allocation = 1;
1678 #ifdef CONFIG_JBD_DEBUG
1680 struct buffer_head *debug_bh;
1683 debug_bh = sb_find_get_block(sb, ret_block);
1685 BUFFER_TRACE(debug_bh,
"state when allocated");
1686 BUFFER_TRACE2(debug_bh, bitmap_bh,
"bitmap state");
1690 jbd_lock_bh_state(bitmap_bh);
1691 spin_lock(sb_bgl_lock(sbi, group_no));
1692 if (buffer_jbd(bitmap_bh) && bh2jh(bitmap_bh)->b_committed_data) {
1695 for (i = 0; i < num; i++) {
1697 bh2jh(bitmap_bh)->b_committed_data)) {
1698 printk(
"%s: block was unexpectedly set in "
1699 "b_committed_data\n", __func__);
1704 spin_unlock(sb_bgl_lock(sbi, group_no));
1705 jbd_unlock_bh_state(bitmap_bh);
1710 "block("E3FSBLK") >= blocks count(%d) - "
1711 "block_group = %d, es == %p ", ret_block,
1721 ext3_debug(
"allocating block %lu. Goal hits %d of %d.\n",
1722 ret_block, goal_hits, goal_attempts);
1724 spin_lock(sb_bgl_lock(sbi, group_no));
1726 spin_unlock(sb_bgl_lock(sbi, group_no));
1729 BUFFER_TRACE(gdp_bh,
"journal_dirty_metadata for group descriptor");
1741 dquot_free_block(inode, *count-num);
1745 trace_ext3_allocate_blocks(inode, goal, num,
1746 (
unsigned long long)ret_block);
1760 if (!performed_allocation)
1761 dquot_free_block(inode, *count);
1769 unsigned long count = 1;
1785 unsigned long ngroups = EXT3_SB(sb)->s_groups_count;
1790 struct buffer_head *bitmap_bh =
NULL;
1792 es = EXT3_SB(sb)->s_es;
1798 for (i = 0; i < ngroups; i++) {
1804 bitmap_bh = read_block_bitmap(sb, i);
1805 if (bitmap_bh ==
NULL)
1809 printk(
"group %d: stored = %d, counted = %lu\n",
1817 desc_count, bitmap_count);
1818 return bitmap_count;
1822 for (i = 0; i < ngroups; i++) {
1833 static inline int test_root(
int a,
int b)
1842 static int ext3_group_sparse(
int group)
1848 return (test_root(group, 7) || test_root(group, 5) ||
1849 test_root(group, 3));
1864 !ext3_group_sparse(group))
1869 static unsigned long ext3_bg_num_gdb_meta(
struct super_block *sb,
int group)
1875 if (group == first || group == first + 1 || group == last)
1880 static unsigned long ext3_bg_num_gdb_nometa(
struct super_block *sb,
int group)
1896 unsigned long first_meta_bg =
1901 metagroup < first_meta_bg)
1902 return ext3_bg_num_gdb_nometa(sb,group);
1904 return ext3_bg_num_gdb_meta(sb,group);
1932 struct buffer_head *gdp_bh, *bitmap_bh =
NULL;
1934 int err = 0, ret = 0;
1941 return PTR_ERR(handle);
1943 bitmap_bh = read_block_bitmap(sb, group);
1949 BUFFER_TRACE(bitmap_bh,
"getting undo access");
1960 BUFFER_TRACE(gdp_bh,
"get_write_access");
1969 while (start <= max) {
1970 start = bitmap_search_next_usable_block(start, bitmap_bh, max);
1980 && claim_block(sb_bgl_lock(sbi, group),
1990 ext3_group_first_block_no(sb, group);
1993 spin_lock(sb_bgl_lock(sbi, group));
1995 spin_unlock(sb_bgl_lock(sbi, group));
1998 free_blocks -= next -
start;
2000 if ((next - start) < minblocks)
2003 trace_ext3_discard_blocks(sb, discard_block, next - start);
2005 err = sb_issue_discard(sb, discard_block, next - start,
2007 count += (next -
start);
2014 for (bit = start; bit <
next; bit++) {
2015 BUFFER_TRACE(bitmap_bh,
"clear bit");
2017 bit, bitmap_bh->b_data)) {
2019 "bit already cleared for block "E3FSBLK,
2020 (
unsigned long)bit);
2021 BUFFER_TRACE(bitmap_bh,
"bit already cleared");
2028 spin_lock(sb_bgl_lock(sbi, group));
2030 spin_unlock(sb_bgl_lock(sbi, group));
2037 "returned error %d\n", err);
2041 if (fatal_signal_pending(
current)) {
2049 if (free_blocks < minblocks)
2054 BUFFER_TRACE(bitmap_bh,
"dirtied bitmap block");
2060 BUFFER_TRACE(gdp_bh,
"dirtied group descriptor block");
2065 ext3_debug(
"trimmed %d blocks in the group %d\n",
2091 unsigned long group, first_group, last_group;
2096 le32_to_cpu(EXT3_SB(sb)->s_es->s_first_data_block);
2105 start >= max_blks ||
2108 if (end >= max_blks)
2110 if (end <= first_data_blk)
2112 if (start < first_data_blk)
2113 start = first_data_blk;
2119 &first_group, &first_block);
2121 &last_group, &last_block);
2126 for (group = first_group; group <= last_group; group++) {
2137 if (group == last_group)
2141 ret = ext3_trim_all_free(sb, group, first_block,