15 #include <linux/time.h>
18 #include <linux/stat.h>
19 #include <linux/string.h>
22 #include <linux/random.h>
23 #include <linux/bitops.h>
25 #include <asm/byteorder.h>
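/*
 * The lines below are from the bitmap-tail helper (ext4_mark_bitmap_end()):
 * bits between start_bit and end_bit do not correspond to real inodes, so
 * they are forced to 1 - bit by bit up to the next byte boundary, then a
 * whole byte at a time with memset().
 */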
57 if (start_bit >= end_bit)
60 ext4_debug("mark end bits +%d through +%d used\n", start_bit, end_bit);
61 for (i = start_bit; i < ((start_bit + 7) & ~7UL); i++)
64 memset(bitmap + (i >> 3), 0xff, (end_bit - i) >> 3);
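/*
 * ext4_init_inode_bitmap() initializes the inode bitmap of an uninitialized
 * group: if the group descriptor checksum does not verify it reports the
 * error and effectively makes the group read-only; otherwise it zeroes the
 * bitmap and marks the bits past the last usable inode as in use.
 */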
68 static unsigned ext4_init_inode_bitmap(struct super_block *sb,
69 struct buffer_head *bh,
73 J_ASSERT_BH(bh, buffer_locked(bh));
78 ext4_error(sb, "Checksum bad for group %u", block_group);
101 set_buffer_uptodate(bh);
102 set_bitmap_uptodate(bh);
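/*
 * ext4_read_inode_bitmap() returns the buffer_head holding a group's inode
 * bitmap: it initializes the bitmap in place for uninitialized groups under
 * ext4_lock_group(), otherwise reads it from disk, and verifies it before
 * use; failures are reported with ext4_error() and NULL is returned.
 */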
114 static struct buffer_head *
118 struct buffer_head *bh = NULL;
126 bh = sb_getblk(sb, bitmap_blk);
128 ext4_error(sb, "Cannot read inode bitmap - "
129 "block_group = %u, inode_bitmap = %llu",
130 block_group, bitmap_blk);
133 if (bitmap_uptodate(bh))
137 if (bitmap_uptodate(bh)) {
142 ext4_lock_group(sb, block_group);
144 ext4_init_inode_bitmap(sb, bh, block_group, desc);
145 set_bitmap_uptodate(bh);
146 set_buffer_uptodate(bh);
147 set_buffer_verified(bh);
148 ext4_unlock_group(sb, block_group);
152 ext4_unlock_group(sb, block_group);
154 if (buffer_uptodate(bh)) {
159 set_bitmap_uptodate(bh);
166 trace_ext4_load_inode_bitmap(sb, block_group);
171 if (!buffer_uptodate(bh)) {
173 ext4_error(sb, "Cannot read inode bitmap - "
174 "block_group = %u, inode_bitmap = %llu",
175 block_group, bitmap_blk);
180 ext4_lock_group(sb, block_group);
181 if (!buffer_verified(bh) &&
184 ext4_unlock_group(sb, block_group);
186 ext4_error(sb, "Corrupt inode bitmap - block_group = %u, "
187 "inode_bitmap = %llu", block_group, bitmap_blk);
190 ext4_unlock_group(sb, block_group);
191 set_buffer_verified(bh);
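/*
 * ext4_free_inode() releases an on-disk inode: it clears the inode's bit in
 * the group's inode bitmap under ext4_lock_group(), updates the free-inode
 * (and, for directories, used-dirs) counts in the group descriptor and the
 * per-cpu/flex-group counters, and marks both metadata buffers dirty in the
 * journal.
 */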
216 struct buffer_head *bitmap_bh = NULL;
217 struct buffer_head *bh2;
222 struct ext4_sb_info *sbi;
227 "nonexistent device\n", __func__, __LINE__);
232 __func__, __LINE__, inode->i_ino,
245 trace_ext4_free_inode(inode);
263 ext4_error(sb, "reserved or nonexistent inode %lu", ino);
268 bitmap_bh = ext4_read_inode_bitmap(sb, block_group);
272 BUFFER_TRACE(bitmap_bh, "get_write_access");
280 BUFFER_TRACE(bh2, "get_write_access");
283 ext4_lock_group(sb, block_group);
285 if (fatal || !cleared) {
286 ext4_unlock_group(sb, block_group);
295 percpu_counter_dec(&sbi->s_dirs_counter);
300 ext4_unlock_group(sb, block_group);
302 percpu_counter_inc(&sbi->s_freeinodes_counter);
303 if (sbi->s_log_groups_per_flex) {
306 atomic_inc(&sbi->s_flex_groups[f].free_inodes);
310 BUFFER_TRACE(bh2, "call ext4_handle_dirty_metadata");
314 BUFFER_TRACE(bitmap_bh, "call ext4_handle_dirty_metadata");
319 ext4_error(sb, "bit already cleared for inode %lu", ino);
323 ext4_std_error(sb, fatal);
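/*
 * find_group_orlov() implements the Orlov allocator used for directories:
 * top-level directories are spread across groups with above-average free
 * inodes and the fewest directories, while other directories prefer a group
 * near the parent that still satisfies the free-inode, free-cluster and
 * directory-count thresholds computed below.
 */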
388 struct ext4_sb_info *sbi = EXT4_SB(sb);
391 unsigned int freei, avefreei, grp_free;
394 int max_dirs, min_inodes;
399 int flex_size = ext4_flex_bg_size(sbi);
402 ngroups = real_ngroups;
404 ngroups = (real_ngroups + flex_size - 1) >>
405 sbi->s_log_groups_per_flex;
406 parent_group >>= sbi->s_log_groups_per_flex;
409 freei = percpu_counter_read_positive(&sbi->s_freeinodes_counter);
410 avefreei = freei / ngroups;
412 percpu_counter_read_positive(&sbi->s_freeclusters_counter));
414 do_div(avefreec, ngroups);
415 ndirs = percpu_counter_read_positive(&sbi->s_dirs_counter);
418 ((parent == sb->s_root->d_inode) ||
420 int best_ndir = inodes_per_group;
425 hinfo.seed = sbi->s_hash_seed;
430 parent_group = (unsigned)grp % ngroups;
431 for (i = 0; i < ngroups; i++) {
432 g = (parent_group + i) % ngroups;
433 get_orlov_stats(sb, g, flex_size, &stats);
449 if (flex_size == 1) {
462 for (i = 0; i < flex_size; i++) {
463 if (grp+i >= real_ngroups)
474 max_dirs = ndirs / ngroups + inodes_per_group / 16;
475 min_inodes = avefreei - inodes_per_group*flex_size / 4;
478 min_clusters = avefreec - EXT4_CLUSTERS_PER_GROUP(sb)*flex_size / 4;
484 if (EXT4_I(parent)->i_last_alloc_group != ~0) {
485 parent_group = EXT4_I(parent)->i_last_alloc_group;
487 parent_group >>= sbi->s_log_groups_per_flex;
490 for (i = 0; i < ngroups; i++) {
491 grp = (parent_group + i) % ngroups;
492 get_orlov_stats(sb, grp, flex_size, &stats);
503 ngroups = real_ngroups;
504 avefreei = freei / ngroups;
506 parent_group = EXT4_I(parent)->i_block_group;
507 for (i = 0; i < ngroups; i++) {
508 grp = (parent_group + i) % ngroups;
512 if (grp_free && grp_free >= avefreei) {
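/*
 * find_group_other() picks a group for non-directory inodes: first the
 * parent's group (or its flex_bg neighbourhood), then the last group the
 * parent allocated from, then a quadratic-hash walk seeded by the parent's
 * inode number, and finally a linear scan of all groups.
 */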
537 int flex_size = ext4_flex_bg_size(EXT4_SB(sb));
550 parent_group &= ~(flex_size-1);
551 last = parent_group + flex_size;
554 for (i = parent_group; i < last; i++) {
561 if (!retry && EXT4_I(parent)->i_last_alloc_group != ~0) {
563 parent_group = EXT4_I(parent)->i_last_alloc_group;
571 *group = parent_group + flex_size;
572 if (*group > ngroups)
574 return find_group_orlov(sb, parent, group, mode, NULL);
580 *group = parent_group;
595 *group = (*group + parent->i_ino) % ngroups;
601 for (i = 1; i < ngroups; i <<= 1) {
603 if (*group >= ngroups)
615 *group = parent_group;
616 for (i = 0; i < ngroups; i++) {
617 if (++*group >= ngroups)
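/*
 * The inode allocation entry point (__ext4_new_inode()): it chooses a group
 * via find_group_orlov()/find_group_other() (or the s_inode_goal hint),
 * scans that group's inode bitmap for a free bit, claims it under the group
 * lock, updates the group descriptor, checksums and counters, and
 * initializes the new in-core inode.
 */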
641 struct buffer_head *inode_bitmap_bh = NULL;
642 struct buffer_head *group_desc_bh;
644 unsigned long ino = 0;
648 struct ext4_sb_info *sbi;
656 return ERR_PTR(-EPERM);
659 ngroups = ext4_get_groups_count(sb);
660 trace_ext4_request_inode(dir, mode);
668 goal = sbi->s_inode_goal;
670 if (goal && goal <= le32_to_cpu(sbi->s_es->s_inodes_count)) {
678 ret2 = find_group_orlov(sb, dir, &group, mode, qstr);
680 ret2 = find_group_other(sb, dir, &group, mode);
693 for (i = 0; i < ngroups; i++, ino = 0) {
704 if (++group == ngroups)
709 brelse(inode_bitmap_bh);
710 inode_bitmap_bh = ext4_read_inode_bitmap(sb, group);
711 if (!inode_bitmap_bh)
714 repeat_in_this_group:
716 inode_bitmap_bh->b_data,
719 if (++group == ngroups)
724 ext4_error(sb, "reserved inode found cleared - "
725 "inode=%lu", ino + 1);
728 BUFFER_TRACE(inode_bitmap_bh, "get_write_access");
732 ext4_lock_group(sb, group);
734 ext4_unlock_group(sb, group);
739 goto repeat_in_this_group;
745 BUFFER_TRACE(inode_bitmap_bh, "call ext4_handle_dirty_metadata");
751 if (ext4_has_group_desc_csum(sb) &&
753 struct buffer_head *block_bitmap_bh;
756 BUFFER_TRACE(block_bitmap_bh, "get block bitmap access");
759 brelse(block_bitmap_bh);
763 BUFFER_TRACE(block_bitmap_bh, "dirty block bitmap");
765 brelse(block_bitmap_bh);
768 ext4_lock_group(sb, group);
777 ext4_unlock_group(sb, group);
783 BUFFER_TRACE(group_desc_bh, "get_write_access");
789 if (ext4_has_group_desc_csum(sb)) {
791 struct ext4_group_info *grp = ext4_get_group_info(sb, group);
794 ext4_lock_group(sb, group);
811 ext4_lock_group(sb, group);
817 if (sbi->s_log_groups_per_flex) {
823 if (ext4_has_group_desc_csum(sb)) {
828 ext4_unlock_group(sb, group);
830 BUFFER_TRACE(group_desc_bh, "call ext4_handle_dirty_metadata");
835 percpu_counter_dec(&sbi->s_freeinodes_counter);
837 percpu_counter_inc(&sbi->s_dirs_counter);
839 if (sbi->s_log_groups_per_flex) {
840 flex_group = ext4_flex_group(sbi, group);
841 atomic_dec(&sbi->s_flex_groups[flex_group].free_inodes);
845 i_uid_write(inode, owner[0]);
846 i_gid_write(inode, owner[1]);
858 ext4_current_time(inode);
874 ext4_handle_sync(handle);
883 spin_lock(&sbi->s_next_gen_lock);
885 spin_unlock(&sbi->s_next_gen_lock);
894 csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&inum,
900 ext4_clear_state_flags(ei);
901 ext4_set_inode_state(inode, EXT4_STATE_NEW);
927 if (ext4_handle_valid(handle)) {
928 ei->i_sync_tid = handle->h_transaction->t_tid;
934 ext4_std_error(sb, err);
939 trace_ext4_allocate_inode(inode, dir, mode);
942 ext4_std_error(sb, err);
947 brelse(inode_bitmap_bh);
959 brelse(inode_bitmap_bh);
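/*
 * ext4_orphan_get() re-reads an inode found on the on-disk orphan list,
 * sanity-checking the inode number and its bit in the inode bitmap before
 * trusting it; inconsistencies are reported with ext4_warning().
 */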
969 struct buffer_head *bitmap_bh;
975 ext4_warning(sb, "bad orphan ino %lu! e2fsck was run?", ino);
981 bitmap_bh = ext4_read_inode_bitmap(sb, block_group);
983 ext4_warning(sb, "inode bitmap error for orphan %lu", ino);
1012 err = PTR_ERR(inode);
1015 ext4_warning(sb, "bad orphan inode %lu! e2fsck was run?", ino);
1017 bit, (unsigned long long)bitmap_bh->b_blocknr,
1034 return ERR_PTR(err);
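/*
 * ext4_count_free_inodes() sums the free-inode counts from all group
 * descriptors; with EXT4FS_DEBUG it also recounts each inode bitmap and
 * prints a comparison of the stored and computed totals.
 */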
1044 unsigned long bitmap_count, x;
1045 struct buffer_head *bitmap_bh = NULL;
1051 for (i = 0; i < ngroups; i++) {
1057 bitmap_bh = ext4_read_inode_bitmap(sb, i);
1069 "stored = %u, computed = %lu, %lu\n",
1074 for (i = 0; i < ngroups; i++) {
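/* ext4_count_dirs() sums the used_dirs count of every group descriptor. */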
1088 unsigned long count = 0;
1091 for (i = 0; i < ngroups; i++) {
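/*
 * ext4_init_inode_table() performs lazy inode table initialization for one
 * group: it computes how many inode table blocks are still untouched from
 * the descriptor's itable-unused count, zeroes them with sb_issue_zeroout(),
 * and then marks the group's inode table as zeroed in the descriptor under
 * the group lock.
 */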
1111 struct ext4_group_info *grp = ext4_get_group_info(sb, group);
1112 struct ext4_sb_info *sbi = EXT4_SB(sb);
1114 struct buffer_head *group_desc_bh;
1117 int num, ret = 0, used_blks = 0;
1137 if (IS_ERR(handle)) {
1138 ret = PTR_ERR(handle);
1151 sbi->s_inodes_per_block);
1153 if ((used_blks < 0) || (used_blks > sbi->s_itb_per_group)) {
1154 ext4_error(sb, "Something is wrong with group %u: "
1155 "used itable blocks: %d; "
1156 "itable unused count: %u",
1164 num = sbi->s_itb_per_group - used_blks;
1166 BUFFER_TRACE(group_desc_bh, "get_write_access");
1180 ext4_debug("going to zero out inode table in group %d\n",
1182 ret = sb_issue_zeroout(sb, blk, num, GFP_NOFS);
1189 ext4_lock_group(sb, group);
1192 ext4_unlock_group(sb, group);
1194 BUFFER_TRACE(group_desc_bh,
1195 "call ext4_handle_dirty_metadata");