18 #include <linux/types.h>
/*
 * __ocfs2_move_extent() — FRAGMENT ONLY: this extract is missing large
 * interior spans (note the jumps in the embedded line numbers), so only
 * the statements visible here are annotated.
 * NOTE(review): appears to replace an extent's physical start cluster
 * (p_cpos -> new_p_cpos) under a journal handle — confirm against the
 * full source.
 */
58 static int __ocfs2_move_extent(handle_t *
handle,
/* block number of the extent's old physical start cluster */
70 u64 old_blkno = ocfs2_clusters_to_blocks(inode->
i_sb, p_cpos);
73 p_cpos, new_p_cpos, len);
/* zero the replacement extent record before it is filled in */
79 memset(&replace_rec, 0,
sizeof(replace_rec));
/* error text for a failed extent lookup at cpos */
103 "Inode %llu has an extent at cpos %u which can no "
104 "longer be found.\n",
105 (
unsigned long long)ino, cpos);
120 context->
et.et_root_bh,
/* splice replace_rec into the tree using the context's metadata allocator */
128 &replace_rec, context->
meta_ac,
145 ocfs2_blocks_to_clusters(osb->
sb,
/*
 * ocfs2_lock_allocators_move_extents() — FRAGMENT: interior lines are
 * missing from this extract.  Reserves allocator resources and journal
 * credits for moving clusters_to_move clusters and splitting
 * extents_to_split extents.
 */
165 static int ocfs2_lock_allocators_move_extents(
struct inode *inode,
167 u32 clusters_to_move,
168 u32 extents_to_split,
174 int ret, num_free_extents;
/* worst case: each split costs 2 records, each moved cluster 1 record */
175 unsigned int max_recs_needed = 2 * extents_to_split + clusters_to_move;
/* a negative free-extent count is an error code — propagate it */
179 if (num_free_extents < 0) {
180 ret = num_free_extents;
/*
 * Reserve room to grow the extent tree if there are no free records,
 * or (on a sparse-alloc fs) fewer than the worst case needed.
 */
185 if (!num_free_extents ||
186 (ocfs2_sparse_alloc(osb) && num_free_extents < max_recs_needed))
187 extra_blocks += ocfs2_extend_meta_needed(et->
et_root_el);
/* journal credits to extend the tree by clusters_to_move (+2 slack) */
203 *credits += ocfs2_calc_extend_credits(osb->
sb, et->
et_root_el,
204 clusters_to_move + 2);
206 mlog(0,
"reserve metadata_blocks: %d, data_clusters: %u, credits: %d\n",
207 extra_blocks, clusters_to_move, *credits);
/*
 * NOTE(review): FRAGMENT of what appears to be ocfs2_defrag_extent()
 * (the signature line itself is missing from this extract); only
 * trailing parameters and scattered body lines are visible.
 */
227 u32 cpos,
u32 phys_cpos,
u32 *len,
int ext_flags)
229 int ret, credits = 0, extra_blocks = 0, partial = context->
partial;
231 struct inode *inode = context->
inode;
235 u32 new_phys_cpos, new_len;
/* block number of the extent's current physical start */
236 u64 phys_blkno = ocfs2_clusters_to_blocks(inode->
i_sb, phys_cpos);
240 BUG_ON(!(OCFS2_I(inode)->ip_dyn_features &
/* reserve allocators/credits for moving *len clusters with 1 split */
264 ret = ocfs2_lock_allocators_move_extents(inode, &context->
et, *len, 1,
267 extra_blocks, &credits);
286 goto out_unlock_mutex;
/* journal start failed: propagate the error code */
291 if (IS_ERR(handle)) {
292 ret = PTR_ERR(handle);
294 goto out_unlock_mutex;
298 &new_phys_cpos, &new_len);
/* the allocator may hand back fewer clusters than requested */
310 if (new_len != *len) {
311 mlog(0,
"len_claimed: %u, len: %u\n", new_len, *len);
319 mlog(0,
"cpos: %u, phys_cpos: %u, new_phys_cpos: %u\n", cpos,
320 phys_cpos, new_phys_cpos);
/* relocate new_len clusters from phys_cpos to new_phys_cpos */
322 ret = __ocfs2_move_extent(handle, context, cpos, new_len, phys_cpos,
323 new_phys_cpos, ext_flags);
/* partial moves only matter when fewer clusters were claimed */
327 if (partial && (new_len != *len))
/*
 * ocfs2_find_victim_alloc_group() — FRAGMENT: interior lines missing.
 * NOTE(review): appears to locate the allocation group (and the bit
 * within it) containing a victim block — confirm against the full
 * source.
 */
364 static int ocfs2_find_victim_alloc_group(
struct inode *inode,
368 struct buffer_head **ret_bh)
370 int ret,
i, bits_per_unit = 0;
375 struct buffer_head *ac_bh =
NULL, *gd_bh =
NULL;
/* build the system-inode name for the given type/slot */
381 ocfs2_sprintf_system_inode_name(namebuf,
sizeof(namebuf), type, slot);
401 inode->
i_sb->s_blocksize_bits;
/* bit index of the victim block inside its group */
443 *vict_bit = (vict_blkno - blkno) >>
445 mlog(0,
"find the victim group: #%llu, "
446 "total_bits: %u, vict_bit: %u\n",
/*
 * ocfs2_validate_and_adjust_move_goal() — FRAGMENT: interior lines
 * missing.  Snaps the caller-supplied goal to a cluster boundary and
 * verifies the allocation group that contains it.
 */
468 static int ocfs2_validate_and_adjust_move_goal(
struct inode *inode,
471 int ret, goal_bit = 0;
473 struct buffer_head *gd_bh =
NULL;
477 inode->
i_sb->s_blocksize_bits);
/* round me_goal down to the start of its cluster */
482 range->
me_goal = ocfs2_block_to_cluster_start(inode->
i_sb,
/* look up the allocation group holding the goal block */
495 ret = ocfs2_find_victim_alloc_group(inode, range->
me_goal,
516 mlog(0,
"extents get ready to be moved to #%llu block\n",
/*
 * ocfs2_probe_alloc_group() — FRAGMENT: interior lines missing.
 * Scans a group bitmap starting at *goal_bit for a free run of
 * move_len bits, giving up once the probe drifts more than max_hop
 * bits from the starting goal.
 */
525 static void ocfs2_probe_alloc_group(
struct inode *inode,
struct buffer_head *bh,
526 int *goal_bit,
u32 move_len,
u32 max_hop,
529 int i,
used, last_free_bits = 0, base_bit = *goal_bit;
/* cluster offset corresponding to the group's base block */
531 u32 base_cpos = ocfs2_blocks_to_clusters(inode->
i_sb,
/* drifted too far from the requested goal: stop probing */
542 if ((i - base_bit) > max_hop) {
/* a free run big enough for the whole move was found */
554 if (last_free_bits == move_len) {
556 *phys_cpos = base_cpos +
i;
561 mlog(0,
"found phys_cpos: %u to fit the wanted moving.\n", *phys_cpos);
/*
 * ocfs2_alloc_dinode_update_counts() — FRAGMENT: most of the body is
 * missing from this extract.
 */
564 static int ocfs2_alloc_dinode_update_counts(
struct inode *inode,
566 struct buffer_head *di_bh,
/* consume num_bits from the chain record's free count (LE field) */
585 le32_add_cpu(&cl->
cl_recs[chain].c_free, -num_bits);
/*
 * ocfs2_block_group_set_bits() — FRAGMENT: interior lines missing.
 * NOTE(review): appears to mark num_bits bits starting at bit_off as
 * used in an allocation-group bitmap — confirm against the full source.
 */
592 static inline int ocfs2_block_group_set_bits(handle_t *handle,
593 struct inode *alloc_inode,
595 struct buffer_head *group_bh,
596 unsigned int bit_off,
597 unsigned int num_bits)
608 mlog(0,
"block_group_set_bits: off = %u, num = %u\n", bit_off,
/* cluster bitmaps take a different journaling/cache path below */
611 if (ocfs2_is_cluster_bitmap(alloc_inode))
615 INODE_CACHE(alloc_inode),
/* corruption message: free count disagrees with claimed frees */
626 " count %u but claims %u are freed. num_bits %d",
/*
 * NOTE(review): FRAGMENT of what appears to be ocfs2_move_extent() (the
 * signature line is missing from this extract); only trailing
 * parameters and scattered body lines are visible.
 */
642 u32 cpos,
u32 phys_cpos,
u32 *new_phys_cpos,
643 u32 len,
int ext_flags)
645 int ret, credits = 0, extra_blocks = 0, goal_bit = 0;
647 struct inode *inode = context->
inode;
650 struct inode *gb_inode =
NULL;
651 struct buffer_head *gb_bh =
NULL;
652 struct buffer_head *gd_bh =
NULL;
/* cap, in clusters, on how far the new location may drift from the goal */
655 u32 move_max_hop = ocfs2_blocks_to_clusters(inode->
i_sb,
656 context->
range->me_threshold);
657 u64 phys_blkno, new_phys_blkno;
659 phys_blkno = ocfs2_clusters_to_blocks(inode->
i_sb, phys_cpos);
661 if ((ext_flags & OCFS2_EXT_REFCOUNTED) && len) {
663 BUG_ON(!(OCFS2_I(inode)->ip_dyn_features &
/* reserve allocators/credits for moving len clusters with 1 split */
687 ret = ocfs2_lock_allocators_move_extents(inode, &context->
et, len, 1,
689 NULL, extra_blocks, &credits);
718 goto out_unlock_gb_mutex;
/* journal start failed: propagate the error code */
724 if (IS_ERR(handle)) {
725 ret = PTR_ERR(handle);
727 goto out_unlock_tl_inode;
730 new_phys_blkno = ocfs2_clusters_to_blocks(inode->
i_sb, *new_phys_cpos);
/* find the allocation group containing the requested goal block */
731 ret = ocfs2_find_victim_alloc_group(inode, new_phys_blkno,
/* probe that group for a free run of len bits within move_max_hop */
746 ocfs2_probe_alloc_group(inode, gd_bh, &goal_bit, len, move_max_hop,
748 if (!*new_phys_cpos) {
/* relocate the extent to the cluster found by the probe */
753 ret = __ocfs2_move_extent(handle, context, cpos, len, phys_cpos,
754 *new_phys_cpos, ext_flags);
/* account the newly used bits in the bitmap dinode... */
761 ret = ocfs2_alloc_dinode_update_counts(gb_inode, handle, gb_bh, len,
/* ...and set them in the group bitmap itself */
768 ret = ocfs2_block_group_set_bits(handle, gb_inode, gd, gd_bh,
/*
 * ocfs2_calc_extent_defrag_len() — FRAGMENT: interior lines missing.
 * Clamps *alloc_size so one defrag pass never exceeds the threshold,
 * tracking progress so far in *len_defraged.
 */
809 static void ocfs2_calc_extent_defrag_len(
u32 *alloc_size,
u32 *len_defraged,
/* whole extent still fits under the threshold: take it all */
812 if ((*alloc_size + *len_defraged) < threshold) {
816 *len_defraged += *alloc_size;
817 }
else if (*len_defraged == 0) {
/* otherwise only defrag up to the remaining headroom */
831 *alloc_size = threshold - *len_defraged;
/*
 * __ocfs2_move_extents_range() — FRAGMENT: interior lines missing.
 * Walks the requested range piece by piece, either defragging
 * (ocfs2_defrag_extent) or relocating (ocfs2_move_extent) each chunk.
 */
836 static int __ocfs2_move_extents_range(
struct buffer_head *di_bh,
839 int ret = 0,
flags, do_defrag, skip = 0;
840 u32 cpos, phys_cpos, move_start, len_to_move, alloc_size;
841 u32 len_defraged = 0, defrag_thresh = 0, new_phys_cpos = 0;
843 struct inode *inode = context->
inode;
857 ocfs2_init_dealloc_ctxt(&context->
dealloc);
/* convert the byte-based start of the range into clusters */
872 move_start = ocfs2_clusters_for_bytes(osb->
sb, range->
me_start);
875 if (len_to_move >= move_start)
876 len_to_move -= move_start;
882 if (defrag_thresh <= 1)
885 new_phys_cpos = ocfs2_blocks_to_clusters(inode->
i_sb,
888 mlog(0,
"Inode: %llu, start: %llu, len: %llu, cstart: %u, clen: %u, "
890 (
unsigned long long)OCFS2_I(inode)->ip_blkno,
891 (
unsigned long long)range->
me_start,
892 (
unsigned long long)range->
me_len,
893 move_start, len_to_move, defrag_thresh);
/* main loop: consume the range one chunk at a time */
896 while (len_to_move) {
/* never process more than what is left in the range */
904 if (alloc_size > len_to_move)
905 alloc_size = len_to_move;
/* defrag path: clamp alloc_size against the defrag threshold */
921 ocfs2_calc_extent_defrag_len(&alloc_size, &len_defraged,
922 defrag_thresh, &skip);
931 mlog(0,
"#Defrag: cpos: %u, phys_cpos: %u, "
932 "alloc_size: %u, len_defraged: %u\n",
933 cpos, phys_cpos, alloc_size, len_defraged);
935 ret = ocfs2_defrag_extent(context, cpos, phys_cpos,
/* move path: relocate toward new_phys_cpos, then advance the goal */
938 ret = ocfs2_move_extent(context, cpos, phys_cpos,
939 &new_phys_cpos, alloc_size,
942 new_phys_cpos += alloc_size;
953 len_to_move -= alloc_size;
/*
 * NOTE(review): FRAGMENT of what appears to be ocfs2_move_extents()
 * (the signature line is missing from this extract); only scattered
 * body lines are visible.
 */
975 struct inode *inode = context->
inode;
977 struct buffer_head *di_bh =
NULL;
/* refuse to move extents on a hard- or soft-read-only filesystem */
983 if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
/* do the actual range walk while holding ip_alloc_sem for write */
1008 status = __ocfs2_move_extents_range(di_bh, context);
1010 up_write(&OCFS2_I(inode)->ip_alloc_sem);
1013 goto out_inode_unlock;
/* journal start failed: propagate the error code */
1020 if (IS_ERR(handle)) {
1021 status = PTR_ERR(handle);
1023 goto out_inode_unlock;
1058 struct inode *inode = filp->
f_path.dentry->d_inode;
1082 context->
file = filp;
1094 if (range.
me_start > i_size_read(inode))
1125 status = ocfs2_validate_and_adjust_move_goal(inode, &range);